/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include "ena.h"
#include "ena_datapath.h"
#include "ena_rss.h"
#include "ena_sysctl.h"

#ifdef DEV_NETMAP
#include "ena_netmap.h"
#endif /* DEV_NETMAP */

/*********************************************************
 * Function prototypes
 *********************************************************/
static int	ena_probe(device_t);
static void	ena_intr_msix_mgmnt(void *);
static void	ena_free_pci_resources(struct ena_adapter *);
static int	ena_change_mtu(if_t, int);
static inline void ena_alloc_counters(counter_u64_t *, int);
static inline void ena_free_counters(counter_u64_t *, int);
static inline void ena_reset_counters(counter_u64_t *, int);
static void	ena_init_io_rings_common(struct ena_adapter *, struct ena_ring *,
    uint16_t);
static void	ena_init_io_rings_basic(struct ena_adapter *);
static void	ena_init_io_rings_advanced(struct ena_adapter *);
static void	ena_init_io_rings(struct ena_adapter *);
static void	ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
static void	ena_free_all_io_rings_resources(struct ena_adapter *);
static int	ena_setup_tx_dma_tag(struct ena_adapter *);
static int	ena_free_tx_dma_tag(struct ena_adapter *);
static int	ena_setup_rx_dma_tag(struct ena_adapter *);
static int	ena_free_rx_dma_tag(struct ena_adapter *);
static void	ena_release_all_tx_dmamap(struct ena_ring *);
static int	ena_setup_tx_resources(struct ena_adapter *, int);
static void	ena_free_tx_resources(struct ena_adapter *, int);
static int	ena_setup_all_tx_resources(struct ena_adapter *);
static void	ena_free_all_tx_resources(struct ena_adapter *);
static int	ena_setup_rx_resources(struct ena_adapter *, unsigned int);
static void	ena_free_rx_resources(struct ena_adapter *, unsigned int);
static int	ena_setup_all_rx_resources(struct ena_adapter *);
static void	ena_free_all_rx_resources(struct ena_adapter *);
static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void	ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void	ena_free_rx_bufs(struct ena_adapter *, unsigned int);
static void	ena_refill_all_rx_bufs(struct ena_adapter *);
static void	ena_free_all_rx_bufs(struct ena_adapter *);
static void	ena_free_tx_bufs(struct ena_adapter *, unsigned int);
static void	ena_free_all_tx_bufs(struct ena_adapter *);
static void	ena_destroy_all_tx_queues(struct ena_adapter *);
static void	ena_destroy_all_rx_queues(struct ena_adapter *);
static void	ena_destroy_all_io_queues(struct ena_adapter *);
static int	ena_create_io_queues(struct ena_adapter *);
static int	ena_handle_msix(void *);
static int	ena_enable_msix(struct ena_adapter *);
static void	ena_setup_mgmnt_intr(struct ena_adapter *);
static int	ena_setup_io_intr(struct ena_adapter *);
static int	ena_request_mgmnt_irq(struct ena_adapter *);
static int	ena_request_io_irq(struct ena_adapter *);
static void	ena_free_mgmnt_irq(struct ena_adapter *);
static void	ena_free_io_irq(struct ena_adapter *);
static void	ena_free_irqs(struct ena_adapter *);
static void	ena_disable_msix(struct ena_adapter *);
static void	ena_unmask_all_io_irqs(struct ena_adapter *);
static int	ena_up_complete(struct ena_adapter *);
static uint64_t	ena_get_counter(if_t, ift_counter);
static int	ena_media_change(if_t);
static void	ena_media_status(if_t, struct ifmediareq *);
static void	ena_init(void *);
static int	ena_ioctl(if_t, u_long, caddr_t);
static int	ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
static void	ena_update_host_info(struct ena_admin_host_info *, if_t);
static void	ena_update_hwassist(struct ena_adapter *);
static void	ena_setup_ifnet(device_t, struct ena_adapter *,
    struct ena_com_dev_get_features_ctx *);
static int	ena_enable_wc(device_t, struct resource *);
static int	ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
    struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
static int	ena_map_llq_mem_bar(device_t, struct ena_com_dev *);
static uint32_t	ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
    struct ena_com_dev_get_features_ctx *);
static int	ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *, struct ena_adapter *);
static void	ena_config_host_info(struct ena_com_dev *, device_t);
static int	ena_attach(device_t);
static int	ena_detach(device_t);
static int	ena_device_init(struct ena_adapter *, device_t,
    struct ena_com_dev_get_features_ctx *, int *);
static int	ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *);
static void	ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
static void	unimplemented_aenq_handler(void *, struct ena_admin_aenq_entry *);
static int	ena_copy_eni_metrics(struct ena_adapter *);
static int	ena_copy_srd_metrics(struct ena_adapter *);
static int	ena_copy_customer_metrics(struct ena_adapter *);
static void	ena_timer_service(void *);
static enum ena_regs_reset_reason_types check_cdesc_in_tx_cq(struct ena_adapter *,
    struct ena_ring *);
#ifdef DEV_NETMAP
static int	ena_reinit_netmap(struct ena_adapter *adapter);
#endif

static char ena_version[] = ENA_DEVICE_NAME ENA_DRV_MODULE_NAME
    " v" ENA_DRV_MODULE_VERSION;

static ena_vendor_info_t ena_vendor_info_array[] = {
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF_RSERV0, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF_RSERV0, 0 },
	/* Last entry */
	{ 0, 0, 0 }
};

struct sx ena_global_lock;

/*
 * Contains pointers to event handlers, e.g. link state change.
 */
static struct ena_aenq_handlers aenq_handlers;

void
ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

int
ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
    int mapflags, bus_size_t alignment, int domain)
{
	struct ena_adapter *adapter = device_get_softc(dmadev);
	device_t pdev = adapter->pdev;
	uint32_t maxsize;
	uint64_t dma_space_addr;
	int error;

	maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;

	dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
	if (unlikely(dma_space_addr == 0))
		dma_space_addr = BUS_SPACE_MAXADDR;

	error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
	    alignment, 0,	/* alignment, bounds */
	    dma_space_addr,	/* lowaddr of exclusion window */
	    BUS_SPACE_MAXADDR,	/* highaddr of exclusion window */
	    NULL, NULL,		/* filter, filterarg */
	    maxsize,		/* maxsize */
	    1,			/* nsegments */
	    maxsize,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,	/* flags */
	    NULL,		/* lockfunc */
	    NULL,		/* lockarg */
	    &dma->tag);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dma_tag_create failed: %d\n", error);
		goto fail_tag;
	}

	error = bus_dma_tag_set_domain(dma->tag, domain);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dma_tag_set_domain failed: %d\n",
		    error);
		goto fail_map_create;
	}

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dmamem_alloc(%ju) failed: %d\n",
		    (uintmax_t)size, error);
		goto fail_map_create;
	}

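	/*
	 * Load the buffer into the freshly created tag; ena_dmamap_callback
	 * stores the resulting bus address in dma->paddr.
	 */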
	dma->paddr = 0;
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    ena_dmamap_callback, &dma->paddr, mapflags);
	if (unlikely((error != 0) || (dma->paddr == 0))) {
		ena_log(pdev, ERR, "bus_dmamap_load failed: %d\n", error);
		goto fail_map_load;
	}

	bus_dmamap_sync(dma->tag, dma->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

fail_map_load:
	bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
fail_map_create:
	bus_dma_tag_destroy(dma->tag);
fail_tag:
	dma->tag = NULL;
	dma->vaddr = NULL;
	dma->paddr = 0;

	return (error);
}

static void
ena_free_pci_resources(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;

	if (adapter->memory != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY,
		    PCIR_BAR(ENA_MEM_BAR), adapter->memory);
	}

	if (adapter->registers != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY,
		    PCIR_BAR(ENA_REG_BAR), adapter->registers);
	}

	if (adapter->msix != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY, adapter->msix_rid,
		    adapter->msix);
	}
}

static int
ena_probe(device_t dev)
{
	ena_vendor_info_t *ent;
	uint16_t pci_vendor_id = 0;
	uint16_t pci_device_id = 0;

	pci_vendor_id = pci_get_vendor(dev);
	pci_device_id = pci_get_device(dev);

	ent = ena_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id)) {
			ena_log_raw(DBG, "vendor=%x device=%x\n", pci_vendor_id,
			    pci_device_id);

			device_set_desc(dev, ENA_DEVICE_DESC);
			return (BUS_PROBE_DEFAULT);
		}

		ent++;
	}

	return (ENXIO);
}

static int
ena_change_mtu(if_t ifp, int new_mtu)
{
	struct ena_adapter *adapter = if_getsoftc(ifp);
	device_t pdev = adapter->pdev;
	int rc;

	if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
		ena_log(pdev, ERR,
		    "Invalid MTU setting. new_mtu: %d max mtu: %d min mtu: %d\n",
		    new_mtu, adapter->max_mtu, ENA_MIN_MTU);
		return (EINVAL);
	}

	rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (likely(rc == 0)) {
		ena_log(pdev, DBG, "set MTU to %d\n", new_mtu);
		if_setmtu(ifp, new_mtu);
	} else {
		ena_log(pdev, ERR, "Failed to set MTU to %d\n", new_mtu);
	}

	return (rc);
}

static inline void
ena_alloc_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		*begin = counter_u64_alloc(M_WAITOK);
}

static inline void
ena_free_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		counter_u64_free(*begin);
}

static inline void
ena_reset_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		counter_u64_zero(*begin);
}

static void
ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
    uint16_t qid)
{
	ring->qid = qid;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	atomic_store_8(&ring->first_interrupt, 0);
	ring->no_interrupt_event_cnt = 0;
}

static void
ena_init_io_rings_basic(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	struct ena_que *que;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_io_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;

		que = &adapter->que[i];
		que->adapter = adapter;
		que->id = i;
		que->tx_ring = txr;
		que->rx_ring = rxr;

		txr->que = que;
		rxr->que = que;

		rxr->empty_rx_queue = 0;
		rxr->rx_mbuf_sz = ena_mbuf_sz;
	}
}

static void
ena_init_io_rings_advanced(struct ena_adapter *adapter)
{
	struct ena_ring *txr, *rxr;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* Allocate a buf ring */
		txr->buf_ring_size = adapter->buf_ring_size;
		txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF, M_WAITOK,
		    &txr->ring_mtx);

		/* Allocate Tx statistics. */
		ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
		    sizeof(txr->tx_stats));
		txr->tx_last_cleanup_ticks = ticks;

		/* Allocate Rx statistics. */
		ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
		    sizeof(rxr->rx_stats));

		/* Initialize locks */
		snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(adapter->pdev), i);
		snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(adapter->pdev), i);

		mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
	}
}

static void
ena_init_io_rings(struct ena_adapter *adapter)
{
	/*
	 * IO rings initialization can be divided into the 2 steps:
	 * 1. Initialize variables and fields with initial values and copy
	 *    them from adapter/ena_dev (basic)
	 * 2. Allocate mutex, counters and buf_ring (advanced)
	 */
	ena_init_io_rings_basic(adapter);
	ena_init_io_rings_advanced(adapter);
}

static void
ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *txr = &adapter->tx_ring[qid];
	struct ena_ring *rxr = &adapter->rx_ring[qid];

	ena_free_counters((counter_u64_t *)&txr->tx_stats,
	    sizeof(txr->tx_stats));
	ena_free_counters((counter_u64_t *)&rxr->rx_stats,
	    sizeof(rxr->rx_stats));

	ENA_RING_MTX_LOCK(txr);
	drbr_free(txr->br, M_DEVBUF);
	ENA_RING_MTX_UNLOCK(txr);

	mtx_destroy(&txr->ring_mtx);
}

static void
ena_free_all_io_rings_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_io_ring_resources(adapter, i);
}

static int
ena_setup_tx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	/* Create DMA tag for Tx buffers */
	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
	    1, 0,				  /* alignment, bounds */
	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
	    BUS_SPACE_MAXADDR,			  /* highaddr of excl window */
	    NULL, NULL,				  /* filter, filterarg */
	    ENA_TSO_MAXSIZE,			  /* maxsize */
	    adapter->max_tx_sgl_size - 1,	  /* nsegments */
	    ENA_TSO_MAXSIZE,			  /* maxsegsize */
	    0,					  /* flags */
	    NULL,				  /* lockfunc */
	    NULL,				  /* lockfuncarg */
	    &adapter->tx_buf_tag);

	return (ret);
}

static int
ena_free_tx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	ret = bus_dma_tag_destroy(adapter->tx_buf_tag);

	if (likely(ret == 0))
		adapter->tx_buf_tag = NULL;

	return (ret);
}

static int
ena_setup_rx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	/* Create DMA tag for Rx buffers */
	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */
	    1, 0,				  /* alignment, bounds */
	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
	    BUS_SPACE_MAXADDR,			  /* highaddr of excl window */
	    NULL, NULL,				  /* filter, filterarg */
	    ena_mbuf_sz,			  /* maxsize */
	    adapter->max_rx_sgl_size,		  /* nsegments */
	    ena_mbuf_sz,			  /* maxsegsize */
	    0,					  /* flags */
	    NULL,				  /* lockfunc */
	    NULL,				  /* lockarg */
	    &adapter->rx_buf_tag);

	return (ret);
}

static int
ena_free_rx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	ret = bus_dma_tag_destroy(adapter->rx_buf_tag);

	if (likely(ret == 0))
		adapter->rx_buf_tag = NULL;

	return (ret);
}

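/*
 * Validate the req_id reported with a Tx completion. A non-zero ena_com
 * status means either a malformed Tx descriptor (ENA_COM_FAULT) or an
 * out-of-range req_id (ENA_COM_INVAL); in both cases the device is scheduled
 * for reset and EFAULT is returned to the caller.
 */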
int
validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id, int tx_req_id_rc)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;

	if (unlikely(tx_req_id_rc != 0)) {
		if (tx_req_id_rc == ENA_COM_FAULT) {
			reset_reason = ENA_REGS_RESET_TX_DESCRIPTOR_MALFORMED;
			ena_log(adapter->pdev, ERR,
			    "TX descriptor malformed. req_id %hu qid %hu\n",
			    req_id, tx_ring->qid);
		} else if (tx_req_id_rc == ENA_COM_INVAL) {
			ena_log_nm(adapter->pdev, WARN,
			    "Invalid req_id %hu in qid %hu\n",
			    req_id, tx_ring->qid);
			counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);
		}

		ena_trigger_reset(adapter, reset_reason);
		return (EFAULT);
	}

	return (0);
}

static void
ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_tx_buffer *tx_info;
	bus_dma_tag_t tx_tag = adapter->tx_buf_tag;
	int i;
#ifdef DEV_NETMAP
	struct ena_netmap_tx_info *nm_info;
	int j;
#endif /* DEV_NETMAP */

	for (i = 0; i < tx_ring->ring_size; ++i) {
		tx_info = &tx_ring->tx_buffer_info[i];
#ifdef DEV_NETMAP
		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
			nm_info = &tx_info->nm_info;
			for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) {
				if (nm_info->map_seg[j] != NULL) {
					bus_dmamap_destroy(tx_tag,
					    nm_info->map_seg[j]);
					nm_info->map_seg[j] = NULL;
				}
			}
		}
#endif /* DEV_NETMAP */
		if (tx_info->dmamap != NULL) {
			bus_dmamap_destroy(tx_tag, tx_info->dmamap);
			tx_info->dmamap = NULL;
		}
	}
}

/**
 * ena_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	device_t pdev = adapter->pdev;
	char thread_name[MAXCOMLEN + 1];
	struct ena_que *que = &adapter->que[qid];
	struct ena_ring *tx_ring = que->tx_ring;
	cpuset_t *cpu_mask = NULL;
	int size, i, err;
#ifdef DEV_NETMAP
	bus_dmamap_t *map;
	int j;

	ena_netmap_reset_tx_ring(adapter, qid);
#endif /* DEV_NETMAP */

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;

	tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->tx_buffer_info == NULL))
		return (ENOMEM);

	size = sizeof(uint16_t) * tx_ring->ring_size;
	tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->free_tx_ids == NULL))
		goto err_buf_info_free;

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
		goto err_tx_ids_free;

	/* Req id stack for TX OOO completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset TX statistics. */
	ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
	    sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->acum_pkts = 0;

	/* Make sure that drbr is empty */
	ENA_RING_MTX_LOCK(tx_ring);
	drbr_flush(adapter->ifp, tx_ring->br);
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* ... and create the buffer DMA maps */
	for (i = 0; i < tx_ring->ring_size; i++) {
		err = bus_dmamap_create(adapter->tx_buf_tag, 0,
		    &tx_ring->tx_buffer_info[i].dmamap);
		if (unlikely(err != 0)) {
			ena_log(pdev, ERR,
			    "Unable to create Tx DMA map for buffer %d\n", i);
			goto err_map_release;
		}

#ifdef DEV_NETMAP
		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
			map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
				err = bus_dmamap_create(adapter->tx_buf_tag, 0,
				    &map[j]);
				if (unlikely(err != 0)) {
					ena_log(pdev, ERR,
					    "Unable to create Tx DMA for buffer %d %d\n",
					    i, j);
					goto err_map_release;
				}
			}
		}
#endif /* DEV_NETMAP */
	}

	/* Allocate taskqueues */
	TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
	tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
	if (unlikely(tx_ring->enqueue_tq == NULL)) {
		ena_log(pdev, ERR,
		    "Unable to create taskqueue for enqueue task\n");
		i = tx_ring->ring_size;
		goto err_map_release;
	}

	tx_ring->running = true;

#ifdef RSS
	cpu_mask = &que->cpu_mask;
	snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
	    device_get_nameunit(adapter->pdev), que->cpu);
#else
	snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
	    device_get_nameunit(adapter->pdev), que->id);
#endif
	taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET,
	    cpu_mask, "%s", thread_name);

	return (0);

err_map_release:
	ena_release_all_tx_dmamap(tx_ring);
err_tx_ids_free:
	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;
err_buf_info_free:
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	return (ENOMEM);
}

/**
 * ena_free_tx_resources - Free Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 **/
static void
ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
#ifdef DEV_NETMAP
	struct ena_netmap_tx_info *nm_info;
	int j;
#endif /* DEV_NETMAP */

	while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL))
		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);

	taskqueue_free(tx_ring->enqueue_tq);

	ENA_RING_MTX_LOCK(tx_ring);
	/* Flush buffer ring, */
	drbr_flush(adapter->ifp, tx_ring->br);

	/* Free buffer DMA maps, */
	for (int i = 0; i < tx_ring->ring_size; i++) {
		bus_dmamap_sync(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap);
		bus_dmamap_destroy(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap);

#ifdef DEV_NETMAP
		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
			nm_info = &tx_ring->tx_buffer_info[i].nm_info;
			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
				if (nm_info->socket_buf_idx[j] != 0) {
					bus_dmamap_sync(adapter->tx_buf_tag,
					    nm_info->map_seg[j],
					    BUS_DMASYNC_POSTWRITE);
					ena_netmap_unload(adapter,
					    nm_info->map_seg[j]);
				}
				bus_dmamap_destroy(adapter->tx_buf_tag,
				    nm_info->map_seg[j]);
				nm_info->socket_buf_idx[j] = 0;
			}
		}
#endif /* DEV_NETMAP */

		m_freem(tx_ring->tx_buffer_info[i].mbuf);
		tx_ring->tx_buffer_info[i].mbuf = NULL;
	}
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* And free allocated memory. */
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;

	free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
	tx_ring->push_buf_intermediate_buf = NULL;
}

/**
 * ena_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Allocation for Tx Queue %u failed\n", i);
			goto err_setup_tx;
		}
	}

	return (0);

err_setup_tx:
	/* Rewind the index freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return (rc);
}

/**
 * ena_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: network interface device structure
 *
 * Free all transmit software resources
 **/
static void
ena_free_all_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_tx_resources(adapter, i);
}

/**
 * ena_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
	device_t pdev = adapter->pdev;
	struct ena_que *que = &adapter->que[qid];
	struct ena_ring *rx_ring = que->rx_ring;
	int size, err, i;

	size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;

#ifdef DEV_NETMAP
	ena_netmap_reset_rx_ring(adapter, qid);
	rx_ring->initialized = false;
#endif /* DEV_NETMAP */

	/*
	 * Alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size += sizeof(struct ena_rx_buffer);

	rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);

	size = sizeof(uint16_t) * rx_ring->ring_size;
	rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);

	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_rx_ids[i] = i;

	/* Reset RX statistics. */
	ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
	    sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* ... and create the buffer DMA maps */
	for (i = 0; i < rx_ring->ring_size; i++) {
		err = bus_dmamap_create(adapter->rx_buf_tag, 0,
		    &(rx_ring->rx_buffer_info[i].map));
		if (err != 0) {
			ena_log(pdev, ERR,
			    "Unable to create Rx DMA map for buffer %d\n", i);
			goto err_buf_info_unmap;
		}
	}

	/* Create LRO for the ring */
	if ((if_getcapenable(adapter->ifp) & IFCAP_LRO) != 0) {
		int err = tcp_lro_init(&rx_ring->lro);
		if (err != 0) {
			ena_log(pdev, ERR, "LRO[%d] Initialization failed!\n",
			    qid);
		} else {
			ena_log(pdev, DBG, "RX Soft LRO[%d] Initialized\n",
			    qid);
			rx_ring->lro.ifp = adapter->ifp;
		}
	}

	return (0);

err_buf_info_unmap:
	while (i--) {
		bus_dmamap_destroy(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
	}

	free(rx_ring->free_rx_ids, M_DEVBUF);
	rx_ring->free_rx_ids = NULL;
	free(rx_ring->rx_buffer_info, M_DEVBUF);
	rx_ring->rx_buffer_info = NULL;
	return (ENOMEM);
}

/**
 * ena_free_rx_resources - Free Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 **/
static void
ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	/* Free buffer DMA maps, */
	for (int i = 0; i < rx_ring->ring_size; i++) {
		bus_dmamap_sync(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
		m_freem(rx_ring->rx_buffer_info[i].mbuf);
		rx_ring->rx_buffer_info[i].mbuf = NULL;
		bus_dmamap_unload(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
		bus_dmamap_destroy(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
	}

	/* free LRO resources, */
	tcp_lro_free(&rx_ring->lro);

	/* free allocated memory */
	free(rx_ring->rx_buffer_info, M_DEVBUF);
	rx_ring->rx_buffer_info = NULL;

	free(rx_ring->free_rx_ids, M_DEVBUF);
	rx_ring->free_rx_ids = NULL;
}

/**
 * ena_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Allocation for Rx Queue %u failed\n", i);
			goto err_setup_rx;
		}
	}
	return (0);

err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return (rc);
}

/**
 * ena_free_all_rx_resources - Free Rx resources for all queues
 * @adapter: network interface device structure
 *
 * Free all receive software resources
 **/
static void
ena_free_all_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}

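/*
 * Allocate an mbuf cluster for a single Rx slot (jumbo-sized first, falling
 * back to a regular cluster), DMA-map it and fill the ena_com buffer
 * descriptor with the resulting bus address and length.
 */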
static inline int
ena_alloc_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
	device_t pdev = adapter->pdev;
	struct ena_com_buf *ena_buf;
	bus_dma_segment_t segs[1];
	int nsegs, error;
	int mlen;

	/* if previous allocated frag is not used */
	if (unlikely(rx_info->mbuf != NULL))
		return (0);

	/* Get mbuf using UMA allocator */
	rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
	    rx_ring->rx_mbuf_sz);

	if (unlikely(rx_info->mbuf == NULL)) {
		counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
		rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(rx_info->mbuf == NULL)) {
			counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
			return (ENOMEM);
		}
		mlen = MCLBYTES;
	} else {
		mlen = rx_ring->rx_mbuf_sz;
	}
	/* Set mbuf length */
	rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;

	/* Map packets for DMA */
	ena_log(pdev, DBG,
	    "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n",
	    adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
	error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
	    rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	if (unlikely((error != 0) || (nsegs != 1))) {
		ena_log(pdev, WARN,
		    "failed to map mbuf, error: %d, nsegs: %d\n", error, nsegs);
		counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
		goto exit;
	}

	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = segs[0].ds_addr;
	ena_buf->len = mlen;

	ena_log(pdev, DBG,
	    "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
	    rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);

	return (0);

exit:
	m_freem(rx_info->mbuf);
	rx_info->mbuf = NULL;
	return (EFAULT);
}

static void
ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
	if (rx_info->mbuf == NULL) {
		ena_log(adapter->pdev, WARN,
		    "Trying to free unallocated buffer\n");
		return;
	}

	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
	m_freem(rx_info->mbuf);
	rx_info->mbuf = NULL;
}

/**
 * ena_refill_rx_bufs - Refills ring with descriptors
 * @rx_ring: the ring which we want to feed with free descriptors
 * @num: number of descriptors to refill
 * Refills the ring with newly allocated DMA-mapped mbufs for receiving
 **/
uint32_t num) 11099b8d05b8SZbigniew Bodek { 11109b8d05b8SZbigniew Bodek struct ena_adapter *adapter = rx_ring->adapter; 11113fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 111243fefd16SMarcin Wojtas uint16_t next_to_use, req_id; 11139b8d05b8SZbigniew Bodek uint32_t i; 11149b8d05b8SZbigniew Bodek int rc; 11159b8d05b8SZbigniew Bodek 11163fc5d816SMarcin Wojtas ena_log_io(adapter->pdev, DBG, "refill qid: %d\n", rx_ring->qid); 11179b8d05b8SZbigniew Bodek 11189b8d05b8SZbigniew Bodek next_to_use = rx_ring->next_to_use; 11199b8d05b8SZbigniew Bodek 11209b8d05b8SZbigniew Bodek for (i = 0; i < num; i++) { 112143fefd16SMarcin Wojtas struct ena_rx_buffer *rx_info; 112243fefd16SMarcin Wojtas 11233fc5d816SMarcin Wojtas ena_log_io(pdev, DBG, "RX buffer - next to use: %d\n", 11243fc5d816SMarcin Wojtas next_to_use); 11259b8d05b8SZbigniew Bodek 112643fefd16SMarcin Wojtas req_id = rx_ring->free_rx_ids[next_to_use]; 112743fefd16SMarcin Wojtas rx_info = &rx_ring->rx_buffer_info[req_id]; 11289a0f2079SMarcin Wojtas #ifdef DEV_NETMAP 1129358bcc4cSMarcin Wojtas if (ena_rx_ring_in_netmap(adapter, rx_ring->qid)) 113082e558eaSDawid Gorecki rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, 113182e558eaSDawid Gorecki rx_info); 11329a0f2079SMarcin Wojtas else 11339a0f2079SMarcin Wojtas #endif /* DEV_NETMAP */ 11349b8d05b8SZbigniew Bodek rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info); 11353f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 11363fc5d816SMarcin Wojtas ena_log_io(pdev, WARN, 11374e8acd84SMarcin Wojtas "failed to alloc buffer for rx queue %d\n", 11384e8acd84SMarcin Wojtas rx_ring->qid); 11399b8d05b8SZbigniew Bodek break; 11409b8d05b8SZbigniew Bodek } 11419b8d05b8SZbigniew Bodek rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, 114243fefd16SMarcin Wojtas &rx_info->ena_buf, req_id); 11430bdffe59SMarcin Wojtas if (unlikely(rc != 0)) { 11443fc5d816SMarcin Wojtas ena_log_io(pdev, WARN, 11459b8d05b8SZbigniew Bodek "failed to add buffer for rx queue %d\n", 11469b8d05b8SZbigniew Bodek rx_ring->qid); 11479b8d05b8SZbigniew Bodek break; 11489b8d05b8SZbigniew Bodek } 11499b8d05b8SZbigniew Bodek next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, 11509b8d05b8SZbigniew Bodek rx_ring->ring_size); 11519b8d05b8SZbigniew Bodek } 11529b8d05b8SZbigniew Bodek 11533f9ed7abSMarcin Wojtas if (unlikely(i < num)) { 11549b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.refil_partial, 1); 11553fc5d816SMarcin Wojtas ena_log_io(pdev, WARN, 11564e8acd84SMarcin Wojtas "refilled rx qid %d with only %d mbufs (from %d)\n", 11574e8acd84SMarcin Wojtas rx_ring->qid, i, num); 11589b8d05b8SZbigniew Bodek } 11599b8d05b8SZbigniew Bodek 11608483b844SMarcin Wojtas if (likely(i != 0)) 11619b8d05b8SZbigniew Bodek ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); 11628483b844SMarcin Wojtas 11639b8d05b8SZbigniew Bodek rx_ring->next_to_use = next_to_use; 11649b8d05b8SZbigniew Bodek return (i); 11659b8d05b8SZbigniew Bodek } 11669b8d05b8SZbigniew Bodek 1167f9c9c01dSOsama Abboud #ifdef DEV_NETMAP 1168f9c9c01dSOsama Abboud static int 1169f9c9c01dSOsama Abboud ena_reinit_netmap(struct ena_adapter *adapter) 1170f9c9c01dSOsama Abboud { 1171f9c9c01dSOsama Abboud int rc; 1172f9c9c01dSOsama Abboud 1173f9c9c01dSOsama Abboud netmap_detach(adapter->ifp); 1174f9c9c01dSOsama Abboud rc = ena_netmap_attach(adapter); 1175f9c9c01dSOsama Abboud if (rc != 0) 1176f9c9c01dSOsama Abboud ena_log(adapter->pdev, ERR, "netmap attach failed: %d\n", rc); 1177f9c9c01dSOsama Abboud 1178f9c9c01dSOsama Abboud return rc; 1179f9c9c01dSOsama Abboud } 1180f9c9c01dSOsama Abboud 
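/*
 * Note: ena_reinit_netmap() above simply detaches and re-attaches the
 * netmap adapter. It is invoked by the ena_update_*() helpers below after
 * the IO rings have been reconfigured (queue count, ring size or drbr
 * size changes), so that netmap is rebuilt against the new ring layout.
 */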
1181f9c9c01dSOsama Abboud #endif /* DEV_NETMAP */ 11827d8c4feeSMarcin Wojtas int 118321823546SMarcin Wojtas ena_update_buf_ring_size(struct ena_adapter *adapter, 118421823546SMarcin Wojtas uint32_t new_buf_ring_size) 118521823546SMarcin Wojtas { 118621823546SMarcin Wojtas uint32_t old_buf_ring_size; 118721823546SMarcin Wojtas int rc = 0; 118821823546SMarcin Wojtas bool dev_was_up; 118921823546SMarcin Wojtas 119021823546SMarcin Wojtas old_buf_ring_size = adapter->buf_ring_size; 119121823546SMarcin Wojtas adapter->buf_ring_size = new_buf_ring_size; 119221823546SMarcin Wojtas 119321823546SMarcin Wojtas dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 119421823546SMarcin Wojtas ena_down(adapter); 119521823546SMarcin Wojtas 119621823546SMarcin Wojtas /* Reconfigure buf ring for all Tx rings. */ 119721823546SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 119821823546SMarcin Wojtas ena_init_io_rings_advanced(adapter); 1199f9c9c01dSOsama Abboud #ifdef DEV_NETMAP 1200f9c9c01dSOsama Abboud rc = ena_reinit_netmap(adapter); 1201f9c9c01dSOsama Abboud if (rc != 0) 1202f9c9c01dSOsama Abboud return rc; 1203f9c9c01dSOsama Abboud 1204f9c9c01dSOsama Abboud #endif /* DEV_NETMAP */ 120521823546SMarcin Wojtas if (dev_was_up) { 120621823546SMarcin Wojtas /* 120721823546SMarcin Wojtas * If ena_up() fails, it's not because of recent buf_ring size 120821823546SMarcin Wojtas * changes. Because of that, we just want to revert old drbr 120921823546SMarcin Wojtas * value and trigger the reset because something else had to 121021823546SMarcin Wojtas * go wrong. 121121823546SMarcin Wojtas */ 121221823546SMarcin Wojtas rc = ena_up(adapter); 121321823546SMarcin Wojtas if (unlikely(rc != 0)) { 12143fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 121521823546SMarcin Wojtas "Failed to configure device after setting new drbr size: %u. 
Reverting old value: %u and triggering the reset\n", 121621823546SMarcin Wojtas new_buf_ring_size, old_buf_ring_size); 121721823546SMarcin Wojtas 121821823546SMarcin Wojtas /* Revert old size and trigger the reset */ 121921823546SMarcin Wojtas adapter->buf_ring_size = old_buf_ring_size; 122021823546SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 122121823546SMarcin Wojtas ena_init_io_rings_advanced(adapter); 1222f9c9c01dSOsama Abboud #ifdef DEV_NETMAP 1223f9c9c01dSOsama Abboud rc = ena_reinit_netmap(adapter); 1224f9c9c01dSOsama Abboud if (rc != 0) 1225f9c9c01dSOsama Abboud return rc; 122621823546SMarcin Wojtas 1227f9c9c01dSOsama Abboud #endif /* DEV_NETMAP */ 122821823546SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, 122921823546SMarcin Wojtas adapter); 123021823546SMarcin Wojtas ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER); 123121823546SMarcin Wojtas } 123221823546SMarcin Wojtas } 123321823546SMarcin Wojtas 123421823546SMarcin Wojtas return (rc); 123521823546SMarcin Wojtas } 123621823546SMarcin Wojtas 123721823546SMarcin Wojtas int 12387d8c4feeSMarcin Wojtas ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size, 12397d8c4feeSMarcin Wojtas uint32_t new_rx_size) 12407d8c4feeSMarcin Wojtas { 12417d8c4feeSMarcin Wojtas uint32_t old_tx_size, old_rx_size; 12427d8c4feeSMarcin Wojtas int rc = 0; 12437d8c4feeSMarcin Wojtas bool dev_was_up; 12447d8c4feeSMarcin Wojtas 12459762a033SMarcin Wojtas old_tx_size = adapter->requested_tx_ring_size; 12469762a033SMarcin Wojtas old_rx_size = adapter->requested_rx_ring_size; 12479762a033SMarcin Wojtas adapter->requested_tx_ring_size = new_tx_size; 12489762a033SMarcin Wojtas adapter->requested_rx_ring_size = new_rx_size; 12497d8c4feeSMarcin Wojtas 12507d8c4feeSMarcin Wojtas dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 12517d8c4feeSMarcin Wojtas ena_down(adapter); 12527d8c4feeSMarcin Wojtas 12537d8c4feeSMarcin Wojtas /* Configure queues with new size. */ 12547d8c4feeSMarcin Wojtas ena_init_io_rings_basic(adapter); 1255f9c9c01dSOsama Abboud #ifdef DEV_NETMAP 1256f9c9c01dSOsama Abboud rc = ena_reinit_netmap(adapter); 1257f9c9c01dSOsama Abboud if (rc != 0) 1258f9c9c01dSOsama Abboud return rc; 1259f9c9c01dSOsama Abboud 1260f9c9c01dSOsama Abboud #endif /* DEV_NETMAP */ 12617d8c4feeSMarcin Wojtas if (dev_was_up) { 12627d8c4feeSMarcin Wojtas rc = ena_up(adapter); 12637d8c4feeSMarcin Wojtas if (unlikely(rc != 0)) { 12643fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 12657d8c4feeSMarcin Wojtas "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n", 12667d8c4feeSMarcin Wojtas new_tx_size, new_rx_size, old_tx_size, old_rx_size); 12677d8c4feeSMarcin Wojtas 12687d8c4feeSMarcin Wojtas /* Revert old size. */ 12699762a033SMarcin Wojtas adapter->requested_tx_ring_size = old_tx_size; 12709762a033SMarcin Wojtas adapter->requested_rx_ring_size = old_rx_size; 12717d8c4feeSMarcin Wojtas ena_init_io_rings_basic(adapter); 1272f9c9c01dSOsama Abboud #ifdef DEV_NETMAP 1273f9c9c01dSOsama Abboud rc = ena_reinit_netmap(adapter); 1274f9c9c01dSOsama Abboud if (rc != 0) 1275f9c9c01dSOsama Abboud return rc; 12767d8c4feeSMarcin Wojtas 1277f9c9c01dSOsama Abboud #endif /* DEV_NETMAP */ 12787d8c4feeSMarcin Wojtas /* And try again. */ 12797d8c4feeSMarcin Wojtas rc = ena_up(adapter); 12807d8c4feeSMarcin Wojtas if (unlikely(rc != 0)) { 12813fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 12827d8c4feeSMarcin Wojtas "Failed to revert old queue sizes. 
Triggering device reset.\n"); 12837d8c4feeSMarcin Wojtas /* 12847d8c4feeSMarcin Wojtas * If we've failed again, something had to go 12857d8c4feeSMarcin Wojtas * wrong. After reset, the device should try to 12867d8c4feeSMarcin Wojtas * go up 12877d8c4feeSMarcin Wojtas */ 12887d8c4feeSMarcin Wojtas ENA_FLAG_SET_ATOMIC( 12897d8c4feeSMarcin Wojtas ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 12907d8c4feeSMarcin Wojtas ena_trigger_reset(adapter, 12917d8c4feeSMarcin Wojtas ENA_REGS_RESET_OS_TRIGGER); 12927d8c4feeSMarcin Wojtas } 12937d8c4feeSMarcin Wojtas } 12947d8c4feeSMarcin Wojtas } 12957d8c4feeSMarcin Wojtas 12967d8c4feeSMarcin Wojtas return (rc); 12977d8c4feeSMarcin Wojtas } 12987d8c4feeSMarcin Wojtas 12999b8d05b8SZbigniew Bodek static void 130056d41ad5SMarcin Wojtas ena_update_io_rings(struct ena_adapter *adapter, uint32_t num) 130156d41ad5SMarcin Wojtas { 130256d41ad5SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 130356d41ad5SMarcin Wojtas /* Force indirection table to be reinitialized */ 130456d41ad5SMarcin Wojtas ena_com_rss_destroy(adapter->ena_dev); 130556d41ad5SMarcin Wojtas 130656d41ad5SMarcin Wojtas adapter->num_io_queues = num; 130756d41ad5SMarcin Wojtas ena_init_io_rings(adapter); 130856d41ad5SMarcin Wojtas } 130956d41ad5SMarcin Wojtas 1310f9e1d947SOsama Abboud int 1311f9e1d947SOsama Abboud ena_update_base_cpu(struct ena_adapter *adapter, int new_num) 1312f9e1d947SOsama Abboud { 1313f9e1d947SOsama Abboud int old_num; 1314f9e1d947SOsama Abboud int rc = 0; 1315f9e1d947SOsama Abboud bool dev_was_up; 1316f9e1d947SOsama Abboud 1317f9e1d947SOsama Abboud dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 1318f9e1d947SOsama Abboud old_num = adapter->irq_cpu_base; 1319f9e1d947SOsama Abboud 1320f9e1d947SOsama Abboud ena_down(adapter); 1321f9e1d947SOsama Abboud 1322f9e1d947SOsama Abboud adapter->irq_cpu_base = new_num; 1323f9e1d947SOsama Abboud 1324f9e1d947SOsama Abboud if (dev_was_up) { 1325f9e1d947SOsama Abboud rc = ena_up(adapter); 1326f9e1d947SOsama Abboud if (unlikely(rc != 0)) { 1327f9e1d947SOsama Abboud ena_log(adapter->pdev, ERR, 1328f9e1d947SOsama Abboud "Failed to configure device %d IRQ base CPU. " 1329f9e1d947SOsama Abboud "Reverting to previous value: %d\n", 1330f9e1d947SOsama Abboud new_num, old_num); 1331f9e1d947SOsama Abboud 1332f9e1d947SOsama Abboud adapter->irq_cpu_base = old_num; 1333f9e1d947SOsama Abboud 1334f9e1d947SOsama Abboud rc = ena_up(adapter); 1335f9e1d947SOsama Abboud if (unlikely(rc != 0)) { 1336f9e1d947SOsama Abboud ena_log(adapter->pdev, ERR, 1337f9e1d947SOsama Abboud "Failed to revert to previous setup." 
1338f9e1d947SOsama Abboud "Triggering device reset.\n"); 1339f9e1d947SOsama Abboud ENA_FLAG_SET_ATOMIC( 1340f9e1d947SOsama Abboud ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 1341f9e1d947SOsama Abboud ena_trigger_reset(adapter, 1342f9e1d947SOsama Abboud ENA_REGS_RESET_OS_TRIGGER); 1343f9e1d947SOsama Abboud } 1344f9e1d947SOsama Abboud } 1345f9e1d947SOsama Abboud } 1346f9e1d947SOsama Abboud return (rc); 1347f9e1d947SOsama Abboud } 1348f9e1d947SOsama Abboud 1349f9e1d947SOsama Abboud int 1350f9e1d947SOsama Abboud ena_update_cpu_stride(struct ena_adapter *adapter, uint32_t new_num) 1351f9e1d947SOsama Abboud { 1352f9e1d947SOsama Abboud uint32_t old_num; 1353f9e1d947SOsama Abboud int rc = 0; 1354f9e1d947SOsama Abboud bool dev_was_up; 1355f9e1d947SOsama Abboud 1356f9e1d947SOsama Abboud dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 1357f9e1d947SOsama Abboud old_num = adapter->irq_cpu_stride; 1358f9e1d947SOsama Abboud 1359f9e1d947SOsama Abboud ena_down(adapter); 1360f9e1d947SOsama Abboud 1361f9e1d947SOsama Abboud adapter->irq_cpu_stride = new_num; 1362f9e1d947SOsama Abboud 1363f9e1d947SOsama Abboud if (dev_was_up) { 1364f9e1d947SOsama Abboud rc = ena_up(adapter); 1365f9e1d947SOsama Abboud if (unlikely(rc != 0)) { 1366f9e1d947SOsama Abboud ena_log(adapter->pdev, ERR, 1367f9e1d947SOsama Abboud "Failed to configure device %d IRQ CPU stride. " 1368f9e1d947SOsama Abboud "Reverting to previous value: %d\n", 1369f9e1d947SOsama Abboud new_num, old_num); 1370f9e1d947SOsama Abboud 1371f9e1d947SOsama Abboud adapter->irq_cpu_stride = old_num; 1372f9e1d947SOsama Abboud 1373f9e1d947SOsama Abboud rc = ena_up(adapter); 1374f9e1d947SOsama Abboud if (unlikely(rc != 0)) { 1375f9e1d947SOsama Abboud ena_log(adapter->pdev, ERR, 1376f9e1d947SOsama Abboud "Failed to revert to previous setup." 1377f9e1d947SOsama Abboud "Triggering device reset.\n"); 1378f9e1d947SOsama Abboud ENA_FLAG_SET_ATOMIC( 1379f9e1d947SOsama Abboud ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 1380f9e1d947SOsama Abboud ena_trigger_reset(adapter, 1381f9e1d947SOsama Abboud ENA_REGS_RESET_OS_TRIGGER); 1382f9e1d947SOsama Abboud } 1383f9e1d947SOsama Abboud } 1384f9e1d947SOsama Abboud } 1385f9e1d947SOsama Abboud return (rc); 1386f9e1d947SOsama Abboud } 1387f9e1d947SOsama Abboud 138856d41ad5SMarcin Wojtas /* Caller should sanitize new_num */ 138956d41ad5SMarcin Wojtas int 139056d41ad5SMarcin Wojtas ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num) 139156d41ad5SMarcin Wojtas { 139256d41ad5SMarcin Wojtas uint32_t old_num; 139356d41ad5SMarcin Wojtas int rc = 0; 139456d41ad5SMarcin Wojtas bool dev_was_up; 139556d41ad5SMarcin Wojtas 139656d41ad5SMarcin Wojtas dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 139756d41ad5SMarcin Wojtas old_num = adapter->num_io_queues; 139856d41ad5SMarcin Wojtas ena_down(adapter); 139956d41ad5SMarcin Wojtas 140056d41ad5SMarcin Wojtas ena_update_io_rings(adapter, new_num); 1401f9c9c01dSOsama Abboud #ifdef DEV_NETMAP 1402f9c9c01dSOsama Abboud rc = ena_reinit_netmap(adapter); 1403f9c9c01dSOsama Abboud if (rc != 0) 1404f9c9c01dSOsama Abboud return rc; 140556d41ad5SMarcin Wojtas 1406f9c9c01dSOsama Abboud #endif /* DEV_NETMAP */ 140756d41ad5SMarcin Wojtas if (dev_was_up) { 140856d41ad5SMarcin Wojtas rc = ena_up(adapter); 140956d41ad5SMarcin Wojtas if (unlikely(rc != 0)) { 14103fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 141156d41ad5SMarcin Wojtas "Failed to configure device with %u IO queues. 
" 141256d41ad5SMarcin Wojtas "Reverting to previous value: %u\n", 141356d41ad5SMarcin Wojtas new_num, old_num); 141456d41ad5SMarcin Wojtas 141556d41ad5SMarcin Wojtas ena_update_io_rings(adapter, old_num); 1416f9c9c01dSOsama Abboud #ifdef DEV_NETMAP 1417f9c9c01dSOsama Abboud rc = ena_reinit_netmap(adapter); 1418f9c9c01dSOsama Abboud if (rc != 0) 1419f9c9c01dSOsama Abboud return rc; 142056d41ad5SMarcin Wojtas 1421f9c9c01dSOsama Abboud #endif /* DEV_NETMAP */ 142256d41ad5SMarcin Wojtas rc = ena_up(adapter); 142356d41ad5SMarcin Wojtas if (unlikely(rc != 0)) { 14243fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 142556d41ad5SMarcin Wojtas "Failed to revert to previous setup IO " 142656d41ad5SMarcin Wojtas "queues. Triggering device reset.\n"); 142756d41ad5SMarcin Wojtas ENA_FLAG_SET_ATOMIC( 142856d41ad5SMarcin Wojtas ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 142956d41ad5SMarcin Wojtas ena_trigger_reset(adapter, 143056d41ad5SMarcin Wojtas ENA_REGS_RESET_OS_TRIGGER); 143156d41ad5SMarcin Wojtas } 143256d41ad5SMarcin Wojtas } 143356d41ad5SMarcin Wojtas } 143456d41ad5SMarcin Wojtas 143556d41ad5SMarcin Wojtas return (rc); 143656d41ad5SMarcin Wojtas } 143756d41ad5SMarcin Wojtas 143856d41ad5SMarcin Wojtas static void 14399b8d05b8SZbigniew Bodek ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid) 14409b8d05b8SZbigniew Bodek { 14419b8d05b8SZbigniew Bodek struct ena_ring *rx_ring = &adapter->rx_ring[qid]; 14429b8d05b8SZbigniew Bodek unsigned int i; 14439b8d05b8SZbigniew Bodek 14449b8d05b8SZbigniew Bodek for (i = 0; i < rx_ring->ring_size; i++) { 14459b8d05b8SZbigniew Bodek struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; 14469b8d05b8SZbigniew Bodek 14470bdffe59SMarcin Wojtas if (rx_info->mbuf != NULL) 14489b8d05b8SZbigniew Bodek ena_free_rx_mbuf(adapter, rx_ring, rx_info); 14499a0f2079SMarcin Wojtas #ifdef DEV_NETMAP 14509a0f2079SMarcin Wojtas if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) && 14517583c633SJustin Hibbits (if_getcapenable(adapter->ifp) & IFCAP_NETMAP)) { 14529a0f2079SMarcin Wojtas if (rx_info->netmap_buf_idx != 0) 14539a0f2079SMarcin Wojtas ena_netmap_free_rx_slot(adapter, rx_ring, 14549a0f2079SMarcin Wojtas rx_info); 14559a0f2079SMarcin Wojtas } 14569a0f2079SMarcin Wojtas #endif /* DEV_NETMAP */ 14579b8d05b8SZbigniew Bodek } 14589b8d05b8SZbigniew Bodek } 14599b8d05b8SZbigniew Bodek 14609b8d05b8SZbigniew Bodek /** 14619b8d05b8SZbigniew Bodek * ena_refill_all_rx_bufs - allocate all queues Rx buffers 14629b8d05b8SZbigniew Bodek * @adapter: network interface device structure 14639b8d05b8SZbigniew Bodek * 14649b8d05b8SZbigniew Bodek */ 14659b8d05b8SZbigniew Bodek static void 14669b8d05b8SZbigniew Bodek ena_refill_all_rx_bufs(struct ena_adapter *adapter) 14679b8d05b8SZbigniew Bodek { 14689b8d05b8SZbigniew Bodek struct ena_ring *rx_ring; 14699b8d05b8SZbigniew Bodek int i, rc, bufs_num; 14709b8d05b8SZbigniew Bodek 14717d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 14729b8d05b8SZbigniew Bodek rx_ring = &adapter->rx_ring[i]; 14739b8d05b8SZbigniew Bodek bufs_num = rx_ring->ring_size - 1; 14749b8d05b8SZbigniew Bodek rc = ena_refill_rx_bufs(rx_ring, bufs_num); 14759b8d05b8SZbigniew Bodek if (unlikely(rc != bufs_num)) 14763fc5d816SMarcin Wojtas ena_log_io(adapter->pdev, WARN, 14773fc5d816SMarcin Wojtas "refilling Queue %d failed. 
" 147882e558eaSDawid Gorecki "Allocated %d buffers from: %d\n", 147982e558eaSDawid Gorecki i, rc, bufs_num); 14809a0f2079SMarcin Wojtas #ifdef DEV_NETMAP 14819a0f2079SMarcin Wojtas rx_ring->initialized = true; 14829a0f2079SMarcin Wojtas #endif /* DEV_NETMAP */ 14839b8d05b8SZbigniew Bodek } 14849b8d05b8SZbigniew Bodek } 14859b8d05b8SZbigniew Bodek 14869b8d05b8SZbigniew Bodek static void 14879b8d05b8SZbigniew Bodek ena_free_all_rx_bufs(struct ena_adapter *adapter) 14889b8d05b8SZbigniew Bodek { 14899b8d05b8SZbigniew Bodek int i; 14909b8d05b8SZbigniew Bodek 14917d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) 14929b8d05b8SZbigniew Bodek ena_free_rx_bufs(adapter, i); 14939b8d05b8SZbigniew Bodek } 14949b8d05b8SZbigniew Bodek 14959b8d05b8SZbigniew Bodek /** 14969b8d05b8SZbigniew Bodek * ena_free_tx_bufs - Free Tx Buffers per Queue 14979b8d05b8SZbigniew Bodek * @adapter: network interface device structure 14989b8d05b8SZbigniew Bodek * @qid: queue index 14999b8d05b8SZbigniew Bodek **/ 15009b8d05b8SZbigniew Bodek static void 15019b8d05b8SZbigniew Bodek ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid) 15029b8d05b8SZbigniew Bodek { 15034e8acd84SMarcin Wojtas bool print_once = true; 15049b8d05b8SZbigniew Bodek struct ena_ring *tx_ring = &adapter->tx_ring[qid]; 15059b8d05b8SZbigniew Bodek 1506416e8864SZbigniew Bodek ENA_RING_MTX_LOCK(tx_ring); 15079b8d05b8SZbigniew Bodek for (int i = 0; i < tx_ring->ring_size; i++) { 15089b8d05b8SZbigniew Bodek struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; 15099b8d05b8SZbigniew Bodek 15109b8d05b8SZbigniew Bodek if (tx_info->mbuf == NULL) 15119b8d05b8SZbigniew Bodek continue; 15129b8d05b8SZbigniew Bodek 15134e8acd84SMarcin Wojtas if (print_once) { 15143fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 151582e558eaSDawid Gorecki "free uncompleted tx mbuf qid %d idx 0x%x\n", qid, 151682e558eaSDawid Gorecki i); 15174e8acd84SMarcin Wojtas print_once = false; 15184e8acd84SMarcin Wojtas } else { 15193fc5d816SMarcin Wojtas ena_log(adapter->pdev, DBG, 152082e558eaSDawid Gorecki "free uncompleted tx mbuf qid %d idx 0x%x\n", qid, 152182e558eaSDawid Gorecki i); 15224e8acd84SMarcin Wojtas } 15239b8d05b8SZbigniew Bodek 1524888810f0SMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap, 1525e8073738SMarcin Wojtas BUS_DMASYNC_POSTWRITE); 1526888810f0SMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap); 15274fa9e02dSMarcin Wojtas 15289b8d05b8SZbigniew Bodek m_free(tx_info->mbuf); 15299b8d05b8SZbigniew Bodek tx_info->mbuf = NULL; 15309b8d05b8SZbigniew Bodek } 1531416e8864SZbigniew Bodek ENA_RING_MTX_UNLOCK(tx_ring); 15329b8d05b8SZbigniew Bodek } 15339b8d05b8SZbigniew Bodek 15349b8d05b8SZbigniew Bodek static void 15359b8d05b8SZbigniew Bodek ena_free_all_tx_bufs(struct ena_adapter *adapter) 15369b8d05b8SZbigniew Bodek { 15377d8c4feeSMarcin Wojtas for (int i = 0; i < adapter->num_io_queues; i++) 15389b8d05b8SZbigniew Bodek ena_free_tx_bufs(adapter, i); 15399b8d05b8SZbigniew Bodek } 15409b8d05b8SZbigniew Bodek 15419b8d05b8SZbigniew Bodek static void 15429b8d05b8SZbigniew Bodek ena_destroy_all_tx_queues(struct ena_adapter *adapter) 15439b8d05b8SZbigniew Bodek { 15449b8d05b8SZbigniew Bodek uint16_t ena_qid; 15459b8d05b8SZbigniew Bodek int i; 15469b8d05b8SZbigniew Bodek 15477d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 15489b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 15499b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 15509b8d05b8SZbigniew Bodek } 
15519b8d05b8SZbigniew Bodek } 15529b8d05b8SZbigniew Bodek 15539b8d05b8SZbigniew Bodek static void 15549b8d05b8SZbigniew Bodek ena_destroy_all_rx_queues(struct ena_adapter *adapter) 15559b8d05b8SZbigniew Bodek { 15569b8d05b8SZbigniew Bodek uint16_t ena_qid; 15579b8d05b8SZbigniew Bodek int i; 15589b8d05b8SZbigniew Bodek 15597d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 15609b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(i); 15619b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 15629b8d05b8SZbigniew Bodek } 15639b8d05b8SZbigniew Bodek } 15649b8d05b8SZbigniew Bodek 15659b8d05b8SZbigniew Bodek static void 15669b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(struct ena_adapter *adapter) 15679b8d05b8SZbigniew Bodek { 15685cb9db07SMarcin Wojtas struct ena_que *queue; 15695cb9db07SMarcin Wojtas int i; 15705cb9db07SMarcin Wojtas 15717d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 15725cb9db07SMarcin Wojtas queue = &adapter->que[i]; 157382e558eaSDawid Gorecki while (taskqueue_cancel(queue->cleanup_tq, &queue->cleanup_task, NULL)) 157482e558eaSDawid Gorecki taskqueue_drain(queue->cleanup_tq, &queue->cleanup_task); 15755cb9db07SMarcin Wojtas taskqueue_free(queue->cleanup_tq); 15765cb9db07SMarcin Wojtas } 15775cb9db07SMarcin Wojtas 15789b8d05b8SZbigniew Bodek ena_destroy_all_tx_queues(adapter); 15799b8d05b8SZbigniew Bodek ena_destroy_all_rx_queues(adapter); 15809b8d05b8SZbigniew Bodek } 15819b8d05b8SZbigniew Bodek 15829b8d05b8SZbigniew Bodek static int 15839b8d05b8SZbigniew Bodek ena_create_io_queues(struct ena_adapter *adapter) 15849b8d05b8SZbigniew Bodek { 15859b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 15869b8d05b8SZbigniew Bodek struct ena_com_create_io_ctx ctx; 15879b8d05b8SZbigniew Bodek struct ena_ring *ring; 15885cb9db07SMarcin Wojtas struct ena_que *queue; 15899b8d05b8SZbigniew Bodek uint16_t ena_qid; 15909b8d05b8SZbigniew Bodek uint32_t msix_vector; 15916d1ef2abSArtur Rojek cpuset_t *cpu_mask = NULL; 15929b8d05b8SZbigniew Bodek int rc, i; 15939b8d05b8SZbigniew Bodek 15949b8d05b8SZbigniew Bodek /* Create TX queues */ 15957d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 15969b8d05b8SZbigniew Bodek msix_vector = ENA_IO_IRQ_IDX(i); 15979b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 15989b8d05b8SZbigniew Bodek ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 15999b8d05b8SZbigniew Bodek ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 16009762a033SMarcin Wojtas ctx.queue_size = adapter->requested_tx_ring_size; 16019b8d05b8SZbigniew Bodek ctx.msix_vector = msix_vector; 16029b8d05b8SZbigniew Bodek ctx.qid = ena_qid; 1603eb4c4f4aSMarcin Wojtas ctx.numa_node = adapter->que[i].domain; 1604eb4c4f4aSMarcin Wojtas 16059b8d05b8SZbigniew Bodek rc = ena_com_create_io_queue(ena_dev, &ctx); 16060bdffe59SMarcin Wojtas if (rc != 0) { 16073fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 16089b8d05b8SZbigniew Bodek "Failed to create io TX queue #%d rc: %d\n", i, rc); 16099b8d05b8SZbigniew Bodek goto err_tx; 16109b8d05b8SZbigniew Bodek } 16119b8d05b8SZbigniew Bodek ring = &adapter->tx_ring[i]; 16129b8d05b8SZbigniew Bodek rc = ena_com_get_io_handlers(ena_dev, ena_qid, 161382e558eaSDawid Gorecki &ring->ena_com_io_sq, &ring->ena_com_io_cq); 16140bdffe59SMarcin Wojtas if (rc != 0) { 16153fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 16169b8d05b8SZbigniew Bodek "Failed to get TX queue handlers. 
TX queue num" 161782e558eaSDawid Gorecki " %d rc: %d\n", 161882e558eaSDawid Gorecki i, rc); 16199b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ena_qid); 16209b8d05b8SZbigniew Bodek goto err_tx; 16219b8d05b8SZbigniew Bodek } 1622eb4c4f4aSMarcin Wojtas 1623eb4c4f4aSMarcin Wojtas if (ctx.numa_node >= 0) { 1624eb4c4f4aSMarcin Wojtas ena_com_update_numa_node(ring->ena_com_io_cq, 1625eb4c4f4aSMarcin Wojtas ctx.numa_node); 1626eb4c4f4aSMarcin Wojtas } 16279b8d05b8SZbigniew Bodek } 16289b8d05b8SZbigniew Bodek 16299b8d05b8SZbigniew Bodek /* Create RX queues */ 16307d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 16319b8d05b8SZbigniew Bodek msix_vector = ENA_IO_IRQ_IDX(i); 16329b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(i); 16339b8d05b8SZbigniew Bodek ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 16349b8d05b8SZbigniew Bodek ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 16359762a033SMarcin Wojtas ctx.queue_size = adapter->requested_rx_ring_size; 16369b8d05b8SZbigniew Bodek ctx.msix_vector = msix_vector; 16379b8d05b8SZbigniew Bodek ctx.qid = ena_qid; 1638eb4c4f4aSMarcin Wojtas ctx.numa_node = adapter->que[i].domain; 1639eb4c4f4aSMarcin Wojtas 16409b8d05b8SZbigniew Bodek rc = ena_com_create_io_queue(ena_dev, &ctx); 16413f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 16423fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 16439b8d05b8SZbigniew Bodek "Failed to create io RX queue[%d] rc: %d\n", i, rc); 16449b8d05b8SZbigniew Bodek goto err_rx; 16459b8d05b8SZbigniew Bodek } 16469b8d05b8SZbigniew Bodek 16479b8d05b8SZbigniew Bodek ring = &adapter->rx_ring[i]; 16489b8d05b8SZbigniew Bodek rc = ena_com_get_io_handlers(ena_dev, ena_qid, 164982e558eaSDawid Gorecki &ring->ena_com_io_sq, &ring->ena_com_io_cq); 16503f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 16513fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 16529b8d05b8SZbigniew Bodek "Failed to get RX queue handlers. 
RX queue num" 165382e558eaSDawid Gorecki " %d rc: %d\n", 165482e558eaSDawid Gorecki i, rc); 16559b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ena_qid); 16569b8d05b8SZbigniew Bodek goto err_rx; 16579b8d05b8SZbigniew Bodek } 1658eb4c4f4aSMarcin Wojtas 1659eb4c4f4aSMarcin Wojtas if (ctx.numa_node >= 0) { 1660eb4c4f4aSMarcin Wojtas ena_com_update_numa_node(ring->ena_com_io_cq, 1661eb4c4f4aSMarcin Wojtas ctx.numa_node); 1662eb4c4f4aSMarcin Wojtas } 16639b8d05b8SZbigniew Bodek } 16649b8d05b8SZbigniew Bodek 16657d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 16665cb9db07SMarcin Wojtas queue = &adapter->que[i]; 16675cb9db07SMarcin Wojtas 16686c3e93cbSGleb Smirnoff NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue); 16695cb9db07SMarcin Wojtas queue->cleanup_tq = taskqueue_create_fast("ena cleanup", 16705cb9db07SMarcin Wojtas M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq); 16715cb9db07SMarcin Wojtas 16726d1ef2abSArtur Rojek #ifdef RSS 16736d1ef2abSArtur Rojek cpu_mask = &queue->cpu_mask; 16746d1ef2abSArtur Rojek #endif 16756d1ef2abSArtur Rojek taskqueue_start_threads_cpuset(&queue->cleanup_tq, 1, PI_NET, 167682e558eaSDawid Gorecki cpu_mask, "%s queue %d cleanup", 16775cb9db07SMarcin Wojtas device_get_nameunit(adapter->pdev), i); 16785cb9db07SMarcin Wojtas } 16795cb9db07SMarcin Wojtas 16809b8d05b8SZbigniew Bodek return (0); 16819b8d05b8SZbigniew Bodek 16829b8d05b8SZbigniew Bodek err_rx: 16839b8d05b8SZbigniew Bodek while (i--) 16849b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); 16857d8c4feeSMarcin Wojtas i = adapter->num_io_queues; 16869b8d05b8SZbigniew Bodek err_tx: 16879b8d05b8SZbigniew Bodek while (i--) 16889b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); 16899b8d05b8SZbigniew Bodek 16909b8d05b8SZbigniew Bodek return (ENXIO); 16919b8d05b8SZbigniew Bodek } 16929b8d05b8SZbigniew Bodek 16939b8d05b8SZbigniew Bodek /********************************************************************* 16949b8d05b8SZbigniew Bodek * 16959b8d05b8SZbigniew Bodek * MSIX & Interrupt Service routine 16969b8d05b8SZbigniew Bodek * 16979b8d05b8SZbigniew Bodek **********************************************************************/ 16989b8d05b8SZbigniew Bodek 16999b8d05b8SZbigniew Bodek /** 17009b8d05b8SZbigniew Bodek * ena_handle_msix - MSIX Interrupt Handler for admin/async queue 17019b8d05b8SZbigniew Bodek * @arg: interrupt number 17029b8d05b8SZbigniew Bodek **/ 17039b8d05b8SZbigniew Bodek static void 17049b8d05b8SZbigniew Bodek ena_intr_msix_mgmnt(void *arg) 17059b8d05b8SZbigniew Bodek { 17069b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)arg; 17079b8d05b8SZbigniew Bodek 17089b8d05b8SZbigniew Bodek ena_com_admin_q_comp_intr_handler(adapter->ena_dev); 1709fd43fd2aSMarcin Wojtas if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))) 17109b8d05b8SZbigniew Bodek ena_com_aenq_intr_handler(adapter->ena_dev, arg); 17119b8d05b8SZbigniew Bodek } 17129b8d05b8SZbigniew Bodek 17135cb9db07SMarcin Wojtas /** 17145cb9db07SMarcin Wojtas * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx 17155cb9db07SMarcin Wojtas * @arg: queue 17165cb9db07SMarcin Wojtas **/ 17175cb9db07SMarcin Wojtas static int 17185cb9db07SMarcin Wojtas ena_handle_msix(void *arg) 17195cb9db07SMarcin Wojtas { 17205cb9db07SMarcin Wojtas struct ena_que *queue = arg; 17215cb9db07SMarcin Wojtas struct ena_adapter *adapter = queue->adapter; 17225cb9db07SMarcin Wojtas if_t ifp = adapter->ifp; 17235cb9db07SMarcin Wojtas 17245cb9db07SMarcin 
Wojtas if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 17255cb9db07SMarcin Wojtas return (FILTER_STRAY); 17265cb9db07SMarcin Wojtas 17275cb9db07SMarcin Wojtas taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task); 17285cb9db07SMarcin Wojtas 17295cb9db07SMarcin Wojtas return (FILTER_HANDLED); 17305cb9db07SMarcin Wojtas } 17315cb9db07SMarcin Wojtas 17329b8d05b8SZbigniew Bodek static int 17339b8d05b8SZbigniew Bodek ena_enable_msix(struct ena_adapter *adapter) 17349b8d05b8SZbigniew Bodek { 17359b8d05b8SZbigniew Bodek device_t dev = adapter->pdev; 17368805021aSMarcin Wojtas int msix_vecs, msix_req; 17378805021aSMarcin Wojtas int i, rc = 0; 17389b8d05b8SZbigniew Bodek 1739fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { 17403fc5d816SMarcin Wojtas ena_log(dev, ERR, "Error, MSI-X is already enabled\n"); 1741fd43fd2aSMarcin Wojtas return (EINVAL); 1742fd43fd2aSMarcin Wojtas } 1743fd43fd2aSMarcin Wojtas 17449b8d05b8SZbigniew Bodek /* Reserved the max msix vectors we might need */ 17457d8c4feeSMarcin Wojtas msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues); 17469b8d05b8SZbigniew Bodek 1747cd5d5804SMarcin Wojtas adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry), 1748cd5d5804SMarcin Wojtas M_DEVBUF, M_WAITOK | M_ZERO); 1749cd5d5804SMarcin Wojtas 175082e558eaSDawid Gorecki ena_log(dev, DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs); 17519b8d05b8SZbigniew Bodek 17529b8d05b8SZbigniew Bodek for (i = 0; i < msix_vecs; i++) { 17539b8d05b8SZbigniew Bodek adapter->msix_entries[i].entry = i; 17549b8d05b8SZbigniew Bodek /* Vectors must start from 1 */ 17559b8d05b8SZbigniew Bodek adapter->msix_entries[i].vector = i + 1; 17569b8d05b8SZbigniew Bodek } 17579b8d05b8SZbigniew Bodek 17588805021aSMarcin Wojtas msix_req = msix_vecs; 17599b8d05b8SZbigniew Bodek rc = pci_alloc_msix(dev, &msix_vecs); 17603f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 176182e558eaSDawid Gorecki ena_log(dev, ERR, "Failed to enable MSIX, vectors %d rc %d\n", 176282e558eaSDawid Gorecki msix_vecs, rc); 17637d2544e6SMarcin Wojtas 17649b8d05b8SZbigniew Bodek rc = ENOSPC; 17657d2544e6SMarcin Wojtas goto err_msix_free; 17669b8d05b8SZbigniew Bodek } 17679b8d05b8SZbigniew Bodek 17688805021aSMarcin Wojtas if (msix_vecs != msix_req) { 17692b5b60feSMarcin Wojtas if (msix_vecs == ENA_ADMIN_MSIX_VEC) { 17703fc5d816SMarcin Wojtas ena_log(dev, ERR, 17712b5b60feSMarcin Wojtas "Not enough number of MSI-x allocated: %d\n", 17722b5b60feSMarcin Wojtas msix_vecs); 17732b5b60feSMarcin Wojtas pci_release_msi(dev); 17742b5b60feSMarcin Wojtas rc = ENOSPC; 17752b5b60feSMarcin Wojtas goto err_msix_free; 17762b5b60feSMarcin Wojtas } 177782e558eaSDawid Gorecki ena_log(dev, ERR, 177882e558eaSDawid Gorecki "Enable only %d MSI-x (out of %d), reduce " 177982e558eaSDawid Gorecki "the number of queues\n", 178082e558eaSDawid Gorecki msix_vecs, msix_req); 17818805021aSMarcin Wojtas } 17828805021aSMarcin Wojtas 17839b8d05b8SZbigniew Bodek adapter->msix_vecs = msix_vecs; 1784fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); 17859b8d05b8SZbigniew Bodek 17867d2544e6SMarcin Wojtas return (0); 17877d2544e6SMarcin Wojtas 17887d2544e6SMarcin Wojtas err_msix_free: 17897d2544e6SMarcin Wojtas free(adapter->msix_entries, M_DEVBUF); 17907d2544e6SMarcin Wojtas adapter->msix_entries = NULL; 17917d2544e6SMarcin Wojtas 17929b8d05b8SZbigniew Bodek return (rc); 17939b8d05b8SZbigniew Bodek } 17949b8d05b8SZbigniew Bodek 17959b8d05b8SZbigniew Bodek static void 17969b8d05b8SZbigniew Bodek 
ena_setup_mgmnt_intr(struct ena_adapter *adapter) 17979b8d05b8SZbigniew Bodek { 179882e558eaSDawid Gorecki snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, ENA_IRQNAME_SIZE, 179982e558eaSDawid Gorecki "ena-mgmnt@pci:%s", device_get_nameunit(adapter->pdev)); 18009b8d05b8SZbigniew Bodek /* 18019b8d05b8SZbigniew Bodek * Handler is NULL on purpose, it will be set 18029b8d05b8SZbigniew Bodek * when mgmnt interrupt is acquired 18039b8d05b8SZbigniew Bodek */ 18049b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL; 18059b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; 18069b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = 18079b8d05b8SZbigniew Bodek adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector; 18089b8d05b8SZbigniew Bodek } 18099b8d05b8SZbigniew Bodek 181077958fcdSMarcin Wojtas static int 18119b8d05b8SZbigniew Bodek ena_setup_io_intr(struct ena_adapter *adapter) 18129b8d05b8SZbigniew Bodek { 18136d1ef2abSArtur Rojek #ifdef RSS 18146d1ef2abSArtur Rojek int num_buckets = rss_getnumbuckets(); 18156d1ef2abSArtur Rojek static int last_bind = 0; 1816eb4c4f4aSMarcin Wojtas int cur_bind; 1817eb4c4f4aSMarcin Wojtas int idx; 18186d1ef2abSArtur Rojek #endif 18199b8d05b8SZbigniew Bodek int irq_idx; 18209b8d05b8SZbigniew Bodek 182177958fcdSMarcin Wojtas if (adapter->msix_entries == NULL) 182277958fcdSMarcin Wojtas return (EINVAL); 182377958fcdSMarcin Wojtas 1824eb4c4f4aSMarcin Wojtas #ifdef RSS 1825eb4c4f4aSMarcin Wojtas if (adapter->first_bind < 0) { 1826eb4c4f4aSMarcin Wojtas adapter->first_bind = last_bind; 1827eb4c4f4aSMarcin Wojtas last_bind = (last_bind + adapter->num_io_queues) % num_buckets; 1828eb4c4f4aSMarcin Wojtas } 1829eb4c4f4aSMarcin Wojtas cur_bind = adapter->first_bind; 1830eb4c4f4aSMarcin Wojtas #endif 1831eb4c4f4aSMarcin Wojtas 18327d8c4feeSMarcin Wojtas for (int i = 0; i < adapter->num_io_queues; i++) { 18339b8d05b8SZbigniew Bodek irq_idx = ENA_IO_IRQ_IDX(i); 18349b8d05b8SZbigniew Bodek 18359b8d05b8SZbigniew Bodek snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, 18369b8d05b8SZbigniew Bodek "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i); 18379b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].handler = ena_handle_msix; 18389b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].data = &adapter->que[i]; 18399b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].vector = 18409b8d05b8SZbigniew Bodek adapter->msix_entries[irq_idx].vector; 18413fc5d816SMarcin Wojtas ena_log(adapter->pdev, DBG, "ena_setup_io_intr vector: %d\n", 18429b8d05b8SZbigniew Bodek adapter->msix_entries[irq_idx].vector); 1843277f11c4SMarcin Wojtas 1844f9e1d947SOsama Abboud if (adapter->irq_cpu_base > ENA_BASE_CPU_UNSPECIFIED) { 1845f9e1d947SOsama Abboud adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = 1846f9e1d947SOsama Abboud (unsigned)(adapter->irq_cpu_base + 1847f9e1d947SOsama Abboud i * adapter->irq_cpu_stride) % (unsigned)mp_ncpus; 1848f9e1d947SOsama Abboud CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask); 1849f9e1d947SOsama Abboud } 1850f9e1d947SOsama Abboud 18516d1ef2abSArtur Rojek #ifdef RSS 18529b8d05b8SZbigniew Bodek adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = 1853eb4c4f4aSMarcin Wojtas rss_getcpu(cur_bind); 1854eb4c4f4aSMarcin Wojtas cur_bind = (cur_bind + 1) % num_buckets; 18556d1ef2abSArtur Rojek CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask); 1856eb4c4f4aSMarcin Wojtas 1857eb4c4f4aSMarcin Wojtas for (idx = 0; idx < MAXMEMDOM; ++idx) { 1858eb4c4f4aSMarcin Wojtas if (CPU_ISSET(adapter->que[i].cpu, 
&cpuset_domain[idx])) 1859eb4c4f4aSMarcin Wojtas break; 1860eb4c4f4aSMarcin Wojtas } 1861eb4c4f4aSMarcin Wojtas adapter->que[i].domain = idx; 1862eb4c4f4aSMarcin Wojtas #else 1863eb4c4f4aSMarcin Wojtas adapter->que[i].domain = -1; 18646d1ef2abSArtur Rojek #endif 18659b8d05b8SZbigniew Bodek } 186677958fcdSMarcin Wojtas 186777958fcdSMarcin Wojtas return (0); 18689b8d05b8SZbigniew Bodek } 18699b8d05b8SZbigniew Bodek 18709b8d05b8SZbigniew Bodek static int 18719b8d05b8SZbigniew Bodek ena_request_mgmnt_irq(struct ena_adapter *adapter) 18729b8d05b8SZbigniew Bodek { 18733fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 18749b8d05b8SZbigniew Bodek struct ena_irq *irq; 18759b8d05b8SZbigniew Bodek unsigned long flags; 18769b8d05b8SZbigniew Bodek int rc, rcc; 18779b8d05b8SZbigniew Bodek 18789b8d05b8SZbigniew Bodek flags = RF_ACTIVE | RF_SHAREABLE; 18799b8d05b8SZbigniew Bodek 18809b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 18819b8d05b8SZbigniew Bodek irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 18829b8d05b8SZbigniew Bodek &irq->vector, flags); 18839b8d05b8SZbigniew Bodek 18843f9ed7abSMarcin Wojtas if (unlikely(irq->res == NULL)) { 18853fc5d816SMarcin Wojtas ena_log(pdev, ERR, "could not allocate irq vector: %d\n", 18863fc5d816SMarcin Wojtas irq->vector); 18877d2544e6SMarcin Wojtas return (ENXIO); 18889b8d05b8SZbigniew Bodek } 18899b8d05b8SZbigniew Bodek 18900bdffe59SMarcin Wojtas rc = bus_setup_intr(adapter->pdev, irq->res, 189182e558eaSDawid Gorecki INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, irq->data, 189282e558eaSDawid Gorecki &irq->cookie); 18933f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 189482e558eaSDawid Gorecki ena_log(pdev, ERR, 189582e558eaSDawid Gorecki "failed to register interrupt handler for irq %ju: %d\n", 18969b8d05b8SZbigniew Bodek rman_get_start(irq->res), rc); 18977d2544e6SMarcin Wojtas goto err_res_free; 18989b8d05b8SZbigniew Bodek } 18999b8d05b8SZbigniew Bodek irq->requested = true; 19009b8d05b8SZbigniew Bodek 19019b8d05b8SZbigniew Bodek return (rc); 19029b8d05b8SZbigniew Bodek 19037d2544e6SMarcin Wojtas err_res_free: 19043fc5d816SMarcin Wojtas ena_log(pdev, INFO, "releasing resource for irq %d\n", irq->vector); 190582e558eaSDawid Gorecki rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, 190682e558eaSDawid Gorecki irq->res); 19073f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 190882e558eaSDawid Gorecki ena_log(pdev, ERR, 190982e558eaSDawid Gorecki "dev has no parent while releasing res for irq: %d\n", 191082e558eaSDawid Gorecki irq->vector); 19119b8d05b8SZbigniew Bodek irq->res = NULL; 19129b8d05b8SZbigniew Bodek 19139b8d05b8SZbigniew Bodek return (rc); 19149b8d05b8SZbigniew Bodek } 19159b8d05b8SZbigniew Bodek 19169b8d05b8SZbigniew Bodek static int 19179b8d05b8SZbigniew Bodek ena_request_io_irq(struct ena_adapter *adapter) 19189b8d05b8SZbigniew Bodek { 19193fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 19209b8d05b8SZbigniew Bodek struct ena_irq *irq; 19219b8d05b8SZbigniew Bodek unsigned long flags = 0; 19229b8d05b8SZbigniew Bodek int rc = 0, i, rcc; 19239b8d05b8SZbigniew Bodek 1924fd43fd2aSMarcin Wojtas if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) { 19253fc5d816SMarcin Wojtas ena_log(pdev, ERR, 19264e8acd84SMarcin Wojtas "failed to request I/O IRQ: MSI-X is not enabled\n"); 19279b8d05b8SZbigniew Bodek return (EINVAL); 19289b8d05b8SZbigniew Bodek } else { 19299b8d05b8SZbigniew Bodek flags = RF_ACTIVE | RF_SHAREABLE; 19309b8d05b8SZbigniew Bodek } 19319b8d05b8SZbigniew Bodek 
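/*
 * For every IO queue vector: allocate the IRQ resource, install the
 * handler prepared in ena_setup_io_intr() and, when RSS or an explicit
 * IRQ CPU base is in use, bind the interrupt to its chosen CPU. On any
 * failure the already-acquired vectors are torn down in the err: path
 * below.
 */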
19329b8d05b8SZbigniew Bodek for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 19339b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 19349b8d05b8SZbigniew Bodek 19353f9ed7abSMarcin Wojtas if (unlikely(irq->requested)) 19369b8d05b8SZbigniew Bodek continue; 19379b8d05b8SZbigniew Bodek 19389b8d05b8SZbigniew Bodek irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 19399b8d05b8SZbigniew Bodek &irq->vector, flags); 19403f9ed7abSMarcin Wojtas if (unlikely(irq->res == NULL)) { 1941469a8407SMarcin Wojtas rc = ENOMEM; 194282e558eaSDawid Gorecki ena_log(pdev, ERR, 194382e558eaSDawid Gorecki "could not allocate irq vector: %d\n", irq->vector); 19449b8d05b8SZbigniew Bodek goto err; 19459b8d05b8SZbigniew Bodek } 19469b8d05b8SZbigniew Bodek 19470bdffe59SMarcin Wojtas rc = bus_setup_intr(adapter->pdev, irq->res, 194882e558eaSDawid Gorecki INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, irq->data, 194982e558eaSDawid Gorecki &irq->cookie); 19503f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 195182e558eaSDawid Gorecki ena_log(pdev, ERR, 195282e558eaSDawid Gorecki "failed to register interrupt handler for irq %ju: %d\n", 19539b8d05b8SZbigniew Bodek rman_get_start(irq->res), rc); 19549b8d05b8SZbigniew Bodek goto err; 19559b8d05b8SZbigniew Bodek } 19569b8d05b8SZbigniew Bodek irq->requested = true; 19576d1ef2abSArtur Rojek 1958f9e1d947SOsama Abboud if (adapter->rss_enabled || adapter->irq_cpu_base > ENA_BASE_CPU_UNSPECIFIED) { 19596d1ef2abSArtur Rojek rc = bus_bind_intr(adapter->pdev, irq->res, irq->cpu); 19606d1ef2abSArtur Rojek if (unlikely(rc != 0)) { 196182e558eaSDawid Gorecki ena_log(pdev, ERR, 196282e558eaSDawid Gorecki "failed to bind interrupt handler for irq %ju to cpu %d: %d\n", 19636d1ef2abSArtur Rojek rman_get_start(irq->res), irq->cpu, rc); 19646d1ef2abSArtur Rojek goto err; 19656d1ef2abSArtur Rojek } 19666d1ef2abSArtur Rojek 19676d1ef2abSArtur Rojek ena_log(pdev, INFO, "queue %d - cpu %d\n", 19686d1ef2abSArtur Rojek i - ENA_IO_IRQ_FIRST_IDX, irq->cpu); 19699b8d05b8SZbigniew Bodek } 1970f9e1d947SOsama Abboud } 19719b8d05b8SZbigniew Bodek return (rc); 19729b8d05b8SZbigniew Bodek 19739b8d05b8SZbigniew Bodek err: 19749b8d05b8SZbigniew Bodek 19759b8d05b8SZbigniew Bodek for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) { 19769b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 19779b8d05b8SZbigniew Bodek rcc = 0; 19789b8d05b8SZbigniew Bodek 19799b8d05b8SZbigniew Bodek /* Once we entered err: section and irq->requested is true we 19809b8d05b8SZbigniew Bodek free both intr and resources */ 1981f9e1d947SOsama Abboud if (irq->requested) { 198282e558eaSDawid Gorecki rcc = bus_teardown_intr(adapter->pdev, irq->res, 198382e558eaSDawid Gorecki irq->cookie); 19843f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 198582e558eaSDawid Gorecki ena_log(pdev, ERR, 198682e558eaSDawid Gorecki "could not release irq: %d, error: %d\n", 19873fc5d816SMarcin Wojtas irq->vector, rcc); 1988f9e1d947SOsama Abboud } 19899b8d05b8SZbigniew Bodek 1990eb3f25b4SGordon Bergling /* If we entered err: section without irq->requested set we know 19919b8d05b8SZbigniew Bodek it was bus_alloc_resource_any() that needs cleanup, provided 19929b8d05b8SZbigniew Bodek res is not NULL. 
In case res is NULL no work in needed in 19939b8d05b8SZbigniew Bodek this iteration */ 19949b8d05b8SZbigniew Bodek rcc = 0; 19959b8d05b8SZbigniew Bodek if (irq->res != NULL) { 19969b8d05b8SZbigniew Bodek rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 19979b8d05b8SZbigniew Bodek irq->vector, irq->res); 19989b8d05b8SZbigniew Bodek } 19993f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 200082e558eaSDawid Gorecki ena_log(pdev, ERR, 200182e558eaSDawid Gorecki "dev has no parent while releasing res for irq: %d\n", 200282e558eaSDawid Gorecki irq->vector); 20039b8d05b8SZbigniew Bodek irq->requested = false; 20049b8d05b8SZbigniew Bodek irq->res = NULL; 20059b8d05b8SZbigniew Bodek } 20069b8d05b8SZbigniew Bodek 20079b8d05b8SZbigniew Bodek return (rc); 20089b8d05b8SZbigniew Bodek } 20099b8d05b8SZbigniew Bodek 20109b8d05b8SZbigniew Bodek static void 20119b8d05b8SZbigniew Bodek ena_free_mgmnt_irq(struct ena_adapter *adapter) 20129b8d05b8SZbigniew Bodek { 20133fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 20149b8d05b8SZbigniew Bodek struct ena_irq *irq; 20159b8d05b8SZbigniew Bodek int rc; 20169b8d05b8SZbigniew Bodek 20179b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 20189b8d05b8SZbigniew Bodek if (irq->requested) { 20193fc5d816SMarcin Wojtas ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector); 20209b8d05b8SZbigniew Bodek rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); 20213f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 20223fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to tear down irq: %d\n", 20233fc5d816SMarcin Wojtas irq->vector); 20249b8d05b8SZbigniew Bodek irq->requested = 0; 20259b8d05b8SZbigniew Bodek } 20269b8d05b8SZbigniew Bodek 20279b8d05b8SZbigniew Bodek if (irq->res != NULL) { 20283fc5d816SMarcin Wojtas ena_log(pdev, DBG, "release resource irq: %d\n", irq->vector); 20299b8d05b8SZbigniew Bodek rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 20309b8d05b8SZbigniew Bodek irq->vector, irq->res); 20319b8d05b8SZbigniew Bodek irq->res = NULL; 20323f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 203382e558eaSDawid Gorecki ena_log(pdev, ERR, 203482e558eaSDawid Gorecki "dev has no parent while releasing res for irq: %d\n", 203582e558eaSDawid Gorecki irq->vector); 20369b8d05b8SZbigniew Bodek } 20379b8d05b8SZbigniew Bodek } 20389b8d05b8SZbigniew Bodek 20399b8d05b8SZbigniew Bodek static void 20409b8d05b8SZbigniew Bodek ena_free_io_irq(struct ena_adapter *adapter) 20419b8d05b8SZbigniew Bodek { 20423fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 20439b8d05b8SZbigniew Bodek struct ena_irq *irq; 20449b8d05b8SZbigniew Bodek int rc; 20459b8d05b8SZbigniew Bodek 20469b8d05b8SZbigniew Bodek for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 20479b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 20489b8d05b8SZbigniew Bodek if (irq->requested) { 20493fc5d816SMarcin Wojtas ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector); 20509b8d05b8SZbigniew Bodek rc = bus_teardown_intr(adapter->pdev, irq->res, 20519b8d05b8SZbigniew Bodek irq->cookie); 20523f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 205382e558eaSDawid Gorecki ena_log(pdev, ERR, 205482e558eaSDawid Gorecki "failed to tear down irq: %d\n", 20553fc5d816SMarcin Wojtas irq->vector); 20569b8d05b8SZbigniew Bodek } 20579b8d05b8SZbigniew Bodek irq->requested = 0; 20589b8d05b8SZbigniew Bodek } 20599b8d05b8SZbigniew Bodek 20609b8d05b8SZbigniew Bodek if (irq->res != NULL) { 20613fc5d816SMarcin Wojtas ena_log(pdev, DBG, "release resource irq: %d\n", 20629b8d05b8SZbigniew Bodek irq->vector); 
20639b8d05b8SZbigniew Bodek rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 20649b8d05b8SZbigniew Bodek irq->vector, irq->res); 20659b8d05b8SZbigniew Bodek irq->res = NULL; 20663f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 206782e558eaSDawid Gorecki ena_log(pdev, ERR, 206882e558eaSDawid Gorecki "dev has no parent while releasing res for irq: %d\n", 20699b8d05b8SZbigniew Bodek irq->vector); 20709b8d05b8SZbigniew Bodek } 20719b8d05b8SZbigniew Bodek } 20729b8d05b8SZbigniew Bodek } 20739b8d05b8SZbigniew Bodek } 20749b8d05b8SZbigniew Bodek 20759b8d05b8SZbigniew Bodek static void 20769b8d05b8SZbigniew Bodek ena_free_irqs(struct ena_adapter *adapter) 20779b8d05b8SZbigniew Bodek { 20789b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 20799b8d05b8SZbigniew Bodek ena_free_mgmnt_irq(adapter); 20809b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 20819b8d05b8SZbigniew Bodek } 20829b8d05b8SZbigniew Bodek 20839b8d05b8SZbigniew Bodek static void 20849b8d05b8SZbigniew Bodek ena_disable_msix(struct ena_adapter *adapter) 20859b8d05b8SZbigniew Bodek { 2086fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { 2087fd43fd2aSMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); 20889b8d05b8SZbigniew Bodek pci_release_msi(adapter->pdev); 2089fd43fd2aSMarcin Wojtas } 20909b8d05b8SZbigniew Bodek 20919b8d05b8SZbigniew Bodek adapter->msix_vecs = 0; 2092cd5d5804SMarcin Wojtas free(adapter->msix_entries, M_DEVBUF); 20939b8d05b8SZbigniew Bodek adapter->msix_entries = NULL; 20949b8d05b8SZbigniew Bodek } 20959b8d05b8SZbigniew Bodek 20969b8d05b8SZbigniew Bodek static void 20979b8d05b8SZbigniew Bodek ena_unmask_all_io_irqs(struct ena_adapter *adapter) 20989b8d05b8SZbigniew Bodek { 20999b8d05b8SZbigniew Bodek struct ena_com_io_cq *io_cq; 21009b8d05b8SZbigniew Bodek struct ena_eth_io_intr_reg intr_reg; 2101223c8cb1SArtur Rojek struct ena_ring *tx_ring; 21029b8d05b8SZbigniew Bodek uint16_t ena_qid; 21039b8d05b8SZbigniew Bodek int i; 21049b8d05b8SZbigniew Bodek 21059b8d05b8SZbigniew Bodek /* Unmask interrupts for all queues */ 21067d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 21079b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 21089b8d05b8SZbigniew Bodek io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 210972e34ebdSOsama Abboud ena_com_update_intr_reg(&intr_reg, 0, 0, true, false); 2110223c8cb1SArtur Rojek tx_ring = &adapter->tx_ring[i]; 2111223c8cb1SArtur Rojek counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1); 21129b8d05b8SZbigniew Bodek ena_com_unmask_intr(io_cq, &intr_reg); 21139b8d05b8SZbigniew Bodek } 21149b8d05b8SZbigniew Bodek } 21159b8d05b8SZbigniew Bodek 21169b8d05b8SZbigniew Bodek static int 21179b8d05b8SZbigniew Bodek ena_up_complete(struct ena_adapter *adapter) 21189b8d05b8SZbigniew Bodek { 21199b8d05b8SZbigniew Bodek int rc; 21209b8d05b8SZbigniew Bodek 2121fd43fd2aSMarcin Wojtas if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) { 21229b8d05b8SZbigniew Bodek rc = ena_rss_configure(adapter); 212356d41ad5SMarcin Wojtas if (rc != 0) { 21243fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 212556d41ad5SMarcin Wojtas "Failed to configure RSS\n"); 21269b8d05b8SZbigniew Bodek return (rc); 21279b8d05b8SZbigniew Bodek } 212856d41ad5SMarcin Wojtas } 21299b8d05b8SZbigniew Bodek 21307583c633SJustin Hibbits rc = ena_change_mtu(adapter->ifp, if_getmtu(adapter->ifp)); 21313f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 21327d2544e6SMarcin Wojtas return (rc); 21337d2544e6SMarcin Wojtas 21349b8d05b8SZbigniew Bodek 
ena_refill_all_rx_bufs(adapter); 213530217e2dSMarcin Wojtas ena_reset_counters((counter_u64_t *)&adapter->hw_stats, 213630217e2dSMarcin Wojtas sizeof(adapter->hw_stats)); 21379b8d05b8SZbigniew Bodek 21389b8d05b8SZbigniew Bodek return (0); 21399b8d05b8SZbigniew Bodek } 21409b8d05b8SZbigniew Bodek 21419762a033SMarcin Wojtas static void 214282e558eaSDawid Gorecki set_io_rings_size(struct ena_adapter *adapter, int new_tx_size, int new_rx_size) 21439762a033SMarcin Wojtas { 21449762a033SMarcin Wojtas int i; 21459762a033SMarcin Wojtas 21469762a033SMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 21479762a033SMarcin Wojtas adapter->tx_ring[i].ring_size = new_tx_size; 21489762a033SMarcin Wojtas adapter->rx_ring[i].ring_size = new_rx_size; 21499762a033SMarcin Wojtas } 21509762a033SMarcin Wojtas } 21519762a033SMarcin Wojtas 21529762a033SMarcin Wojtas static int 21539762a033SMarcin Wojtas create_queues_with_size_backoff(struct ena_adapter *adapter) 21549762a033SMarcin Wojtas { 21553fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 21569762a033SMarcin Wojtas int rc; 21579762a033SMarcin Wojtas uint32_t cur_rx_ring_size, cur_tx_ring_size; 21589762a033SMarcin Wojtas uint32_t new_rx_ring_size, new_tx_ring_size; 21599762a033SMarcin Wojtas 21609762a033SMarcin Wojtas /* 21619762a033SMarcin Wojtas * Current queue sizes might be set to smaller than the requested 21629762a033SMarcin Wojtas * ones due to past queue allocation failures. 21639762a033SMarcin Wojtas */ 21649762a033SMarcin Wojtas set_io_rings_size(adapter, adapter->requested_tx_ring_size, 21659762a033SMarcin Wojtas adapter->requested_rx_ring_size); 21669762a033SMarcin Wojtas 21679762a033SMarcin Wojtas while (1) { 21689762a033SMarcin Wojtas /* Allocate transmit descriptors */ 21699762a033SMarcin Wojtas rc = ena_setup_all_tx_resources(adapter); 21709762a033SMarcin Wojtas if (unlikely(rc != 0)) { 21713fc5d816SMarcin Wojtas ena_log(pdev, ERR, "err_setup_tx\n"); 21729762a033SMarcin Wojtas goto err_setup_tx; 21739762a033SMarcin Wojtas } 21749762a033SMarcin Wojtas 21759762a033SMarcin Wojtas /* Allocate receive descriptors */ 21769762a033SMarcin Wojtas rc = ena_setup_all_rx_resources(adapter); 21779762a033SMarcin Wojtas if (unlikely(rc != 0)) { 21783fc5d816SMarcin Wojtas ena_log(pdev, ERR, "err_setup_rx\n"); 21799762a033SMarcin Wojtas goto err_setup_rx; 21809762a033SMarcin Wojtas } 21819762a033SMarcin Wojtas 21829762a033SMarcin Wojtas /* Create IO queues for Rx & Tx */ 21839762a033SMarcin Wojtas rc = ena_create_io_queues(adapter); 21849762a033SMarcin Wojtas if (unlikely(rc != 0)) { 218582e558eaSDawid Gorecki ena_log(pdev, ERR, "create IO queues failed\n"); 21869762a033SMarcin Wojtas goto err_io_que; 21879762a033SMarcin Wojtas } 21889762a033SMarcin Wojtas 21899762a033SMarcin Wojtas return (0); 21909762a033SMarcin Wojtas 21919762a033SMarcin Wojtas err_io_que: 21929762a033SMarcin Wojtas ena_free_all_rx_resources(adapter); 21939762a033SMarcin Wojtas err_setup_rx: 21949762a033SMarcin Wojtas ena_free_all_tx_resources(adapter); 21959762a033SMarcin Wojtas err_setup_tx: 21969762a033SMarcin Wojtas /* 21979762a033SMarcin Wojtas * Lower the ring size if ENOMEM. Otherwise, return the 21989762a033SMarcin Wojtas * error straightaway. 
21999762a033SMarcin Wojtas */ 22009762a033SMarcin Wojtas if (unlikely(rc != ENOMEM)) { 22013fc5d816SMarcin Wojtas ena_log(pdev, ERR, 22029762a033SMarcin Wojtas "Queue creation failed with error code: %d\n", rc); 22039762a033SMarcin Wojtas return (rc); 22049762a033SMarcin Wojtas } 22059762a033SMarcin Wojtas 22069762a033SMarcin Wojtas cur_tx_ring_size = adapter->tx_ring[0].ring_size; 22079762a033SMarcin Wojtas cur_rx_ring_size = adapter->rx_ring[0].ring_size; 22089762a033SMarcin Wojtas 22093fc5d816SMarcin Wojtas ena_log(pdev, ERR, 22109762a033SMarcin Wojtas "Not enough memory to create queues with sizes TX=%d, RX=%d\n", 22119762a033SMarcin Wojtas cur_tx_ring_size, cur_rx_ring_size); 22129762a033SMarcin Wojtas 22139762a033SMarcin Wojtas new_tx_ring_size = cur_tx_ring_size; 22149762a033SMarcin Wojtas new_rx_ring_size = cur_rx_ring_size; 22159762a033SMarcin Wojtas 22169762a033SMarcin Wojtas /* 221782e558eaSDawid Gorecki * Decrease the size of a larger queue, or decrease both if they 221882e558eaSDawid Gorecki * are the same size. 22199762a033SMarcin Wojtas */ 22209762a033SMarcin Wojtas if (cur_rx_ring_size <= cur_tx_ring_size) 22219762a033SMarcin Wojtas new_tx_ring_size = cur_tx_ring_size / 2; 22229762a033SMarcin Wojtas if (cur_rx_ring_size >= cur_tx_ring_size) 22239762a033SMarcin Wojtas new_rx_ring_size = cur_rx_ring_size / 2; 22249762a033SMarcin Wojtas 22259762a033SMarcin Wojtas if (new_tx_ring_size < ENA_MIN_RING_SIZE || 22269762a033SMarcin Wojtas new_rx_ring_size < ENA_MIN_RING_SIZE) { 22273fc5d816SMarcin Wojtas ena_log(pdev, ERR, 22289762a033SMarcin Wojtas "Queue creation failed with the smallest possible queue size" 22299762a033SMarcin Wojtas "of %d for both queues. Not retrying with smaller queues\n", 22309762a033SMarcin Wojtas ENA_MIN_RING_SIZE); 22319762a033SMarcin Wojtas return (rc); 22329762a033SMarcin Wojtas } 22339762a033SMarcin Wojtas 223477160654SArtur Rojek ena_log(pdev, INFO, 223577160654SArtur Rojek "Retrying queue creation with sizes TX=%d, RX=%d\n", 223677160654SArtur Rojek new_tx_ring_size, new_rx_ring_size); 223777160654SArtur Rojek 22389762a033SMarcin Wojtas set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size); 22399762a033SMarcin Wojtas } 22409762a033SMarcin Wojtas } 22419762a033SMarcin Wojtas 224238c7b965SMarcin Wojtas int 22439b8d05b8SZbigniew Bodek ena_up(struct ena_adapter *adapter) 22449b8d05b8SZbigniew Bodek { 22459b8d05b8SZbigniew Bodek int rc = 0; 22469b8d05b8SZbigniew Bodek 224707aff471SArtur Rojek ENA_LOCK_ASSERT(); 2248cb98c439SArtur Rojek 22493f9ed7abSMarcin Wojtas if (unlikely(device_is_attached(adapter->pdev) == 0)) { 22503fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "device is not attached!\n"); 22519b8d05b8SZbigniew Bodek return (ENXIO); 22529b8d05b8SZbigniew Bodek } 22539b8d05b8SZbigniew Bodek 2254579d23aaSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 2255579d23aaSMarcin Wojtas return (0); 2256579d23aaSMarcin Wojtas 22573fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, "device is going UP\n"); 22589b8d05b8SZbigniew Bodek 22599b8d05b8SZbigniew Bodek /* setup interrupts for IO queues */ 226077958fcdSMarcin Wojtas rc = ena_setup_io_intr(adapter); 226177958fcdSMarcin Wojtas if (unlikely(rc != 0)) { 22623fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "error setting up IO interrupt\n"); 226377958fcdSMarcin Wojtas goto error; 226477958fcdSMarcin Wojtas } 22659b8d05b8SZbigniew Bodek rc = ena_request_io_irq(adapter); 22663f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 22673fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 
"err_req_irq\n"); 226877958fcdSMarcin Wojtas goto error; 22699b8d05b8SZbigniew Bodek } 22709b8d05b8SZbigniew Bodek 22713fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, 227282e558eaSDawid Gorecki "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, LLQ is %s\n", 22737d8c4feeSMarcin Wojtas adapter->num_io_queues, 22749762a033SMarcin Wojtas adapter->requested_rx_ring_size, 22759762a033SMarcin Wojtas adapter->requested_tx_ring_size, 22769762a033SMarcin Wojtas (adapter->ena_dev->tx_mem_queue_type == 22779762a033SMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) ? "ENABLED" : "DISABLED"); 22787d8c4feeSMarcin Wojtas 22799762a033SMarcin Wojtas rc = create_queues_with_size_backoff(adapter); 22803f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 22813fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 22829762a033SMarcin Wojtas "error creating queues with size backoff\n"); 22839762a033SMarcin Wojtas goto err_create_queues_with_backoff; 22849b8d05b8SZbigniew Bodek } 22859b8d05b8SZbigniew Bodek 2286fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) 22879b8d05b8SZbigniew Bodek if_link_state_change(adapter->ifp, LINK_STATE_UP); 22889b8d05b8SZbigniew Bodek 22899b8d05b8SZbigniew Bodek rc = ena_up_complete(adapter); 22903f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 22919b8d05b8SZbigniew Bodek goto err_up_complete; 22929b8d05b8SZbigniew Bodek 22939b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.interface_up, 1); 22949b8d05b8SZbigniew Bodek 22959b8d05b8SZbigniew Bodek ena_update_hwassist(adapter); 22969b8d05b8SZbigniew Bodek 229782e558eaSDawid Gorecki if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 22989b8d05b8SZbigniew Bodek 2299fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter); 230093471047SZbigniew Bodek 230193471047SZbigniew Bodek ena_unmask_all_io_irqs(adapter); 23029b8d05b8SZbigniew Bodek 23039b8d05b8SZbigniew Bodek return (0); 23049b8d05b8SZbigniew Bodek 23059b8d05b8SZbigniew Bodek err_up_complete: 23069b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(adapter); 23079b8d05b8SZbigniew Bodek ena_free_all_rx_resources(adapter); 23089b8d05b8SZbigniew Bodek ena_free_all_tx_resources(adapter); 23099762a033SMarcin Wojtas err_create_queues_with_backoff: 23109b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 231177958fcdSMarcin Wojtas error: 23129b8d05b8SZbigniew Bodek return (rc); 23139b8d05b8SZbigniew Bodek } 23149b8d05b8SZbigniew Bodek 23159b8d05b8SZbigniew Bodek static uint64_t 23169b8d05b8SZbigniew Bodek ena_get_counter(if_t ifp, ift_counter cnt) 23179b8d05b8SZbigniew Bodek { 23189b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 23199b8d05b8SZbigniew Bodek struct ena_hw_stats *stats; 23209b8d05b8SZbigniew Bodek 23219b8d05b8SZbigniew Bodek adapter = if_getsoftc(ifp); 23229b8d05b8SZbigniew Bodek stats = &adapter->hw_stats; 23239b8d05b8SZbigniew Bodek 23249b8d05b8SZbigniew Bodek switch (cnt) { 23259b8d05b8SZbigniew Bodek case IFCOUNTER_IPACKETS: 232630217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_packets)); 23279b8d05b8SZbigniew Bodek case IFCOUNTER_OPACKETS: 232830217e2dSMarcin Wojtas return (counter_u64_fetch(stats->tx_packets)); 23299b8d05b8SZbigniew Bodek case IFCOUNTER_IBYTES: 233030217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_bytes)); 23319b8d05b8SZbigniew Bodek case IFCOUNTER_OBYTES: 233230217e2dSMarcin Wojtas return (counter_u64_fetch(stats->tx_bytes)); 23339b8d05b8SZbigniew Bodek case IFCOUNTER_IQDROPS: 233430217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_drops)); 23356c84cec3SMarcin Wojtas case 
IFCOUNTER_OQDROPS: 23366c84cec3SMarcin Wojtas return (counter_u64_fetch(stats->tx_drops)); 23379b8d05b8SZbigniew Bodek default: 23389b8d05b8SZbigniew Bodek return (if_get_counter_default(ifp, cnt)); 23399b8d05b8SZbigniew Bodek } 23409b8d05b8SZbigniew Bodek } 23419b8d05b8SZbigniew Bodek 23429b8d05b8SZbigniew Bodek static int 23439b8d05b8SZbigniew Bodek ena_media_change(if_t ifp) 23449b8d05b8SZbigniew Bodek { 23459b8d05b8SZbigniew Bodek /* Media Change is not supported by firmware */ 23469b8d05b8SZbigniew Bodek return (0); 23479b8d05b8SZbigniew Bodek } 23489b8d05b8SZbigniew Bodek 23499b8d05b8SZbigniew Bodek static void 23509b8d05b8SZbigniew Bodek ena_media_status(if_t ifp, struct ifmediareq *ifmr) 23519b8d05b8SZbigniew Bodek { 23529b8d05b8SZbigniew Bodek struct ena_adapter *adapter = if_getsoftc(ifp); 23533fc5d816SMarcin Wojtas ena_log(adapter->pdev, DBG, "Media status update\n"); 23549b8d05b8SZbigniew Bodek 235507aff471SArtur Rojek ENA_LOCK_LOCK(); 23569b8d05b8SZbigniew Bodek 23579b8d05b8SZbigniew Bodek ifmr->ifm_status = IFM_AVALID; 23589b8d05b8SZbigniew Bodek ifmr->ifm_active = IFM_ETHER; 23599b8d05b8SZbigniew Bodek 2360fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) { 236107aff471SArtur Rojek ENA_LOCK_UNLOCK(); 23623fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, "Link is down\n"); 23639b8d05b8SZbigniew Bodek return; 23649b8d05b8SZbigniew Bodek } 23659b8d05b8SZbigniew Bodek 23669b8d05b8SZbigniew Bodek ifmr->ifm_status |= IFM_ACTIVE; 2367b8ca5dbeSMarcin Wojtas ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX; 23689b8d05b8SZbigniew Bodek 236907aff471SArtur Rojek ENA_LOCK_UNLOCK(); 23709b8d05b8SZbigniew Bodek } 23719b8d05b8SZbigniew Bodek 23729b8d05b8SZbigniew Bodek static void 23739b8d05b8SZbigniew Bodek ena_init(void *arg) 23749b8d05b8SZbigniew Bodek { 23759b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)arg; 23769b8d05b8SZbigniew Bodek 2377fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) { 237807aff471SArtur Rojek ENA_LOCK_LOCK(); 23799b8d05b8SZbigniew Bodek ena_up(adapter); 238007aff471SArtur Rojek ENA_LOCK_UNLOCK(); 23813d3a90f9SZbigniew Bodek } 23829b8d05b8SZbigniew Bodek } 23839b8d05b8SZbigniew Bodek 23849b8d05b8SZbigniew Bodek static int 23859b8d05b8SZbigniew Bodek ena_ioctl(if_t ifp, u_long command, caddr_t data) 23869b8d05b8SZbigniew Bodek { 23879b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 23889b8d05b8SZbigniew Bodek struct ifreq *ifr; 23899b8d05b8SZbigniew Bodek int rc; 23909b8d05b8SZbigniew Bodek 23917583c633SJustin Hibbits adapter = if_getsoftc(ifp); 23929b8d05b8SZbigniew Bodek ifr = (struct ifreq *)data; 23939b8d05b8SZbigniew Bodek 23949b8d05b8SZbigniew Bodek /* 23959b8d05b8SZbigniew Bodek * Acquiring lock to prevent from running up and down routines parallel. 
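 * In practice the SIOCSIFMTU, SIOCSIFFLAGS and SIOCSIFCAP handlers below
 * call ena_down() and/or ena_up(), so they take ENA_LOCK around those
 * calls.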
23969b8d05b8SZbigniew Bodek */ 23979b8d05b8SZbigniew Bodek rc = 0; 23989b8d05b8SZbigniew Bodek switch (command) { 23999b8d05b8SZbigniew Bodek case SIOCSIFMTU: 24007583c633SJustin Hibbits if (if_getmtu(ifp) == ifr->ifr_mtu) 2401dbf2eb54SMarcin Wojtas break; 240207aff471SArtur Rojek ENA_LOCK_LOCK(); 24039b8d05b8SZbigniew Bodek ena_down(adapter); 24049b8d05b8SZbigniew Bodek 24059b8d05b8SZbigniew Bodek ena_change_mtu(ifp, ifr->ifr_mtu); 24069b8d05b8SZbigniew Bodek 24079b8d05b8SZbigniew Bodek rc = ena_up(adapter); 240807aff471SArtur Rojek ENA_LOCK_UNLOCK(); 24099b8d05b8SZbigniew Bodek break; 24109b8d05b8SZbigniew Bodek 24119b8d05b8SZbigniew Bodek case SIOCSIFFLAGS: 24127583c633SJustin Hibbits if ((if_getflags(ifp) & IFF_UP) != 0) { 24130bdffe59SMarcin Wojtas if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 24147583c633SJustin Hibbits if ((if_getflags(ifp) & (IFF_PROMISC | 24157583c633SJustin Hibbits IFF_ALLMULTI)) != 0) { 24163fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, 24179b8d05b8SZbigniew Bodek "ioctl promisc/allmulti\n"); 24189b8d05b8SZbigniew Bodek } 24199b8d05b8SZbigniew Bodek } else { 242007aff471SArtur Rojek ENA_LOCK_LOCK(); 24219b8d05b8SZbigniew Bodek rc = ena_up(adapter); 242207aff471SArtur Rojek ENA_LOCK_UNLOCK(); 24239b8d05b8SZbigniew Bodek } 24249b8d05b8SZbigniew Bodek } else { 24250bdffe59SMarcin Wojtas if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 242607aff471SArtur Rojek ENA_LOCK_LOCK(); 24279b8d05b8SZbigniew Bodek ena_down(adapter); 242807aff471SArtur Rojek ENA_LOCK_UNLOCK(); 2429e67c6554SZbigniew Bodek } 24309b8d05b8SZbigniew Bodek } 24319b8d05b8SZbigniew Bodek break; 24329b8d05b8SZbigniew Bodek 24339b8d05b8SZbigniew Bodek case SIOCADDMULTI: 24349b8d05b8SZbigniew Bodek case SIOCDELMULTI: 24359b8d05b8SZbigniew Bodek break; 24369b8d05b8SZbigniew Bodek 24379b8d05b8SZbigniew Bodek case SIOCSIFMEDIA: 24389b8d05b8SZbigniew Bodek case SIOCGIFMEDIA: 24399b8d05b8SZbigniew Bodek rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command); 24409b8d05b8SZbigniew Bodek break; 24419b8d05b8SZbigniew Bodek 24429b8d05b8SZbigniew Bodek case SIOCSIFCAP: 24439b8d05b8SZbigniew Bodek { 24449b8d05b8SZbigniew Bodek int reinit = 0; 24459b8d05b8SZbigniew Bodek 24467583c633SJustin Hibbits if (ifr->ifr_reqcap != if_getcapenable(ifp)) { 24477583c633SJustin Hibbits if_setcapenable(ifp, ifr->ifr_reqcap); 24489b8d05b8SZbigniew Bodek reinit = 1; 24499b8d05b8SZbigniew Bodek } 24509b8d05b8SZbigniew Bodek 24510bdffe59SMarcin Wojtas if ((reinit != 0) && 24520bdffe59SMarcin Wojtas ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) { 245307aff471SArtur Rojek ENA_LOCK_LOCK(); 24549b8d05b8SZbigniew Bodek ena_down(adapter); 24559b8d05b8SZbigniew Bodek rc = ena_up(adapter); 245607aff471SArtur Rojek ENA_LOCK_UNLOCK(); 24579b8d05b8SZbigniew Bodek } 24589b8d05b8SZbigniew Bodek } 24599b8d05b8SZbigniew Bodek 24609b8d05b8SZbigniew Bodek break; 24619b8d05b8SZbigniew Bodek default: 24629b8d05b8SZbigniew Bodek rc = ether_ioctl(ifp, command, data); 24639b8d05b8SZbigniew Bodek break; 24649b8d05b8SZbigniew Bodek } 24659b8d05b8SZbigniew Bodek 24669b8d05b8SZbigniew Bodek return (rc); 24679b8d05b8SZbigniew Bodek } 24689b8d05b8SZbigniew Bodek 24699b8d05b8SZbigniew Bodek static int 24709b8d05b8SZbigniew Bodek ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat) 24719b8d05b8SZbigniew Bodek { 24729b8d05b8SZbigniew Bodek int caps = 0; 24739b8d05b8SZbigniew Bodek 24740bdffe59SMarcin Wojtas if ((feat->offload.tx & 24759b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | 24769b8d05b8SZbigniew 
Bodek ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK | 24770bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0) 24789b8d05b8SZbigniew Bodek caps |= IFCAP_TXCSUM; 24799b8d05b8SZbigniew Bodek 24800bdffe59SMarcin Wojtas if ((feat->offload.tx & 24819b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK | 24820bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0) 24839b8d05b8SZbigniew Bodek caps |= IFCAP_TXCSUM_IPV6; 24849b8d05b8SZbigniew Bodek 248582e558eaSDawid Gorecki if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0) 24869b8d05b8SZbigniew Bodek caps |= IFCAP_TSO4; 24879b8d05b8SZbigniew Bodek 248882e558eaSDawid Gorecki if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0) 24899b8d05b8SZbigniew Bodek caps |= IFCAP_TSO6; 24909b8d05b8SZbigniew Bodek 24910bdffe59SMarcin Wojtas if ((feat->offload.rx_supported & 24929b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK | 24930bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0) 24949b8d05b8SZbigniew Bodek caps |= IFCAP_RXCSUM; 24959b8d05b8SZbigniew Bodek 24960bdffe59SMarcin Wojtas if ((feat->offload.rx_supported & 24970bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0) 24989b8d05b8SZbigniew Bodek caps |= IFCAP_RXCSUM_IPV6; 24999b8d05b8SZbigniew Bodek 25009b8d05b8SZbigniew Bodek caps |= IFCAP_LRO | IFCAP_JUMBO_MTU; 25019b8d05b8SZbigniew Bodek 25029b8d05b8SZbigniew Bodek return (caps); 25039b8d05b8SZbigniew Bodek } 25049b8d05b8SZbigniew Bodek 25059b8d05b8SZbigniew Bodek static void 25069b8d05b8SZbigniew Bodek ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp) 25079b8d05b8SZbigniew Bodek { 250882e558eaSDawid Gorecki host_info->supported_network_features[0] = (uint32_t)if_getcapabilities(ifp); 25099b8d05b8SZbigniew Bodek } 25109b8d05b8SZbigniew Bodek 25119b8d05b8SZbigniew Bodek static void 25129b8d05b8SZbigniew Bodek ena_update_hwassist(struct ena_adapter *adapter) 25139b8d05b8SZbigniew Bodek { 25149b8d05b8SZbigniew Bodek if_t ifp = adapter->ifp; 25159b8d05b8SZbigniew Bodek uint32_t feat = adapter->tx_offload_cap; 25169b8d05b8SZbigniew Bodek int cap = if_getcapenable(ifp); 25179b8d05b8SZbigniew Bodek int flags = 0; 25189b8d05b8SZbigniew Bodek 25199b8d05b8SZbigniew Bodek if_clearhwassist(ifp); 25209b8d05b8SZbigniew Bodek 25210bdffe59SMarcin Wojtas if ((cap & IFCAP_TXCSUM) != 0) { 25220bdffe59SMarcin Wojtas if ((feat & 25230bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0) 25249b8d05b8SZbigniew Bodek flags |= CSUM_IP; 25250bdffe59SMarcin Wojtas if ((feat & 25269b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | 25270bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0) 25289b8d05b8SZbigniew Bodek flags |= CSUM_IP_UDP | CSUM_IP_TCP; 25299b8d05b8SZbigniew Bodek } 25309b8d05b8SZbigniew Bodek 25310bdffe59SMarcin Wojtas if ((cap & IFCAP_TXCSUM_IPV6) != 0) 25329b8d05b8SZbigniew Bodek flags |= CSUM_IP6_UDP | CSUM_IP6_TCP; 25339b8d05b8SZbigniew Bodek 25340bdffe59SMarcin Wojtas if ((cap & IFCAP_TSO4) != 0) 25359b8d05b8SZbigniew Bodek flags |= CSUM_IP_TSO; 25369b8d05b8SZbigniew Bodek 25370bdffe59SMarcin Wojtas if ((cap & IFCAP_TSO6) != 0) 25389b8d05b8SZbigniew Bodek flags |= CSUM_IP6_TSO; 25399b8d05b8SZbigniew Bodek 25409b8d05b8SZbigniew Bodek if_sethwassistbits(ifp, flags, 0); 25419b8d05b8SZbigniew Bodek } 
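/*
 * Illustrative sketch, not part of the driver: ena_get_dev_offloads() and
 * ena_update_hwassist() above translate device offload feature bits first
 * into interface capability bits and then into mbuf checksum-assist flags.
 * The compile-disabled, standalone fragment below mirrors that pattern with
 * hypothetical EX_ constants standing in for the real IFCAP_ and CSUM_
 * definitions.
 */
#if 0
#include <stdint.h>

#define EX_CAP_TXCSUM	0x01	/* hypothetical stand-in for IFCAP_TXCSUM */
#define EX_CAP_TSO	0x02	/* hypothetical stand-in for IFCAP_TSO4 */
#define EX_HWA_IPCSUM	0x10	/* hypothetical stand-in for CSUM_IP */
#define EX_HWA_L4CSUM	0x20	/* hypothetical stand-in for CSUM_IP_TCP/UDP */
#define EX_HWA_TSO	0x40	/* hypothetical stand-in for CSUM_IP_TSO */

/* Derive hardware-assist flags from the capabilities a user has enabled. */
static uint32_t
example_caps_to_hwassist(uint32_t capenable)
{
	uint32_t flags = 0;

	if ((capenable & EX_CAP_TXCSUM) != 0)
		flags |= EX_HWA_IPCSUM | EX_HWA_L4CSUM;
	if ((capenable & EX_CAP_TSO) != 0)
		flags |= EX_HWA_TSO;

	return (flags);
}
#endif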
25429b8d05b8SZbigniew Bodek 2543aa386085SZhenlei Huang static void 25449b8d05b8SZbigniew Bodek ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter, 25459b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *feat) 25469b8d05b8SZbigniew Bodek { 25479b8d05b8SZbigniew Bodek if_t ifp; 25489b8d05b8SZbigniew Bodek int caps = 0; 25499b8d05b8SZbigniew Bodek 25509b8d05b8SZbigniew Bodek ifp = adapter->ifp = if_gethandle(IFT_ETHER); 25519b8d05b8SZbigniew Bodek if_initname(ifp, device_get_name(pdev), device_get_unit(pdev)); 25529b8d05b8SZbigniew Bodek if_setdev(ifp, pdev); 25539b8d05b8SZbigniew Bodek if_setsoftc(ifp, adapter); 25549b8d05b8SZbigniew Bodek 2555a6b55ee6SGleb Smirnoff if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 25569b8d05b8SZbigniew Bodek if_setinitfn(ifp, ena_init); 25579b8d05b8SZbigniew Bodek if_settransmitfn(ifp, ena_mq_start); 25589b8d05b8SZbigniew Bodek if_setqflushfn(ifp, ena_qflush); 25599b8d05b8SZbigniew Bodek if_setioctlfn(ifp, ena_ioctl); 25609b8d05b8SZbigniew Bodek if_setgetcounterfn(ifp, ena_get_counter); 25619b8d05b8SZbigniew Bodek 25629762a033SMarcin Wojtas if_setsendqlen(ifp, adapter->requested_tx_ring_size); 25639b8d05b8SZbigniew Bodek if_setsendqready(ifp); 25649b8d05b8SZbigniew Bodek if_setmtu(ifp, ETHERMTU); 25659b8d05b8SZbigniew Bodek if_setbaudrate(ifp, 0); 25669b8d05b8SZbigniew Bodek /* Zeroize capabilities... */ 25679b8d05b8SZbigniew Bodek if_setcapabilities(ifp, 0); 25689b8d05b8SZbigniew Bodek if_setcapenable(ifp, 0); 25699b8d05b8SZbigniew Bodek /* check hardware support */ 25709b8d05b8SZbigniew Bodek caps = ena_get_dev_offloads(feat); 25719b8d05b8SZbigniew Bodek /* ... and set them */ 25729b8d05b8SZbigniew Bodek if_setcapabilitiesbit(ifp, caps, 0); 25739b8d05b8SZbigniew Bodek 25749b8d05b8SZbigniew Bodek /* TSO parameters */ 25757583c633SJustin Hibbits if_sethwtsomax(ifp, ENA_TSO_MAXSIZE - 25767583c633SJustin Hibbits (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)); 25777583c633SJustin Hibbits if_sethwtsomaxsegcount(ifp, adapter->max_tx_sgl_size - 1); 25787583c633SJustin Hibbits if_sethwtsomaxsegsize(ifp, ENA_TSO_MAXSIZE); 25799b8d05b8SZbigniew Bodek 25809b8d05b8SZbigniew Bodek if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 25819b8d05b8SZbigniew Bodek if_setcapenable(ifp, if_getcapabilities(ifp)); 25829b8d05b8SZbigniew Bodek 25839b8d05b8SZbigniew Bodek /* 25849b8d05b8SZbigniew Bodek * Specify the media types supported by this adapter and register 25859b8d05b8SZbigniew Bodek * callbacks to update media and link information 25869b8d05b8SZbigniew Bodek */ 258782e558eaSDawid Gorecki ifmedia_init(&adapter->media, IFM_IMASK, ena_media_change, 258882e558eaSDawid Gorecki ena_media_status); 25899b8d05b8SZbigniew Bodek ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 25909b8d05b8SZbigniew Bodek ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 25919b8d05b8SZbigniew Bodek 25929b8d05b8SZbigniew Bodek ether_ifattach(ifp, adapter->mac_addr); 25939b8d05b8SZbigniew Bodek } 25949b8d05b8SZbigniew Bodek 259538c7b965SMarcin Wojtas void 25969b8d05b8SZbigniew Bodek ena_down(struct ena_adapter *adapter) 25979b8d05b8SZbigniew Bodek { 2598a195fab0SMarcin Wojtas int rc; 25999b8d05b8SZbigniew Bodek 260007aff471SArtur Rojek ENA_LOCK_ASSERT(); 2601cb98c439SArtur Rojek 2602579d23aaSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 2603579d23aaSMarcin Wojtas return; 2604579d23aaSMarcin Wojtas 260578554d0cSDawid Gorecki ena_log(adapter->pdev, INFO, "device is going DOWN\n"); 26069b8d05b8SZbigniew Bodek 2607fd43fd2aSMarcin Wojtas 
ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter); 260882e558eaSDawid Gorecki if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 26099b8d05b8SZbigniew Bodek 26109b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 26119b8d05b8SZbigniew Bodek 2612fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) { 261382e558eaSDawid Gorecki rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 26143f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 261582e558eaSDawid Gorecki ena_log(adapter->pdev, ERR, "Device reset failed\n"); 2616a195fab0SMarcin Wojtas } 2617a195fab0SMarcin Wojtas 26189b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(adapter); 26199b8d05b8SZbigniew Bodek 26209b8d05b8SZbigniew Bodek ena_free_all_tx_bufs(adapter); 26219b8d05b8SZbigniew Bodek ena_free_all_rx_bufs(adapter); 26229b8d05b8SZbigniew Bodek ena_free_all_tx_resources(adapter); 26239b8d05b8SZbigniew Bodek ena_free_all_rx_resources(adapter); 26249b8d05b8SZbigniew Bodek 26259b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.interface_down, 1); 26269b8d05b8SZbigniew Bodek } 26279b8d05b8SZbigniew Bodek 26287d8c4feeSMarcin Wojtas static uint32_t 26297d8c4feeSMarcin Wojtas ena_calc_max_io_queue_num(device_t pdev, struct ena_com_dev *ena_dev, 26309b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *get_feat_ctx) 26319b8d05b8SZbigniew Bodek { 26327d8c4feeSMarcin Wojtas uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; 26339b8d05b8SZbigniew Bodek 26346064f289SMarcin Wojtas /* Regular queues capabilities */ 26356064f289SMarcin Wojtas if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 26366064f289SMarcin Wojtas struct ena_admin_queue_ext_feature_fields *max_queue_ext = 26376064f289SMarcin Wojtas &get_feat_ctx->max_queue_ext.max_queue_ext; 26384fa9e02dSMarcin Wojtas io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num, 26394fa9e02dSMarcin Wojtas max_queue_ext->max_rx_cq_num); 26406064f289SMarcin Wojtas 26414fa9e02dSMarcin Wojtas io_tx_sq_num = max_queue_ext->max_tx_sq_num; 26424fa9e02dSMarcin Wojtas io_tx_cq_num = max_queue_ext->max_tx_cq_num; 26436064f289SMarcin Wojtas } else { 26446064f289SMarcin Wojtas struct ena_admin_queue_feature_desc *max_queues = 26456064f289SMarcin Wojtas &get_feat_ctx->max_queues; 26464fa9e02dSMarcin Wojtas io_tx_sq_num = max_queues->max_sq_num; 26474fa9e02dSMarcin Wojtas io_tx_cq_num = max_queues->max_cq_num; 26484fa9e02dSMarcin Wojtas io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num); 26496064f289SMarcin Wojtas } 26509b8d05b8SZbigniew Bodek 26514fa9e02dSMarcin Wojtas /* In case of LLQ use the llq fields for the tx SQ/CQ */ 26524fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 26534fa9e02dSMarcin Wojtas io_tx_sq_num = get_feat_ctx->llq.max_llq_num; 26544fa9e02dSMarcin Wojtas 26557d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES); 26567d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_rx_num); 26577d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_sq_num); 26587d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_cq_num); 2659609e6f6dSGordon Bergling /* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */ 26607d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, max_num_io_queues, 26617d8c4feeSMarcin Wojtas pci_msix_count(pdev) - 1); 26626d1ef2abSArtur Rojek #ifdef RSS 26636d1ef2abSArtur Rojek max_num_io_queues = min_t(uint32_t, max_num_io_queues, 
26646d1ef2abSArtur Rojek rss_getnumbuckets()); 26656d1ef2abSArtur Rojek #endif 26669b8d05b8SZbigniew Bodek 26677d8c4feeSMarcin Wojtas return (max_num_io_queues); 26689b8d05b8SZbigniew Bodek } 26699b8d05b8SZbigniew Bodek 26700bdffe59SMarcin Wojtas static int 26713fc5d816SMarcin Wojtas ena_enable_wc(device_t pdev, struct resource *res) 26724fa9e02dSMarcin Wojtas { 2673472d4784SMarcin Wojtas #if defined(__i386) || defined(__amd64) || defined(__aarch64__) 26744fa9e02dSMarcin Wojtas vm_offset_t va; 26754fa9e02dSMarcin Wojtas vm_size_t len; 26764fa9e02dSMarcin Wojtas int rc; 26774fa9e02dSMarcin Wojtas 26784fa9e02dSMarcin Wojtas va = (vm_offset_t)rman_get_virtual(res); 26794fa9e02dSMarcin Wojtas len = rman_get_size(res); 26804fa9e02dSMarcin Wojtas /* Enable write combining */ 2681472d4784SMarcin Wojtas rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING); 26824fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 26833fc5d816SMarcin Wojtas ena_log(pdev, ERR, "pmap_change_attr failed, %d\n", rc); 26844fa9e02dSMarcin Wojtas return (rc); 26854fa9e02dSMarcin Wojtas } 26864fa9e02dSMarcin Wojtas 26874fa9e02dSMarcin Wojtas return (0); 26884fa9e02dSMarcin Wojtas #endif 26894fa9e02dSMarcin Wojtas return (EOPNOTSUPP); 26904fa9e02dSMarcin Wojtas } 26914fa9e02dSMarcin Wojtas 26924fa9e02dSMarcin Wojtas static int 26934fa9e02dSMarcin Wojtas ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev, 26944fa9e02dSMarcin Wojtas struct ena_admin_feature_llq_desc *llq, 26954fa9e02dSMarcin Wojtas struct ena_llq_configurations *llq_default_configurations) 26964fa9e02dSMarcin Wojtas { 269790232d18SDawid Gorecki int rc; 26984fa9e02dSMarcin Wojtas uint32_t llq_feature_mask; 26994fa9e02dSMarcin Wojtas 27004fa9e02dSMarcin Wojtas llq_feature_mask = 1 << ENA_ADMIN_LLQ; 27014fa9e02dSMarcin Wojtas if (!(ena_dev->supported_features & llq_feature_mask)) { 27023fc5d816SMarcin Wojtas ena_log(pdev, WARN, 27034fa9e02dSMarcin Wojtas "LLQ is not supported. Fallback to host mode policy.\n"); 27044fa9e02dSMarcin Wojtas ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 27054fa9e02dSMarcin Wojtas return (0); 27064fa9e02dSMarcin Wojtas } 27074fa9e02dSMarcin Wojtas 270890232d18SDawid Gorecki if (ena_dev->mem_bar == NULL) { 270990232d18SDawid Gorecki ena_log(pdev, WARN, 271090232d18SDawid Gorecki "LLQ is advertised as supported but device doesn't expose mem bar.\n"); 271190232d18SDawid Gorecki ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 271290232d18SDawid Gorecki return (0); 271390232d18SDawid Gorecki } 271490232d18SDawid Gorecki 27154fa9e02dSMarcin Wojtas rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 27164fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 271782e558eaSDawid Gorecki ena_log(pdev, WARN, 271882e558eaSDawid Gorecki "Failed to configure the device mode. 
" 27194fa9e02dSMarcin Wojtas "Fallback to host mode policy.\n"); 27204fa9e02dSMarcin Wojtas ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 272190232d18SDawid Gorecki } 272290232d18SDawid Gorecki 27234fa9e02dSMarcin Wojtas return (0); 27244fa9e02dSMarcin Wojtas } 27254fa9e02dSMarcin Wojtas 272690232d18SDawid Gorecki static int 272790232d18SDawid Gorecki ena_map_llq_mem_bar(device_t pdev, struct ena_com_dev *ena_dev) 272890232d18SDawid Gorecki { 272990232d18SDawid Gorecki struct ena_adapter *adapter = device_get_softc(pdev); 273090232d18SDawid Gorecki int rc, rid; 27314fa9e02dSMarcin Wojtas 27324fa9e02dSMarcin Wojtas /* Try to allocate resources for LLQ bar */ 27334fa9e02dSMarcin Wojtas rid = PCIR_BAR(ENA_MEM_BAR); 273482e558eaSDawid Gorecki adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid, 273582e558eaSDawid Gorecki RF_ACTIVE); 27364fa9e02dSMarcin Wojtas if (unlikely(adapter->memory == NULL)) { 273782e558eaSDawid Gorecki ena_log(pdev, WARN, 27383324e304SMichal Krawczyk "Unable to allocate LLQ bar resource. LLQ mode won't be used.\n"); 27394fa9e02dSMarcin Wojtas return (0); 27404fa9e02dSMarcin Wojtas } 27414fa9e02dSMarcin Wojtas 27424fa9e02dSMarcin Wojtas /* Enable write combining for better LLQ performance */ 27433fc5d816SMarcin Wojtas rc = ena_enable_wc(adapter->pdev, adapter->memory); 27444fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 27453fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to enable write combining.\n"); 27464fa9e02dSMarcin Wojtas return (rc); 27474fa9e02dSMarcin Wojtas } 27484fa9e02dSMarcin Wojtas 27494fa9e02dSMarcin Wojtas /* 27504fa9e02dSMarcin Wojtas * Save virtual address of the device's memory region 27514fa9e02dSMarcin Wojtas * for the ena_com layer. 27524fa9e02dSMarcin Wojtas */ 27534fa9e02dSMarcin Wojtas ena_dev->mem_bar = rman_get_virtual(adapter->memory); 27544fa9e02dSMarcin Wojtas 27554fa9e02dSMarcin Wojtas return (0); 27564fa9e02dSMarcin Wojtas } 27574fa9e02dSMarcin Wojtas 275882e558eaSDawid Gorecki static inline void 2759b1c38df0SOsama Abboud ena_set_llq_configurations(struct ena_llq_configurations *llq_config, 2760b1c38df0SOsama Abboud struct ena_admin_feature_llq_desc *llq, struct ena_adapter *adapter) 27614fa9e02dSMarcin Wojtas { 27624fa9e02dSMarcin Wojtas llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 27634fa9e02dSMarcin Wojtas llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 27644fa9e02dSMarcin Wojtas llq_config->llq_num_decs_before_header = 27654fa9e02dSMarcin Wojtas ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 2766b1c38df0SOsama Abboud if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0) { 2767b1c38df0SOsama Abboud if ((ena_force_large_llq_header == ENA_LLQ_HEADER_SIZE_POLICY_LARGE) || 2768b1c38df0SOsama Abboud (ena_force_large_llq_header == ENA_LLQ_HEADER_SIZE_POLICY_DEFAULT && 2769b1c38df0SOsama Abboud llq->entry_size_recommended == ENA_ADMIN_LIST_ENTRY_SIZE_256B)) { 2770beaadec9SMarcin Wojtas llq_config->llq_ring_entry_size = 2771beaadec9SMarcin Wojtas ENA_ADMIN_LIST_ENTRY_SIZE_256B; 2772beaadec9SMarcin Wojtas llq_config->llq_ring_entry_size_value = 256; 2773b1c38df0SOsama Abboud adapter->llq_policy = ENA_ADMIN_LIST_ENTRY_SIZE_256B; 2774b1c38df0SOsama Abboud } 2775beaadec9SMarcin Wojtas } else { 2776beaadec9SMarcin Wojtas llq_config->llq_ring_entry_size = 2777beaadec9SMarcin Wojtas ENA_ADMIN_LIST_ENTRY_SIZE_128B; 27784fa9e02dSMarcin Wojtas llq_config->llq_ring_entry_size_value = 128; 2779b1c38df0SOsama Abboud adapter->llq_policy = ENA_ADMIN_LIST_ENTRY_SIZE_128B; 
27804fa9e02dSMarcin Wojtas } 2781beaadec9SMarcin Wojtas } 27824fa9e02dSMarcin Wojtas 27834fa9e02dSMarcin Wojtas static int 2784b1c38df0SOsama Abboud ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx, struct ena_adapter *adapter) 27859b8d05b8SZbigniew Bodek { 27864fa9e02dSMarcin Wojtas struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 27874fa9e02dSMarcin Wojtas struct ena_com_dev *ena_dev = ctx->ena_dev; 27886064f289SMarcin Wojtas uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE; 27897d8c4feeSMarcin Wojtas uint32_t rx_queue_size = ENA_DEFAULT_RING_SIZE; 27907d8c4feeSMarcin Wojtas uint32_t max_tx_queue_size; 27917d8c4feeSMarcin Wojtas uint32_t max_rx_queue_size; 27929b8d05b8SZbigniew Bodek 27934fa9e02dSMarcin Wojtas if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 27946064f289SMarcin Wojtas struct ena_admin_queue_ext_feature_fields *max_queue_ext = 27956064f289SMarcin Wojtas &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 27967d8c4feeSMarcin Wojtas max_rx_queue_size = min_t(uint32_t, 27977d8c4feeSMarcin Wojtas max_queue_ext->max_rx_cq_depth, 27986064f289SMarcin Wojtas max_queue_ext->max_rx_sq_depth); 27997d8c4feeSMarcin Wojtas max_tx_queue_size = max_queue_ext->max_tx_cq_depth; 28004fa9e02dSMarcin Wojtas 28014fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == 28024fa9e02dSMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) 28037d8c4feeSMarcin Wojtas max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 28044fa9e02dSMarcin Wojtas llq->max_llq_depth); 28054fa9e02dSMarcin Wojtas else 28067d8c4feeSMarcin Wojtas max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 28076064f289SMarcin Wojtas max_queue_ext->max_tx_sq_depth); 28084fa9e02dSMarcin Wojtas 28096064f289SMarcin Wojtas ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 28106064f289SMarcin Wojtas max_queue_ext->max_per_packet_tx_descs); 28117d8c4feeSMarcin Wojtas ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 28127d8c4feeSMarcin Wojtas max_queue_ext->max_per_packet_rx_descs); 28136064f289SMarcin Wojtas } else { 28146064f289SMarcin Wojtas struct ena_admin_queue_feature_desc *max_queues = 28156064f289SMarcin Wojtas &ctx->get_feat_ctx->max_queues; 281682e558eaSDawid Gorecki max_rx_queue_size = min_t(uint32_t, max_queues->max_cq_depth, 28176064f289SMarcin Wojtas max_queues->max_sq_depth); 28187d8c4feeSMarcin Wojtas max_tx_queue_size = max_queues->max_cq_depth; 28194fa9e02dSMarcin Wojtas 28204fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == 28214fa9e02dSMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) 28227d8c4feeSMarcin Wojtas max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 28234fa9e02dSMarcin Wojtas llq->max_llq_depth); 28244fa9e02dSMarcin Wojtas else 28257d8c4feeSMarcin Wojtas max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 28264fa9e02dSMarcin Wojtas max_queues->max_sq_depth); 28274fa9e02dSMarcin Wojtas 28286064f289SMarcin Wojtas ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 28297d8c4feeSMarcin Wojtas max_queues->max_packet_tx_descs); 28307d8c4feeSMarcin Wojtas ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 28316064f289SMarcin Wojtas max_queues->max_packet_rx_descs); 28326064f289SMarcin Wojtas } 28339b8d05b8SZbigniew Bodek 2834b1c38df0SOsama Abboud if (adapter->llq_policy == ENA_ADMIN_LIST_ENTRY_SIZE_256B) { 2835d0419551SOsama Abboud if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2836d0419551SOsama Abboud if (llq->max_wide_llq_depth != max_tx_queue_size) { 2837d0419551SOsama Abboud if (llq->max_wide_llq_depth == 0) { 
2838d0419551SOsama Abboud /* if there is no large llq max depth from device, we divide 2839d0419551SOsama Abboud * the queue size by 2, leaving the amount of memory 2840d0419551SOsama Abboud * used by the queues unchanged. 2841d0419551SOsama Abboud */ 2842beaadec9SMarcin Wojtas max_tx_queue_size /= 2; 2843d0419551SOsama Abboud } else { 2844d0419551SOsama Abboud max_tx_queue_size = llq->max_wide_llq_depth; 2845d0419551SOsama Abboud } 28463fc5d816SMarcin Wojtas ena_log(ctx->pdev, INFO, 2847d0419551SOsama Abboud "Using large LLQ headers and decreasing maximum Tx queue size to %d\n", 2848beaadec9SMarcin Wojtas max_tx_queue_size); 2849beaadec9SMarcin Wojtas } else { 2850d0419551SOsama Abboud ena_log(ctx->pdev, INFO, "Using large LLQ headers\n"); 2851d0419551SOsama Abboud } 2852d0419551SOsama Abboud } else { 28533fc5d816SMarcin Wojtas ena_log(ctx->pdev, WARN, 2854b1c38df0SOsama Abboud "Using large headers failed: LLQ is disabled or device does not support large headers\n"); 2855beaadec9SMarcin Wojtas } 2856beaadec9SMarcin Wojtas } 2857beaadec9SMarcin Wojtas 2858d0419551SOsama Abboud /* round down to the nearest power of 2 */ 2859d0419551SOsama Abboud max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1); 2860d0419551SOsama Abboud max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1); 2861d0419551SOsama Abboud 28627d8c4feeSMarcin Wojtas tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE, 28637d8c4feeSMarcin Wojtas max_tx_queue_size); 28647d8c4feeSMarcin Wojtas rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE, 28657d8c4feeSMarcin Wojtas max_rx_queue_size); 28669b8d05b8SZbigniew Bodek 28677d8c4feeSMarcin Wojtas tx_queue_size = 1 << (flsl(tx_queue_size) - 1); 28687d8c4feeSMarcin Wojtas rx_queue_size = 1 << (flsl(rx_queue_size) - 1); 28697d8c4feeSMarcin Wojtas 28707d8c4feeSMarcin Wojtas ctx->max_tx_queue_size = max_tx_queue_size; 28717d8c4feeSMarcin Wojtas ctx->max_rx_queue_size = max_rx_queue_size; 28726064f289SMarcin Wojtas ctx->tx_queue_size = tx_queue_size; 28737d8c4feeSMarcin Wojtas ctx->rx_queue_size = rx_queue_size; 28746064f289SMarcin Wojtas 28756064f289SMarcin Wojtas return (0); 28769b8d05b8SZbigniew Bodek } 28779b8d05b8SZbigniew Bodek 28780bdffe59SMarcin Wojtas static void 287946021271SMarcin Wojtas ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev) 28809b8d05b8SZbigniew Bodek { 28819b8d05b8SZbigniew Bodek struct ena_admin_host_info *host_info; 288246021271SMarcin Wojtas uintptr_t rid; 28839b8d05b8SZbigniew Bodek int rc; 28849b8d05b8SZbigniew Bodek 28859b8d05b8SZbigniew Bodek /* Allocate only the host info */ 28869b8d05b8SZbigniew Bodek rc = ena_com_allocate_host_info(ena_dev); 28873f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 28883fc5d816SMarcin Wojtas ena_log(dev, ERR, "Cannot allocate host info\n"); 28899b8d05b8SZbigniew Bodek return; 28909b8d05b8SZbigniew Bodek } 28919b8d05b8SZbigniew Bodek 28929b8d05b8SZbigniew Bodek host_info = ena_dev->host_attr.host_info; 28939b8d05b8SZbigniew Bodek 289446021271SMarcin Wojtas if (pci_get_id(dev, PCI_ID_RID, &rid) == 0) 289546021271SMarcin Wojtas host_info->bdf = rid; 28969b8d05b8SZbigniew Bodek host_info->os_type = ENA_ADMIN_OS_FREEBSD; 28979b8d05b8SZbigniew Bodek host_info->kernel_ver = osreldate; 28989b8d05b8SZbigniew Bodek 28999b8d05b8SZbigniew Bodek sprintf(host_info->kernel_ver_str, "%d", osreldate); 29009b8d05b8SZbigniew Bodek host_info->os_dist = 0; 29019b8d05b8SZbigniew Bodek strncpy(host_info->os_dist_str, osrelease, 29029b8d05b8SZbigniew Bodek sizeof(host_info->os_dist_str) - 1); 29039b8d05b8SZbigniew 
Bodek 29048f15f8a7SDawid Gorecki host_info->driver_version = (ENA_DRV_MODULE_VER_MAJOR) | 29058f15f8a7SDawid Gorecki (ENA_DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 29068f15f8a7SDawid Gorecki (ENA_DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 29078ece6b25SMarcin Wojtas host_info->num_cpus = mp_ncpus; 2908c7444389SMarcin Wojtas host_info->driver_supported_features = 29096d1ef2abSArtur Rojek ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | 29106d1ef2abSArtur Rojek ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK; 29119b8d05b8SZbigniew Bodek 29129b8d05b8SZbigniew Bodek rc = ena_com_set_host_attributes(ena_dev); 29133f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 2914a195fab0SMarcin Wojtas if (rc == EOPNOTSUPP) 29153fc5d816SMarcin Wojtas ena_log(dev, WARN, "Cannot set host attributes\n"); 29169b8d05b8SZbigniew Bodek else 29173fc5d816SMarcin Wojtas ena_log(dev, ERR, "Cannot set host attributes\n"); 29189b8d05b8SZbigniew Bodek 29199b8d05b8SZbigniew Bodek goto err; 29209b8d05b8SZbigniew Bodek } 29219b8d05b8SZbigniew Bodek 29229b8d05b8SZbigniew Bodek return; 29239b8d05b8SZbigniew Bodek 29249b8d05b8SZbigniew Bodek err: 29259b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 29269b8d05b8SZbigniew Bodek } 29279b8d05b8SZbigniew Bodek 29289b8d05b8SZbigniew Bodek static int 29299b8d05b8SZbigniew Bodek ena_device_init(struct ena_adapter *adapter, device_t pdev, 29309b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active) 29319b8d05b8SZbigniew Bodek { 29323324e304SMichal Krawczyk struct ena_llq_configurations llq_config; 29339b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 29349b8d05b8SZbigniew Bodek bool readless_supported; 29359b8d05b8SZbigniew Bodek uint32_t aenq_groups; 29369b8d05b8SZbigniew Bodek int dma_width; 29379b8d05b8SZbigniew Bodek int rc; 29389b8d05b8SZbigniew Bodek 29399b8d05b8SZbigniew Bodek rc = ena_com_mmio_reg_read_request_init(ena_dev); 29403f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 29413fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to init mmio read less\n"); 29420bdffe59SMarcin Wojtas return (rc); 29439b8d05b8SZbigniew Bodek } 29449b8d05b8SZbigniew Bodek 29459b8d05b8SZbigniew Bodek /* 29469b8d05b8SZbigniew Bodek * The PCIe configuration space revision id indicate if mmio reg 29479b8d05b8SZbigniew Bodek * read is disabled 29489b8d05b8SZbigniew Bodek */ 29499b8d05b8SZbigniew Bodek readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ); 29509b8d05b8SZbigniew Bodek ena_com_set_mmio_read_mode(ena_dev, readless_supported); 29519b8d05b8SZbigniew Bodek 2952a195fab0SMarcin Wojtas rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 29533f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 29543fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Can not reset device\n"); 29559b8d05b8SZbigniew Bodek goto err_mmio_read_less; 29569b8d05b8SZbigniew Bodek } 29579b8d05b8SZbigniew Bodek 29589b8d05b8SZbigniew Bodek rc = ena_com_validate_version(ena_dev); 29593f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 29603fc5d816SMarcin Wojtas ena_log(pdev, ERR, "device version is too low\n"); 29619b8d05b8SZbigniew Bodek goto err_mmio_read_less; 29629b8d05b8SZbigniew Bodek } 29639b8d05b8SZbigniew Bodek 29649b8d05b8SZbigniew Bodek dma_width = ena_com_get_dma_width(ena_dev); 29653f9ed7abSMarcin Wojtas if (unlikely(dma_width < 0)) { 29663fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Invalid dma width value %d", dma_width); 29679b8d05b8SZbigniew Bodek rc = dma_width; 29689b8d05b8SZbigniew Bodek goto err_mmio_read_less; 
29699b8d05b8SZbigniew Bodek } 29709b8d05b8SZbigniew Bodek adapter->dma_width = dma_width; 29719b8d05b8SZbigniew Bodek 29729b8d05b8SZbigniew Bodek /* ENA admin level init */ 297367ec48bbSMarcin Wojtas rc = ena_com_admin_init(ena_dev, &aenq_handlers); 29743f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 29753fc5d816SMarcin Wojtas ena_log(pdev, ERR, 29769b8d05b8SZbigniew Bodek "Can not initialize ena admin queue with device\n"); 29779b8d05b8SZbigniew Bodek goto err_mmio_read_less; 29789b8d05b8SZbigniew Bodek } 29799b8d05b8SZbigniew Bodek 29809b8d05b8SZbigniew Bodek /* 29819b8d05b8SZbigniew Bodek * To enable the msix interrupts the driver needs to know the number 29829b8d05b8SZbigniew Bodek * of queues. So the driver uses polling mode to retrieve this 29839b8d05b8SZbigniew Bodek * information 29849b8d05b8SZbigniew Bodek */ 29859b8d05b8SZbigniew Bodek ena_com_set_admin_polling_mode(ena_dev, true); 29869b8d05b8SZbigniew Bodek 298746021271SMarcin Wojtas ena_config_host_info(ena_dev, pdev); 29889b8d05b8SZbigniew Bodek 29899b8d05b8SZbigniew Bodek /* Get Device Attributes */ 29909b8d05b8SZbigniew Bodek rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 29913f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 29923fc5d816SMarcin Wojtas ena_log(pdev, ERR, 29939b8d05b8SZbigniew Bodek "Cannot get attribute for ena device rc: %d\n", rc); 29949b8d05b8SZbigniew Bodek goto err_admin_init; 29959b8d05b8SZbigniew Bodek } 29969b8d05b8SZbigniew Bodek 2997e6de9a83SMarcin Wojtas aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 2998e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_FATAL_ERROR) | 2999e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_WARNING) | 300040621d71SMarcin Wojtas BIT(ENA_ADMIN_NOTIFICATION) | 30018cd86b51SOsama Abboud BIT(ENA_ADMIN_KEEP_ALIVE) | 300270587942SOsama Abboud BIT(ENA_ADMIN_CONF_NOTIFICATIONS) | 300370587942SOsama Abboud BIT(ENA_ADMIN_DEVICE_REQUEST_RESET); 30049b8d05b8SZbigniew Bodek 30059b8d05b8SZbigniew Bodek aenq_groups &= get_feat_ctx->aenq.supported_groups; 30069b8d05b8SZbigniew Bodek rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 30073f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 30083fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Cannot configure aenq groups rc: %d\n", rc); 30099b8d05b8SZbigniew Bodek goto err_admin_init; 30109b8d05b8SZbigniew Bodek } 30119b8d05b8SZbigniew Bodek 30129b8d05b8SZbigniew Bodek *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 30139b8d05b8SZbigniew Bodek 3014b1c38df0SOsama Abboud ena_set_llq_configurations(&llq_config, &get_feat_ctx->llq, adapter); 30153324e304SMichal Krawczyk 30163324e304SMichal Krawczyk rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq, 30173324e304SMichal Krawczyk &llq_config); 30183324e304SMichal Krawczyk if (unlikely(rc != 0)) { 30193324e304SMichal Krawczyk ena_log(pdev, ERR, "Failed to set placement policy\n"); 30203324e304SMichal Krawczyk goto err_admin_init; 30213324e304SMichal Krawczyk } 30223324e304SMichal Krawczyk 30230bdffe59SMarcin Wojtas return (0); 30249b8d05b8SZbigniew Bodek 30259b8d05b8SZbigniew Bodek err_admin_init: 30269b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 30279b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 30289b8d05b8SZbigniew Bodek err_mmio_read_less: 30299b8d05b8SZbigniew Bodek ena_com_mmio_reg_read_request_destroy(ena_dev); 30309b8d05b8SZbigniew Bodek 30310bdffe59SMarcin Wojtas return (rc); 30329b8d05b8SZbigniew Bodek } 30339b8d05b8SZbigniew Bodek 303482e558eaSDawid Gorecki static int 303582e558eaSDawid Gorecki ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter) 
30369b8d05b8SZbigniew Bodek { 30379b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 30389b8d05b8SZbigniew Bodek int rc; 30399b8d05b8SZbigniew Bodek 30409b8d05b8SZbigniew Bodek rc = ena_enable_msix(adapter); 30413f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 30423fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "Error with MSI-X enablement\n"); 30430bdffe59SMarcin Wojtas return (rc); 30449b8d05b8SZbigniew Bodek } 30459b8d05b8SZbigniew Bodek 30469b8d05b8SZbigniew Bodek ena_setup_mgmnt_intr(adapter); 30479b8d05b8SZbigniew Bodek 30489b8d05b8SZbigniew Bodek rc = ena_request_mgmnt_irq(adapter); 30493f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 30503fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "Cannot setup mgmnt queue intr\n"); 30519b8d05b8SZbigniew Bodek goto err_disable_msix; 30529b8d05b8SZbigniew Bodek } 30539b8d05b8SZbigniew Bodek 30549b8d05b8SZbigniew Bodek ena_com_set_admin_polling_mode(ena_dev, false); 30559b8d05b8SZbigniew Bodek 30569b8d05b8SZbigniew Bodek ena_com_admin_aenq_enable(ena_dev); 30579b8d05b8SZbigniew Bodek 30580bdffe59SMarcin Wojtas return (0); 30599b8d05b8SZbigniew Bodek 30609b8d05b8SZbigniew Bodek err_disable_msix: 30619b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 30629b8d05b8SZbigniew Bodek 30630bdffe59SMarcin Wojtas return (rc); 30649b8d05b8SZbigniew Bodek } 30659b8d05b8SZbigniew Bodek 30669b8d05b8SZbigniew Bodek /* Function called on ENA_ADMIN_KEEP_ALIVE event */ 306782e558eaSDawid Gorecki static void 306882e558eaSDawid Gorecki ena_keep_alive_wd(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) 30699b8d05b8SZbigniew Bodek { 30709b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 307130217e2dSMarcin Wojtas struct ena_admin_aenq_keep_alive_desc *desc; 30729b8d05b8SZbigniew Bodek sbintime_t stime; 307330217e2dSMarcin Wojtas uint64_t rx_drops; 30746c84cec3SMarcin Wojtas uint64_t tx_drops; 307530217e2dSMarcin Wojtas 307630217e2dSMarcin Wojtas desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; 307730217e2dSMarcin Wojtas 307830217e2dSMarcin Wojtas rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low; 30796c84cec3SMarcin Wojtas tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low; 308030217e2dSMarcin Wojtas counter_u64_zero(adapter->hw_stats.rx_drops); 308130217e2dSMarcin Wojtas counter_u64_add(adapter->hw_stats.rx_drops, rx_drops); 30826c84cec3SMarcin Wojtas counter_u64_zero(adapter->hw_stats.tx_drops); 30836c84cec3SMarcin Wojtas counter_u64_add(adapter->hw_stats.tx_drops, tx_drops); 30849b8d05b8SZbigniew Bodek 30859b8d05b8SZbigniew Bodek stime = getsbinuptime(); 30869b8d05b8SZbigniew Bodek atomic_store_rel_64(&adapter->keep_alive_timestamp, stime); 30879b8d05b8SZbigniew Bodek } 30889b8d05b8SZbigniew Bodek 30899b8d05b8SZbigniew Bodek /* Check for keep alive expiration */ 309082e558eaSDawid Gorecki static void 309182e558eaSDawid Gorecki check_for_missing_keep_alive(struct ena_adapter *adapter) 30929b8d05b8SZbigniew Bodek { 30939b8d05b8SZbigniew Bodek sbintime_t timestamp, time; 3094274319acSOsama Abboud enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 30959b8d05b8SZbigniew Bodek 30969b8d05b8SZbigniew Bodek if (adapter->wd_active == 0) 30979b8d05b8SZbigniew Bodek return; 30989b8d05b8SZbigniew Bodek 309940621d71SMarcin Wojtas if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 31009b8d05b8SZbigniew Bodek return; 31019b8d05b8SZbigniew Bodek 31029b8d05b8SZbigniew Bodek timestamp = 
atomic_load_acq_64(&adapter->keep_alive_timestamp); 31039b8d05b8SZbigniew Bodek time = getsbinuptime() - timestamp; 31049b8d05b8SZbigniew Bodek if (unlikely(time > adapter->keep_alive_timeout)) { 31053fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "Keep alive watchdog timeout.\n"); 3106274319acSOsama Abboud if (ena_com_aenq_has_keep_alive(adapter->ena_dev)) 3107274319acSOsama Abboud reset_reason = ENA_REGS_RESET_MISSING_ADMIN_INTERRUPT; 3108274319acSOsama Abboud 3109274319acSOsama Abboud ena_trigger_reset(adapter, reset_reason); 31109b8d05b8SZbigniew Bodek } 3111858659f7SMarcin Wojtas } 31129b8d05b8SZbigniew Bodek 31139b8d05b8SZbigniew Bodek /* Check if admin queue is enabled */ 311482e558eaSDawid Gorecki static void 311582e558eaSDawid Gorecki check_for_admin_com_state(struct ena_adapter *adapter) 31169b8d05b8SZbigniew Bodek { 3117274319acSOsama Abboud enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_ADMIN_TO; 311882e558eaSDawid Gorecki if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == false)) { 31193fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 31209b8d05b8SZbigniew Bodek "ENA admin queue is not in running state!\n"); 3121274319acSOsama Abboud counter_u64_add(adapter->dev_stats.admin_q_pause, 1); 3122274319acSOsama Abboud if (ena_com_get_missing_admin_interrupt(adapter->ena_dev)) 3123274319acSOsama Abboud reset_reason = ENA_REGS_RESET_MISSING_ADMIN_INTERRUPT; 3124274319acSOsama Abboud 3125274319acSOsama Abboud ena_trigger_reset(adapter, reset_reason); 31269b8d05b8SZbigniew Bodek } 3127858659f7SMarcin Wojtas } 31289b8d05b8SZbigniew Bodek 312974dba3adSMarcin Wojtas static int 3130d12f7bfcSMarcin Wojtas check_for_rx_interrupt_queue(struct ena_adapter *adapter, 3131d12f7bfcSMarcin Wojtas struct ena_ring *rx_ring) 3132d12f7bfcSMarcin Wojtas { 31330ac122c3SDawid Gorecki if (likely(atomic_load_8(&rx_ring->first_interrupt))) 3134d12f7bfcSMarcin Wojtas return (0); 3135d12f7bfcSMarcin Wojtas 3136d12f7bfcSMarcin Wojtas if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) 3137d12f7bfcSMarcin Wojtas return (0); 3138d12f7bfcSMarcin Wojtas 3139d12f7bfcSMarcin Wojtas rx_ring->no_interrupt_event_cnt++; 3140d12f7bfcSMarcin Wojtas 314182e558eaSDawid Gorecki if (rx_ring->no_interrupt_event_cnt == 314282e558eaSDawid Gorecki ENA_MAX_NO_INTERRUPT_ITERATIONS) { 314382e558eaSDawid Gorecki ena_log(adapter->pdev, ERR, 314482e558eaSDawid Gorecki "Potential MSIX issue on Rx side Queue = %d. 
Reset the device\n", 314582e558eaSDawid Gorecki rx_ring->qid); 31467926bc44SMarcin Wojtas ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT); 3147d12f7bfcSMarcin Wojtas return (EIO); 3148d12f7bfcSMarcin Wojtas } 3149d12f7bfcSMarcin Wojtas 3150d12f7bfcSMarcin Wojtas return (0); 3151d12f7bfcSMarcin Wojtas } 3152d12f7bfcSMarcin Wojtas 3153a33ec635SOsama Abboud static enum ena_regs_reset_reason_types 3154a33ec635SOsama Abboud check_cdesc_in_tx_cq(struct ena_adapter *adapter, 3155a33ec635SOsama Abboud struct ena_ring *tx_ring) 3156a33ec635SOsama Abboud { 3157a33ec635SOsama Abboud device_t pdev = adapter->pdev; 3158a33ec635SOsama Abboud int rc; 3159a33ec635SOsama Abboud u16 req_id; 3160a33ec635SOsama Abboud 3161a33ec635SOsama Abboud rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id); 3162a33ec635SOsama Abboud /* TX CQ is empty */ 3163a33ec635SOsama Abboud if (rc == ENA_COM_TRY_AGAIN) { 3164a33ec635SOsama Abboud ena_log(pdev, ERR, 3165a33ec635SOsama Abboud "No completion descriptors found in CQ %d\n", 3166a33ec635SOsama Abboud tx_ring->qid); 3167a33ec635SOsama Abboud return ENA_REGS_RESET_MISS_TX_CMPL; 3168a33ec635SOsama Abboud } 3169a33ec635SOsama Abboud 3170a33ec635SOsama Abboud /* TX CQ has cdescs */ 3171a33ec635SOsama Abboud ena_log(pdev, ERR, 3172a33ec635SOsama Abboud "Completion descriptors found in CQ %d", 3173a33ec635SOsama Abboud tx_ring->qid); 3174a33ec635SOsama Abboud 3175a33ec635SOsama Abboud return ENA_REGS_RESET_MISS_INTERRUPT; 3176a33ec635SOsama Abboud } 3177a33ec635SOsama Abboud 3178d12f7bfcSMarcin Wojtas static int 3179d12f7bfcSMarcin Wojtas check_missing_comp_in_tx_queue(struct ena_adapter *adapter, 318074dba3adSMarcin Wojtas struct ena_ring *tx_ring) 318174dba3adSMarcin Wojtas { 31821f67704eSOsama Abboud uint32_t missed_tx = 0, new_missed_tx = 0; 31833fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 318474dba3adSMarcin Wojtas struct bintime curtime, time; 318574dba3adSMarcin Wojtas struct ena_tx_buffer *tx_buf; 3186d8aba82bSDawid Gorecki int time_since_last_cleanup; 3187d8aba82bSDawid Gorecki int missing_tx_comp_to; 3188d12f7bfcSMarcin Wojtas sbintime_t time_offset; 3189d12f7bfcSMarcin Wojtas int i, rc = 0; 3190a33ec635SOsama Abboud enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_MISS_TX_CMPL; 3191a33ec635SOsama Abboud bool cleanup_scheduled, cleanup_running; 319274dba3adSMarcin Wojtas 319374dba3adSMarcin Wojtas getbinuptime(&curtime); 319474dba3adSMarcin Wojtas 319574dba3adSMarcin Wojtas for (i = 0; i < tx_ring->ring_size; i++) { 319674dba3adSMarcin Wojtas tx_buf = &tx_ring->tx_buffer_info[i]; 319774dba3adSMarcin Wojtas 31980bdffe59SMarcin Wojtas if (bintime_isset(&tx_buf->timestamp) == 0) 319974dba3adSMarcin Wojtas continue; 320074dba3adSMarcin Wojtas 320174dba3adSMarcin Wojtas time = curtime; 320274dba3adSMarcin Wojtas bintime_sub(&time, &tx_buf->timestamp); 3203d12f7bfcSMarcin Wojtas time_offset = bttosbt(time); 3204d12f7bfcSMarcin Wojtas 32050ac122c3SDawid Gorecki if (unlikely(!atomic_load_8(&tx_ring->first_interrupt) && 3206d12f7bfcSMarcin Wojtas time_offset > 2 * adapter->missing_tx_timeout)) { 3207d12f7bfcSMarcin Wojtas /* 3208d12f7bfcSMarcin Wojtas * If after graceful period interrupt is still not 3209d12f7bfcSMarcin Wojtas * received, we schedule a reset. 3210d12f7bfcSMarcin Wojtas */ 32113fc5d816SMarcin Wojtas ena_log(pdev, ERR, 3212d12f7bfcSMarcin Wojtas "Potential MSIX issue on Tx side Queue = %d. 
" 321382e558eaSDawid Gorecki "Reset the device\n", 321482e558eaSDawid Gorecki tx_ring->qid); 32157926bc44SMarcin Wojtas ena_trigger_reset(adapter, 32167926bc44SMarcin Wojtas ENA_REGS_RESET_MISS_INTERRUPT); 3217d12f7bfcSMarcin Wojtas return (EIO); 3218d12f7bfcSMarcin Wojtas } 321974dba3adSMarcin Wojtas 322074dba3adSMarcin Wojtas /* Check again if packet is still waiting */ 3221d12f7bfcSMarcin Wojtas if (unlikely(time_offset > adapter->missing_tx_timeout)) { 322274dba3adSMarcin Wojtas 3223f01b2cd9SArthur Kiyanovski if (tx_buf->print_once) { 32249272e45cSArthur Kiyanovski time_since_last_cleanup = TICKS_2_MSEC(ticks - 3225d8aba82bSDawid Gorecki tx_ring->tx_last_cleanup_ticks); 322682e558eaSDawid Gorecki missing_tx_comp_to = sbttoms( 322782e558eaSDawid Gorecki adapter->missing_tx_timeout); 322882e558eaSDawid Gorecki ena_log(pdev, WARN, 322982e558eaSDawid Gorecki "Found a Tx that wasn't completed on time, qid %d, index %d. " 32309272e45cSArthur Kiyanovski "%d msecs have passed since last cleanup. Missing Tx timeout value %d msecs.\n", 3231d8aba82bSDawid Gorecki tx_ring->qid, i, time_since_last_cleanup, 3232d8aba82bSDawid Gorecki missing_tx_comp_to); 32331f67704eSOsama Abboud /* Add new TX completions which are missed */ 32341f67704eSOsama Abboud new_missed_tx++; 3235d8aba82bSDawid Gorecki } 323674dba3adSMarcin Wojtas 3237f01b2cd9SArthur Kiyanovski tx_buf->print_once = false; 323874dba3adSMarcin Wojtas missed_tx++; 3239d12f7bfcSMarcin Wojtas } 3240d12f7bfcSMarcin Wojtas } 32411f67704eSOsama Abboud /* Checking if this TX ring missing TX completions have passed the threshold */ 3242d12f7bfcSMarcin Wojtas if (unlikely(missed_tx > adapter->missing_tx_threshold)) { 32433fc5d816SMarcin Wojtas ena_log(pdev, ERR, 3244d12f7bfcSMarcin Wojtas "The number of lost tx completion is above the threshold " 3245d12f7bfcSMarcin Wojtas "(%d > %d). Reset the device\n", 32464e8acd84SMarcin Wojtas missed_tx, adapter->missing_tx_threshold); 3247a33ec635SOsama Abboud /* Set the reset flag to prevent ena_cleanup() from running */ 3248a33ec635SOsama Abboud ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 3249a33ec635SOsama Abboud /* Need to make sure that ENA_FLAG_TRIGGER_RESET is visible to ena_cleanup() and 3250a33ec635SOsama Abboud * that cleanup_running is visible to check_missing_comp_in_tx_queue() to 3251a33ec635SOsama Abboud * prevent the case of accessing CQ concurrently with check_cdesc_in_tx_cq() 3252a33ec635SOsama Abboud */ 3253a33ec635SOsama Abboud mb(); 3254a33ec635SOsama Abboud cleanup_scheduled = !!(atomic_load_16(&tx_ring->que->cleanup_task.ta_pending)); 3255a33ec635SOsama Abboud cleanup_running = !!(atomic_load_8((&tx_ring->cleanup_running))); 3256a33ec635SOsama Abboud if (!(cleanup_scheduled || cleanup_running)) 3257a33ec635SOsama Abboud reset_reason = check_cdesc_in_tx_cq(adapter, tx_ring); 3258a33ec635SOsama Abboud 3259a33ec635SOsama Abboud adapter->reset_reason = reset_reason; 3260d12f7bfcSMarcin Wojtas rc = EIO; 326174dba3adSMarcin Wojtas } 32621f67704eSOsama Abboud /* Add the newly discovered missing TX completions */ 32631f67704eSOsama Abboud counter_u64_add(tx_ring->tx_stats.missing_tx_comp, new_missed_tx); 3264d12f7bfcSMarcin Wojtas 3265d12f7bfcSMarcin Wojtas return (rc); 326674dba3adSMarcin Wojtas } 326774dba3adSMarcin Wojtas 32689b8d05b8SZbigniew Bodek /* 32699b8d05b8SZbigniew Bodek * Check for TX which were not completed on time. 32709b8d05b8SZbigniew Bodek * Timeout is defined by "missing_tx_timeout". 
32719b8d05b8SZbigniew Bodek * Reset will be performed if number of incompleted 32729b8d05b8SZbigniew Bodek * transactions exceeds "missing_tx_threshold". 32739b8d05b8SZbigniew Bodek */ 32740bdffe59SMarcin Wojtas static void 3275d12f7bfcSMarcin Wojtas check_for_missing_completions(struct ena_adapter *adapter) 32769b8d05b8SZbigniew Bodek { 32779b8d05b8SZbigniew Bodek struct ena_ring *tx_ring; 3278d12f7bfcSMarcin Wojtas struct ena_ring *rx_ring; 327974dba3adSMarcin Wojtas int i, budget, rc; 32809b8d05b8SZbigniew Bodek 32819b8d05b8SZbigniew Bodek /* Make sure the driver doesn't turn the device in other process */ 32829b8d05b8SZbigniew Bodek rmb(); 32839b8d05b8SZbigniew Bodek 3284fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 32859b8d05b8SZbigniew Bodek return; 32869b8d05b8SZbigniew Bodek 3287fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) 32889b8d05b8SZbigniew Bodek return; 32899b8d05b8SZbigniew Bodek 329040621d71SMarcin Wojtas if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT) 32919b8d05b8SZbigniew Bodek return; 32929b8d05b8SZbigniew Bodek 32939b8d05b8SZbigniew Bodek budget = adapter->missing_tx_max_queues; 32949b8d05b8SZbigniew Bodek 32957d8c4feeSMarcin Wojtas for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) { 32969b8d05b8SZbigniew Bodek tx_ring = &adapter->tx_ring[i]; 3297d12f7bfcSMarcin Wojtas rx_ring = &adapter->rx_ring[i]; 32989b8d05b8SZbigniew Bodek 3299d12f7bfcSMarcin Wojtas rc = check_missing_comp_in_tx_queue(adapter, tx_ring); 3300d12f7bfcSMarcin Wojtas if (unlikely(rc != 0)) 3301d12f7bfcSMarcin Wojtas return; 3302d12f7bfcSMarcin Wojtas 3303d12f7bfcSMarcin Wojtas rc = check_for_rx_interrupt_queue(adapter, rx_ring); 33040bdffe59SMarcin Wojtas if (unlikely(rc != 0)) 33059b8d05b8SZbigniew Bodek return; 33069b8d05b8SZbigniew Bodek 33079b8d05b8SZbigniew Bodek budget--; 3308cd5d5804SMarcin Wojtas if (budget == 0) { 33099b8d05b8SZbigniew Bodek i++; 33109b8d05b8SZbigniew Bodek break; 33119b8d05b8SZbigniew Bodek } 33129b8d05b8SZbigniew Bodek } 33139b8d05b8SZbigniew Bodek 33147d8c4feeSMarcin Wojtas adapter->next_monitored_tx_qid = i % adapter->num_io_queues; 33159b8d05b8SZbigniew Bodek } 33169b8d05b8SZbigniew Bodek 33175cb9db07SMarcin Wojtas /* trigger rx cleanup after 2 consecutive detections */ 3318efe6ab18SMarcin Wojtas #define EMPTY_RX_REFILL 2 3319efe6ab18SMarcin Wojtas /* For the rare case where the device runs out of Rx descriptors and the 3320efe6ab18SMarcin Wojtas * msix handler failed to refill new Rx descriptors (due to a lack of memory 3321efe6ab18SMarcin Wojtas * for example). 3322efe6ab18SMarcin Wojtas * This case will lead to a deadlock: 3323efe6ab18SMarcin Wojtas * The device won't send interrupts since all the new Rx packets will be dropped 3324efe6ab18SMarcin Wojtas * The msix handler won't allocate new Rx descriptors so the device won't be 3325efe6ab18SMarcin Wojtas * able to send new packets. 
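 * A ring is considered starved once ena_com_free_q_entries() reports
 * ring_size - 1 free submission queue entries on EMPTY_RX_REFILL (2)
 * consecutive passes of the check below.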
3326efe6ab18SMarcin Wojtas * 3327efe6ab18SMarcin Wojtas * When such a situation is detected - execute rx cleanup task in another thread 3328efe6ab18SMarcin Wojtas */ 3329efe6ab18SMarcin Wojtas static void 3330efe6ab18SMarcin Wojtas check_for_empty_rx_ring(struct ena_adapter *adapter) 3331efe6ab18SMarcin Wojtas { 3332efe6ab18SMarcin Wojtas struct ena_ring *rx_ring; 3333efe6ab18SMarcin Wojtas int i, refill_required; 3334efe6ab18SMarcin Wojtas 3335fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 3336efe6ab18SMarcin Wojtas return; 3337efe6ab18SMarcin Wojtas 3338fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) 3339efe6ab18SMarcin Wojtas return; 3340efe6ab18SMarcin Wojtas 33417d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 3342efe6ab18SMarcin Wojtas rx_ring = &adapter->rx_ring[i]; 3343efe6ab18SMarcin Wojtas 334482e558eaSDawid Gorecki refill_required = ena_com_free_q_entries( 334582e558eaSDawid Gorecki rx_ring->ena_com_io_sq); 3346efe6ab18SMarcin Wojtas if (unlikely(refill_required == (rx_ring->ring_size - 1))) { 3347efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue++; 3348efe6ab18SMarcin Wojtas 3349efe6ab18SMarcin Wojtas if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { 3350efe6ab18SMarcin Wojtas counter_u64_add(rx_ring->rx_stats.empty_rx_ring, 3351efe6ab18SMarcin Wojtas 1); 3352efe6ab18SMarcin Wojtas 33533fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 33543fc5d816SMarcin Wojtas "Rx ring %d is stalled. Triggering the refill function\n", 33553fc5d816SMarcin Wojtas i); 3356efe6ab18SMarcin Wojtas 33575cb9db07SMarcin Wojtas taskqueue_enqueue(rx_ring->que->cleanup_tq, 33585cb9db07SMarcin Wojtas &rx_ring->que->cleanup_task); 3359efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue = 0; 3360efe6ab18SMarcin Wojtas } 3361efe6ab18SMarcin Wojtas } else { 3362efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue = 0; 3363efe6ab18SMarcin Wojtas } 3364efe6ab18SMarcin Wojtas } 3365efe6ab18SMarcin Wojtas } 33669b8d05b8SZbigniew Bodek 336782e558eaSDawid Gorecki static void 336882e558eaSDawid Gorecki ena_update_hints(struct ena_adapter *adapter, 336940621d71SMarcin Wojtas struct ena_admin_ena_hw_hints *hints) 337040621d71SMarcin Wojtas { 337140621d71SMarcin Wojtas struct ena_com_dev *ena_dev = adapter->ena_dev; 337240621d71SMarcin Wojtas 337340621d71SMarcin Wojtas if (hints->admin_completion_tx_timeout) 337440621d71SMarcin Wojtas ena_dev->admin_queue.completion_timeout = 337540621d71SMarcin Wojtas hints->admin_completion_tx_timeout * 1000; 337640621d71SMarcin Wojtas 337740621d71SMarcin Wojtas if (hints->mmio_read_timeout) 337840621d71SMarcin Wojtas /* convert to usec */ 337982e558eaSDawid Gorecki ena_dev->mmio_read.reg_read_to = hints->mmio_read_timeout * 1000; 338040621d71SMarcin Wojtas 338140621d71SMarcin Wojtas if (hints->missed_tx_completion_count_threshold_to_reset) 338240621d71SMarcin Wojtas adapter->missing_tx_threshold = 338340621d71SMarcin Wojtas hints->missed_tx_completion_count_threshold_to_reset; 338440621d71SMarcin Wojtas 338540621d71SMarcin Wojtas if (hints->missing_tx_completion_timeout) { 338640621d71SMarcin Wojtas if (hints->missing_tx_completion_timeout == 338740621d71SMarcin Wojtas ENA_HW_HINTS_NO_TIMEOUT) 338840621d71SMarcin Wojtas adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT; 338940621d71SMarcin Wojtas else 339082e558eaSDawid Gorecki adapter->missing_tx_timeout = SBT_1MS * 339182e558eaSDawid Gorecki hints->missing_tx_completion_timeout; 339240621d71SMarcin Wojtas } 339340621d71SMarcin Wojtas 339440621d71SMarcin Wojtas if 
(hints->driver_watchdog_timeout) { 339540621d71SMarcin Wojtas if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 339640621d71SMarcin Wojtas adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 339740621d71SMarcin Wojtas else 339882e558eaSDawid Gorecki adapter->keep_alive_timeout = SBT_1MS * 339982e558eaSDawid Gorecki hints->driver_watchdog_timeout; 340040621d71SMarcin Wojtas } 340140621d71SMarcin Wojtas } 340240621d71SMarcin Wojtas 3403f180142cSMarcin Wojtas /** 3404f180142cSMarcin Wojtas * ena_copy_eni_metrics - Get and copy ENI metrics from the HW. 3405f180142cSMarcin Wojtas * @adapter: ENA device adapter 3406f180142cSMarcin Wojtas * 3407f180142cSMarcin Wojtas * Returns 0 on success, EOPNOTSUPP if current HW doesn't support those metrics 3408f180142cSMarcin Wojtas * and other error codes on failure. 3409f180142cSMarcin Wojtas * 3410f180142cSMarcin Wojtas * This function can possibly cause a race with other calls to the admin queue. 3411f180142cSMarcin Wojtas * Because of that, the caller should either lock this function or make sure 3412f180142cSMarcin Wojtas * that there is no race in the current context. 3413f180142cSMarcin Wojtas */ 3414f180142cSMarcin Wojtas static int 3415f180142cSMarcin Wojtas ena_copy_eni_metrics(struct ena_adapter *adapter) 3416f180142cSMarcin Wojtas { 3417f180142cSMarcin Wojtas static bool print_once = true; 3418f180142cSMarcin Wojtas int rc; 3419f180142cSMarcin Wojtas 3420f180142cSMarcin Wojtas rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_metrics); 3421f180142cSMarcin Wojtas 3422f180142cSMarcin Wojtas if (rc != 0) { 3423f180142cSMarcin Wojtas if (rc == ENA_COM_UNSUPPORTED) { 3424f180142cSMarcin Wojtas if (print_once) { 34253fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 3426f180142cSMarcin Wojtas "Retrieving ENI metrics is not supported.\n"); 3427f180142cSMarcin Wojtas print_once = false; 3428f180142cSMarcin Wojtas } else { 34293fc5d816SMarcin Wojtas ena_log(adapter->pdev, DBG, 3430f180142cSMarcin Wojtas "Retrieving ENI metrics is not supported.\n"); 3431f180142cSMarcin Wojtas } 3432f180142cSMarcin Wojtas } else { 34333fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 3434f180142cSMarcin Wojtas "Failed to get ENI metrics: %d\n", rc); 3435f180142cSMarcin Wojtas } 3436f180142cSMarcin Wojtas } 3437f180142cSMarcin Wojtas 3438f180142cSMarcin Wojtas return (rc); 3439f180142cSMarcin Wojtas } 3440f180142cSMarcin Wojtas 3441f97993adSOsama Abboud static int 344236d42c86SOsama Abboud ena_copy_srd_metrics(struct ena_adapter *adapter) 344336d42c86SOsama Abboud { 344436d42c86SOsama Abboud return ena_com_get_ena_srd_info(adapter->ena_dev, &adapter->ena_srd_info); 344536d42c86SOsama Abboud } 344636d42c86SOsama Abboud 344736d42c86SOsama Abboud static int 3448f97993adSOsama Abboud ena_copy_customer_metrics(struct ena_adapter *adapter) 3449f97993adSOsama Abboud { 3450f97993adSOsama Abboud struct ena_com_dev *dev; 3451f97993adSOsama Abboud u32 supported_metrics_count; 3452f97993adSOsama Abboud int rc, len; 3453f97993adSOsama Abboud 3454f97993adSOsama Abboud dev = adapter->ena_dev; 3455f97993adSOsama Abboud 3456f97993adSOsama Abboud supported_metrics_count = ena_com_get_customer_metric_count(dev); 3457f97993adSOsama Abboud len = supported_metrics_count * sizeof(u64); 3458f97993adSOsama Abboud 3459f97993adSOsama Abboud /* Fill the data buffer */ 3460f97993adSOsama Abboud rc = ena_com_get_customer_metrics(adapter->ena_dev, 3461f97993adSOsama Abboud (char *)(adapter->customer_metrics_array), len); 3462f97993adSOsama Abboud 3463f97993adSOsama Abboud 
return (rc);
3464f97993adSOsama Abboud }
3465f97993adSOsama Abboud
34669b8d05b8SZbigniew Bodek static void
34679b8d05b8SZbigniew Bodek ena_timer_service(void *data)
34689b8d05b8SZbigniew Bodek {
34699b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)data;
34709b8d05b8SZbigniew Bodek struct ena_admin_host_info *host_info =
34719b8d05b8SZbigniew Bodek adapter->ena_dev->host_attr.host_info;
34729b8d05b8SZbigniew Bodek
34739b8d05b8SZbigniew Bodek check_for_missing_keep_alive(adapter);
34749b8d05b8SZbigniew Bodek
34759b8d05b8SZbigniew Bodek check_for_admin_com_state(adapter);
34769b8d05b8SZbigniew Bodek
3477d12f7bfcSMarcin Wojtas check_for_missing_completions(adapter);
34789b8d05b8SZbigniew Bodek
3479efe6ab18SMarcin Wojtas check_for_empty_rx_ring(adapter);
3480efe6ab18SMarcin Wojtas
3481f180142cSMarcin Wojtas /*
34825b925280SOsama Abboud * User-controlled update of the ENA metrics.
3483f180142cSMarcin Wojtas * If the delay was set to 0, then the stats shouldn't be updated at
3484f180142cSMarcin Wojtas * all.
34855b925280SOsama Abboud * Otherwise, wait 'metrics_sample_interval' seconds before
3486f180142cSMarcin Wojtas * updating stats.
3487f180142cSMarcin Wojtas * As the timer service is executed every second, it's enough to increment
3488f180142cSMarcin Wojtas * the appropriate counter each time the timer service is executed.
3489f180142cSMarcin Wojtas */
34905b925280SOsama Abboud if ((adapter->metrics_sample_interval != 0) &&
34915b925280SOsama Abboud (++adapter->metrics_sample_interval_cnt >=
34925b925280SOsama Abboud adapter->metrics_sample_interval)) {
3493b899a02aSDawid Gorecki taskqueue_enqueue(adapter->metrics_tq, &adapter->metrics_task);
34945b925280SOsama Abboud adapter->metrics_sample_interval_cnt = 0;
3495f180142cSMarcin Wojtas }
3496f180142cSMarcin Wojtas
3497f180142cSMarcin Wojtas
34980bdffe59SMarcin Wojtas if (host_info != NULL)
34999b8d05b8SZbigniew Bodek ena_update_host_info(host_info, adapter->ifp);
35009b8d05b8SZbigniew Bodek
3501fd43fd2aSMarcin Wojtas if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
3502d10ec3adSDawid Gorecki /*
3503d10ec3adSDawid Gorecki * Timeout when validating version indicates that the device
3504d10ec3adSDawid Gorecki * became unresponsive. If that happens, skip the reset and
3505d10ec3adSDawid Gorecki * reschedule timer service, so the reset can be retried later.
3506d10ec3adSDawid Gorecki */
3507d10ec3adSDawid Gorecki if (ena_com_validate_version(adapter->ena_dev) ==
3508d10ec3adSDawid Gorecki ENA_COM_TIMER_EXPIRED) {
3509d10ec3adSDawid Gorecki ena_log(adapter->pdev, WARN,
3510d10ec3adSDawid Gorecki "FW unresponsive, skipping reset\n");
3511d10ec3adSDawid Gorecki ENA_TIMER_RESET(adapter);
3512d10ec3adSDawid Gorecki return;
3513d10ec3adSDawid Gorecki }
35143fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, "Trigger reset is on\n");
35159b8d05b8SZbigniew Bodek taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
35169b8d05b8SZbigniew Bodek return;
35179b8d05b8SZbigniew Bodek }
35189b8d05b8SZbigniew Bodek
35199b8d05b8SZbigniew Bodek /*
35209b8d05b8SZbigniew Bodek * Schedule another timeout one second from now.
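 * ENA_TIMER_RESET() re-arms the callout for the next tick; the reset and
 * detach paths stop it with ENA_TIMER_DRAIN() before tearing the device
 * down (see ena_destroy_device() and ena_detach()).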
35219b8d05b8SZbigniew Bodek */ 352278554d0cSDawid Gorecki ENA_TIMER_RESET(adapter); 35239b8d05b8SZbigniew Bodek } 35249b8d05b8SZbigniew Bodek 352538c7b965SMarcin Wojtas void 352632f63fa7SMarcin Wojtas ena_destroy_device(struct ena_adapter *adapter, bool graceful) 35279b8d05b8SZbigniew Bodek { 352832f63fa7SMarcin Wojtas if_t ifp = adapter->ifp; 35299b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 35309b8d05b8SZbigniew Bodek bool dev_up; 353132f63fa7SMarcin Wojtas 353232f63fa7SMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) 353332f63fa7SMarcin Wojtas return; 353432f63fa7SMarcin Wojtas 3535c59a5fbdSArthur Kiyanovski if (!graceful) 353632f63fa7SMarcin Wojtas if_link_state_change(ifp, LINK_STATE_DOWN); 353732f63fa7SMarcin Wojtas 353878554d0cSDawid Gorecki ENA_TIMER_DRAIN(adapter); 353932f63fa7SMarcin Wojtas 354032f63fa7SMarcin Wojtas dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 354132f63fa7SMarcin Wojtas if (dev_up) 354232f63fa7SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 354332f63fa7SMarcin Wojtas 354432f63fa7SMarcin Wojtas if (!graceful) 354532f63fa7SMarcin Wojtas ena_com_set_admin_running_state(ena_dev, false); 354632f63fa7SMarcin Wojtas 354732f63fa7SMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 354832f63fa7SMarcin Wojtas ena_down(adapter); 354932f63fa7SMarcin Wojtas 355032f63fa7SMarcin Wojtas /* 355132f63fa7SMarcin Wojtas * Stop the device from sending AENQ events (if the device was up, and 355232f63fa7SMarcin Wojtas * the trigger reset was on, ena_down already performs device reset) 355332f63fa7SMarcin Wojtas */ 355432f63fa7SMarcin Wojtas if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up)) 355532f63fa7SMarcin Wojtas ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 355632f63fa7SMarcin Wojtas 355732f63fa7SMarcin Wojtas ena_free_mgmnt_irq(adapter); 355832f63fa7SMarcin Wojtas 355932f63fa7SMarcin Wojtas ena_disable_msix(adapter); 356032f63fa7SMarcin Wojtas 3561e2735b09SMarcin Wojtas /* 3562e2735b09SMarcin Wojtas * IO rings resources should be freed because `ena_restore_device()` 3563e2735b09SMarcin Wojtas * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX 3564e2735b09SMarcin Wojtas * vectors. The amount of MSIX vectors after destroy-restore may be 3565e2735b09SMarcin Wojtas * different than before. Therefore, IO rings resources should be 3566e2735b09SMarcin Wojtas * established from scratch each time. 
3567e2735b09SMarcin Wojtas */ 3568e2735b09SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 3569e2735b09SMarcin Wojtas 357032f63fa7SMarcin Wojtas ena_com_abort_admin_commands(ena_dev); 357132f63fa7SMarcin Wojtas 357232f63fa7SMarcin Wojtas ena_com_wait_for_abort_completion(ena_dev); 357332f63fa7SMarcin Wojtas 357432f63fa7SMarcin Wojtas ena_com_admin_destroy(ena_dev); 357532f63fa7SMarcin Wojtas 357632f63fa7SMarcin Wojtas ena_com_mmio_reg_read_request_destroy(ena_dev); 357732f63fa7SMarcin Wojtas 357832f63fa7SMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_NORMAL; 357932f63fa7SMarcin Wojtas 358032f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 358132f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 358232f63fa7SMarcin Wojtas } 358332f63fa7SMarcin Wojtas 358432f63fa7SMarcin Wojtas static int 358532f63fa7SMarcin Wojtas ena_device_validate_params(struct ena_adapter *adapter, 358632f63fa7SMarcin Wojtas struct ena_com_dev_get_features_ctx *get_feat_ctx) 358732f63fa7SMarcin Wojtas { 358832f63fa7SMarcin Wojtas if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr, 358932f63fa7SMarcin Wojtas ETHER_ADDR_LEN) != 0) { 35903fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "Error, mac addresses differ\n"); 359132f63fa7SMarcin Wojtas return (EINVAL); 359232f63fa7SMarcin Wojtas } 359332f63fa7SMarcin Wojtas 359432f63fa7SMarcin Wojtas if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) { 35953fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 359632f63fa7SMarcin Wojtas "Error, device max mtu is smaller than ifp MTU\n"); 359732f63fa7SMarcin Wojtas return (EINVAL); 359832f63fa7SMarcin Wojtas } 359932f63fa7SMarcin Wojtas 360032f63fa7SMarcin Wojtas return 0; 360132f63fa7SMarcin Wojtas } 360232f63fa7SMarcin Wojtas 360338c7b965SMarcin Wojtas int 360432f63fa7SMarcin Wojtas ena_restore_device(struct ena_adapter *adapter) 360532f63fa7SMarcin Wojtas { 360632f63fa7SMarcin Wojtas struct ena_com_dev_get_features_ctx get_feat_ctx; 360732f63fa7SMarcin Wojtas struct ena_com_dev *ena_dev = adapter->ena_dev; 360832f63fa7SMarcin Wojtas if_t ifp = adapter->ifp; 360932f63fa7SMarcin Wojtas device_t dev = adapter->pdev; 361032f63fa7SMarcin Wojtas int wd_active; 36119b8d05b8SZbigniew Bodek int rc; 36129b8d05b8SZbigniew Bodek 361332f63fa7SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); 361432f63fa7SMarcin Wojtas 361532f63fa7SMarcin Wojtas rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active); 361632f63fa7SMarcin Wojtas if (rc != 0) { 36173fc5d816SMarcin Wojtas ena_log(dev, ERR, "Cannot initialize device\n"); 361832f63fa7SMarcin Wojtas goto err; 361932f63fa7SMarcin Wojtas } 362032f63fa7SMarcin Wojtas /* 362132f63fa7SMarcin Wojtas * Only enable WD if it was enabled before reset, so it won't override 362232f63fa7SMarcin Wojtas * value set by the user by the sysctl. 
362332f63fa7SMarcin Wojtas */
362432f63fa7SMarcin Wojtas if (adapter->wd_active != 0)
362532f63fa7SMarcin Wojtas adapter->wd_active = wd_active;
362632f63fa7SMarcin Wojtas
362732f63fa7SMarcin Wojtas rc = ena_device_validate_params(adapter, &get_feat_ctx);
362832f63fa7SMarcin Wojtas if (rc != 0) {
36293fc5d816SMarcin Wojtas ena_log(dev, ERR, "Validation of device parameters failed\n");
363032f63fa7SMarcin Wojtas goto err_device_destroy;
363132f63fa7SMarcin Wojtas }
363232f63fa7SMarcin Wojtas
363332f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
363432f63fa7SMarcin Wojtas /* Make sure we don't have a race with AENQ Links state handler */
363532f63fa7SMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
363632f63fa7SMarcin Wojtas if_link_state_change(ifp, LINK_STATE_UP);
363732f63fa7SMarcin Wojtas
3638aa9c3226SMarcin Wojtas rc = ena_enable_msix_and_set_admin_interrupts(adapter);
363932f63fa7SMarcin Wojtas if (rc != 0) {
36403fc5d816SMarcin Wojtas ena_log(dev, ERR, "Enable MSI-X failed\n");
364132f63fa7SMarcin Wojtas goto err_device_destroy;
364232f63fa7SMarcin Wojtas }
364332f63fa7SMarcin Wojtas
3644e2735b09SMarcin Wojtas /*
3645e2735b09SMarcin Wojtas * Effective value of used MSIX vectors should be the same as before
3646e2735b09SMarcin Wojtas * `ena_destroy_device()`, if possible, or closest to it if fewer vectors
3647e2735b09SMarcin Wojtas * are available.
3648e2735b09SMarcin Wojtas */
3649e2735b09SMarcin Wojtas if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues)
365082e558eaSDawid Gorecki adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3651e2735b09SMarcin Wojtas
3652e2735b09SMarcin Wojtas /* Re-initialize rings basic information */
3653e2735b09SMarcin Wojtas ena_init_io_rings(adapter);
3654e2735b09SMarcin Wojtas
365532f63fa7SMarcin Wojtas /* If the interface was up before the reset, bring it up */
365632f63fa7SMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
365732f63fa7SMarcin Wojtas rc = ena_up(adapter);
365832f63fa7SMarcin Wojtas if (rc != 0) {
36593fc5d816SMarcin Wojtas ena_log(dev, ERR, "Failed to create I/O queues\n");
366032f63fa7SMarcin Wojtas goto err_disable_msix;
366132f63fa7SMarcin Wojtas }
366232f63fa7SMarcin Wojtas }
366332f63fa7SMarcin Wojtas
366424392281SMarcin Wojtas /* Indicate that the device is running again and ready to work */
366532f63fa7SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
366624392281SMarcin Wojtas
366724392281SMarcin Wojtas /*
366824392281SMarcin Wojtas * As the AENQ handlers weren't executed during reset because
366924392281SMarcin Wojtas * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the
367024392281SMarcin Wojtas * timestamp must be updated again. That will prevent the next reset
367124392281SMarcin Wojtas * from being caused by a missing keep alive.
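 *
 * As a rough sketch (the authoritative check lives in
 * check_for_missing_keep_alive(), called from ena_timer_service()), the
 * watchdog that this refresh satisfies is expected to look approximately
 * like:
 *
 *	if (getsbinuptime() - adapter->keep_alive_timestamp >
 *	    adapter->keep_alive_timeout)
 *		ena_trigger_reset(adapter, ...);
 *
 * so refreshing the timestamp keeps the just-restored device from being
 * reset again right away.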
367224392281SMarcin Wojtas */ 367324392281SMarcin Wojtas adapter->keep_alive_timestamp = getsbinuptime(); 367478554d0cSDawid Gorecki ENA_TIMER_RESET(adapter); 367578554d0cSDawid Gorecki 36767d8c4feeSMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 367732f63fa7SMarcin Wojtas 367832f63fa7SMarcin Wojtas return (rc); 367932f63fa7SMarcin Wojtas 368032f63fa7SMarcin Wojtas err_disable_msix: 368132f63fa7SMarcin Wojtas ena_free_mgmnt_irq(adapter); 368232f63fa7SMarcin Wojtas ena_disable_msix(adapter); 368332f63fa7SMarcin Wojtas err_device_destroy: 368432f63fa7SMarcin Wojtas ena_com_abort_admin_commands(ena_dev); 368532f63fa7SMarcin Wojtas ena_com_wait_for_abort_completion(ena_dev); 368632f63fa7SMarcin Wojtas ena_com_admin_destroy(ena_dev); 368732f63fa7SMarcin Wojtas ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); 368832f63fa7SMarcin Wojtas ena_com_mmio_reg_read_request_destroy(ena_dev); 368932f63fa7SMarcin Wojtas err: 369032f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 369132f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); 36923fc5d816SMarcin Wojtas ena_log(dev, ERR, "Reset attempt failed. Can not reset the device\n"); 369332f63fa7SMarcin Wojtas 369432f63fa7SMarcin Wojtas return (rc); 369532f63fa7SMarcin Wojtas } 369632f63fa7SMarcin Wojtas 369732f63fa7SMarcin Wojtas static void 3698b899a02aSDawid Gorecki ena_metrics_task(void *arg, int pending) 3699b899a02aSDawid Gorecki { 3700b899a02aSDawid Gorecki struct ena_adapter *adapter = (struct ena_adapter *)arg; 3701b899a02aSDawid Gorecki 3702b899a02aSDawid Gorecki ENA_LOCK_LOCK(); 3703f97993adSOsama Abboud 3704f97993adSOsama Abboud if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) 3705f97993adSOsama Abboud (void)ena_copy_customer_metrics(adapter); 3706f97993adSOsama Abboud else if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS)) 3707b899a02aSDawid Gorecki (void)ena_copy_eni_metrics(adapter); 3708f97993adSOsama Abboud 370936d42c86SOsama Abboud if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENA_SRD_INFO)) 371036d42c86SOsama Abboud (void)ena_copy_srd_metrics(adapter); 371136d42c86SOsama Abboud 3712b899a02aSDawid Gorecki ENA_LOCK_UNLOCK(); 3713b899a02aSDawid Gorecki } 3714b899a02aSDawid Gorecki 3715b899a02aSDawid Gorecki static void 371632f63fa7SMarcin Wojtas ena_reset_task(void *arg, int pending) 371732f63fa7SMarcin Wojtas { 371832f63fa7SMarcin Wojtas struct ena_adapter *adapter = (struct ena_adapter *)arg; 371932f63fa7SMarcin Wojtas 372007aff471SArtur Rojek ENA_LOCK_LOCK(); 3721433ab9b6SArtur Rojek if (likely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 3722a33ec635SOsama Abboud ena_increment_reset_counter(adapter); 372332f63fa7SMarcin Wojtas ena_destroy_device(adapter, false); 372432f63fa7SMarcin Wojtas ena_restore_device(adapter); 3725d209ffeeSDawid Gorecki 3726d209ffeeSDawid Gorecki ena_log(adapter->pdev, INFO, 3727d209ffeeSDawid Gorecki "Device reset completed successfully, Driver info: %s\n", 3728d209ffeeSDawid Gorecki ena_version); 3729433ab9b6SArtur Rojek } 373007aff471SArtur Rojek ENA_LOCK_UNLOCK(); 37319b8d05b8SZbigniew Bodek } 37329b8d05b8SZbigniew Bodek 3733b9e80b52SOsama Abboud static void 3734b9e80b52SOsama Abboud ena_free_stats(struct ena_adapter *adapter) 3735b9e80b52SOsama Abboud { 3736b9e80b52SOsama Abboud ena_free_counters((counter_u64_t *)&adapter->hw_stats, 3737b9e80b52SOsama Abboud sizeof(struct ena_hw_stats)); 3738b9e80b52SOsama Abboud ena_free_counters((counter_u64_t *)&adapter->dev_stats, 
3739b9e80b52SOsama Abboud sizeof(struct ena_stats_dev)); 3740b9e80b52SOsama Abboud 3741b9e80b52SOsama Abboud } 37429b8d05b8SZbigniew Bodek /** 37439b8d05b8SZbigniew Bodek * ena_attach - Device Initialization Routine 37449b8d05b8SZbigniew Bodek * @pdev: device information struct 37459b8d05b8SZbigniew Bodek * 37469b8d05b8SZbigniew Bodek * Returns 0 on success, otherwise on failure. 37479b8d05b8SZbigniew Bodek * 37489b8d05b8SZbigniew Bodek * ena_attach initializes an adapter identified by a device structure. 37499b8d05b8SZbigniew Bodek * The OS initialization, configuring of the adapter private structure, 37509b8d05b8SZbigniew Bodek * and a hardware reset occur. 37519b8d05b8SZbigniew Bodek **/ 37529b8d05b8SZbigniew Bodek static int 37539b8d05b8SZbigniew Bodek ena_attach(device_t pdev) 37549b8d05b8SZbigniew Bodek { 37559b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx get_feat_ctx; 37566064f289SMarcin Wojtas struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; 37579b8d05b8SZbigniew Bodek static int version_printed; 37589b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 37599b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = NULL; 37607d8c4feeSMarcin Wojtas uint32_t max_num_io_queues; 37611c808fcdSMichal Krawczyk int msix_rid; 37624fa9e02dSMarcin Wojtas int rid, rc; 37634fa9e02dSMarcin Wojtas 37649b8d05b8SZbigniew Bodek adapter = device_get_softc(pdev); 37659b8d05b8SZbigniew Bodek adapter->pdev = pdev; 3766eb4c4f4aSMarcin Wojtas adapter->first_bind = -1; 37679b8d05b8SZbigniew Bodek 37686959869eSMarcin Wojtas /* 37696959869eSMarcin Wojtas * Set up the timer service - driver is responsible for avoiding 37706959869eSMarcin Wojtas * concurrency, as the callout won't be using any locking inside. 37716959869eSMarcin Wojtas */ 377278554d0cSDawid Gorecki ENA_TIMER_INIT(adapter); 37738f15f8a7SDawid Gorecki adapter->keep_alive_timeout = ENA_DEFAULT_KEEP_ALIVE_TO; 37748f15f8a7SDawid Gorecki adapter->missing_tx_timeout = ENA_DEFAULT_TX_CMP_TO; 37758f15f8a7SDawid Gorecki adapter->missing_tx_max_queues = ENA_DEFAULT_TX_MONITORED_QUEUES; 37768f15f8a7SDawid Gorecki adapter->missing_tx_threshold = ENA_DEFAULT_TX_CMP_THRESHOLD; 37779b8d05b8SZbigniew Bodek 3778f9e1d947SOsama Abboud adapter->irq_cpu_base = ENA_BASE_CPU_UNSPECIFIED; 3779f9e1d947SOsama Abboud adapter->irq_cpu_stride = 0; 3780f9e1d947SOsama Abboud 3781f9e1d947SOsama Abboud #ifdef RSS 3782f9e1d947SOsama Abboud adapter->rss_enabled = 1; 3783f9e1d947SOsama Abboud #endif 3784f9e1d947SOsama Abboud 37859b8d05b8SZbigniew Bodek if (version_printed++ == 0) 37863fc5d816SMarcin Wojtas ena_log(pdev, INFO, "%s\n", ena_version); 37879b8d05b8SZbigniew Bodek 37889b8d05b8SZbigniew Bodek /* Allocate memory for ena_dev structure */ 3789cd5d5804SMarcin Wojtas ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF, 3790cd5d5804SMarcin Wojtas M_WAITOK | M_ZERO); 37919b8d05b8SZbigniew Bodek 37929b8d05b8SZbigniew Bodek adapter->ena_dev = ena_dev; 37939b8d05b8SZbigniew Bodek ena_dev->dmadev = pdev; 37944fa9e02dSMarcin Wojtas 37954fa9e02dSMarcin Wojtas rid = PCIR_BAR(ENA_REG_BAR); 37964fa9e02dSMarcin Wojtas adapter->memory = NULL; 379782e558eaSDawid Gorecki adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid, 379882e558eaSDawid Gorecki RF_ACTIVE); 37994fa9e02dSMarcin Wojtas if (unlikely(adapter->registers == NULL)) { 38003fc5d816SMarcin Wojtas ena_log(pdev, ERR, 38014fa9e02dSMarcin Wojtas "unable to allocate bus resource: registers!\n"); 38024fa9e02dSMarcin Wojtas rc = ENOMEM; 38034fa9e02dSMarcin Wojtas goto err_dev_free; 38044fa9e02dSMarcin 
Wojtas } 38054fa9e02dSMarcin Wojtas 38061c808fcdSMichal Krawczyk /* MSIx vector table may reside on BAR0 with registers or on BAR1. */ 38071c808fcdSMichal Krawczyk msix_rid = pci_msix_table_bar(pdev); 38081c808fcdSMichal Krawczyk if (msix_rid != rid) { 38091c808fcdSMichal Krawczyk adapter->msix = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, 38101c808fcdSMichal Krawczyk &msix_rid, RF_ACTIVE); 38111c808fcdSMichal Krawczyk if (unlikely(adapter->msix == NULL)) { 38123fc5d816SMarcin Wojtas ena_log(pdev, ERR, 38131c808fcdSMichal Krawczyk "unable to allocate bus resource: msix!\n"); 38141c808fcdSMichal Krawczyk rc = ENOMEM; 38151c808fcdSMichal Krawczyk goto err_pci_free; 38161c808fcdSMichal Krawczyk } 38171c808fcdSMichal Krawczyk adapter->msix_rid = msix_rid; 38181c808fcdSMichal Krawczyk } 38191c808fcdSMichal Krawczyk 38209b8d05b8SZbigniew Bodek ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF, 38219b8d05b8SZbigniew Bodek M_WAITOK | M_ZERO); 38229b8d05b8SZbigniew Bodek 38239b8d05b8SZbigniew Bodek /* Store register resources */ 382482e558eaSDawid Gorecki ((struct ena_bus *)(ena_dev->bus))->reg_bar_t = rman_get_bustag( 382582e558eaSDawid Gorecki adapter->registers); 382682e558eaSDawid Gorecki ((struct ena_bus *)(ena_dev->bus))->reg_bar_h = rman_get_bushandle( 382782e558eaSDawid Gorecki adapter->registers); 38289b8d05b8SZbigniew Bodek 38293f9ed7abSMarcin Wojtas if (unlikely(((struct ena_bus *)(ena_dev->bus))->reg_bar_h == 0)) { 38303fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to pmap registers bar\n"); 38319b8d05b8SZbigniew Bodek rc = ENXIO; 3832cd5d5804SMarcin Wojtas goto err_bus_free; 38339b8d05b8SZbigniew Bodek } 38349b8d05b8SZbigniew Bodek 38353324e304SMichal Krawczyk rc = ena_map_llq_mem_bar(pdev, ena_dev); 38363324e304SMichal Krawczyk if (unlikely(rc != 0)) { 38373324e304SMichal Krawczyk ena_log(pdev, ERR, "Failed to map ENA mem bar"); 38383324e304SMichal Krawczyk goto err_bus_free; 38393324e304SMichal Krawczyk } 38409b8d05b8SZbigniew Bodek 3841637ff00fSosamaabb ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US; 3842637ff00fSosamaabb 3843fd43fd2aSMarcin Wojtas /* Initially clear all the flags */ 3844fd43fd2aSMarcin Wojtas ENA_FLAG_ZERO(adapter); 3845fd43fd2aSMarcin Wojtas 38469b8d05b8SZbigniew Bodek /* Device initialization */ 38479b8d05b8SZbigniew Bodek rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active); 38483f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 38493fc5d816SMarcin Wojtas ena_log(pdev, ERR, "ENA device init failed! 
(err: %d)\n", rc); 38509b8d05b8SZbigniew Bodek rc = ENXIO; 38519b8d05b8SZbigniew Bodek goto err_bus_free; 38529b8d05b8SZbigniew Bodek } 38539b8d05b8SZbigniew Bodek 38540b432b70SMarcin Wojtas if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 385582e558eaSDawid Gorecki adapter->disable_meta_caching = !!( 385682e558eaSDawid Gorecki get_feat_ctx.llq.accel_mode.u.get.supported_flags & 38570b432b70SMarcin Wojtas BIT(ENA_ADMIN_DISABLE_META_CACHING)); 38580b432b70SMarcin Wojtas 38599b8d05b8SZbigniew Bodek adapter->keep_alive_timestamp = getsbinuptime(); 38609b8d05b8SZbigniew Bodek 38619b8d05b8SZbigniew Bodek adapter->tx_offload_cap = get_feat_ctx.offload.tx; 38629b8d05b8SZbigniew Bodek 38639b8d05b8SZbigniew Bodek memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr, 38649b8d05b8SZbigniew Bodek ETHER_ADDR_LEN); 38659b8d05b8SZbigniew Bodek 38667d8c4feeSMarcin Wojtas calc_queue_ctx.pdev = pdev; 38676064f289SMarcin Wojtas calc_queue_ctx.ena_dev = ena_dev; 38686064f289SMarcin Wojtas calc_queue_ctx.get_feat_ctx = &get_feat_ctx; 38696064f289SMarcin Wojtas 38707d8c4feeSMarcin Wojtas /* Calculate initial and maximum IO queue number and size */ 38717d8c4feeSMarcin Wojtas max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, 38727d8c4feeSMarcin Wojtas &get_feat_ctx); 3873b1c38df0SOsama Abboud rc = ena_calc_io_queue_size(&calc_queue_ctx, adapter); 38747d8c4feeSMarcin Wojtas if (unlikely((rc != 0) || (max_num_io_queues <= 0))) { 38756064f289SMarcin Wojtas rc = EFAULT; 38769b8d05b8SZbigniew Bodek goto err_com_free; 38779b8d05b8SZbigniew Bodek } 38789b8d05b8SZbigniew Bodek 38799762a033SMarcin Wojtas adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size; 38809762a033SMarcin Wojtas adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size; 38817d8c4feeSMarcin Wojtas adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size; 38827d8c4feeSMarcin Wojtas adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size; 38836064f289SMarcin Wojtas adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 38846064f289SMarcin Wojtas adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 38856064f289SMarcin Wojtas 38867d8c4feeSMarcin Wojtas adapter->max_num_io_queues = max_num_io_queues; 38877d8c4feeSMarcin Wojtas 38886064f289SMarcin Wojtas adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE; 38899b8d05b8SZbigniew Bodek 38907d8c4feeSMarcin Wojtas adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 38917d8c4feeSMarcin Wojtas 38927d8c4feeSMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_NORMAL; 38937d8c4feeSMarcin Wojtas 38949b8d05b8SZbigniew Bodek /* set up dma tags for rx and tx buffers */ 38959b8d05b8SZbigniew Bodek rc = ena_setup_tx_dma_tag(adapter); 38964e8acd84SMarcin Wojtas if (unlikely(rc != 0)) { 38973fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Failed to create TX DMA tag\n"); 3898cd5d5804SMarcin Wojtas goto err_com_free; 38994e8acd84SMarcin Wojtas } 39009b8d05b8SZbigniew Bodek 39019b8d05b8SZbigniew Bodek rc = ena_setup_rx_dma_tag(adapter); 39024e8acd84SMarcin Wojtas if (unlikely(rc != 0)) { 39033fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Failed to create RX DMA tag\n"); 3904cd5d5804SMarcin Wojtas goto err_tx_tag_free; 39054e8acd84SMarcin Wojtas } 39069b8d05b8SZbigniew Bodek 3907e2735b09SMarcin Wojtas /* 3908e2735b09SMarcin Wojtas * The amount of requested MSIX vectors is equal to 3909e2735b09SMarcin Wojtas * adapter::max_num_io_queues (see `ena_enable_msix()`), plus a constant 3910e2735b09SMarcin Wojtas * number of admin queue interrupts. 
The former is initially determined 3911e2735b09SMarcin Wojtas * by HW capabilities (see `ena_calc_max_io_queue_num())` but may not be 3912e2735b09SMarcin Wojtas * achieved if there are not enough system resources. By default, the 3913e2735b09SMarcin Wojtas * number of effectively used IO queues is the same but later on it can 3914e2735b09SMarcin Wojtas * be limited by the user using sysctl interface. 3915e2735b09SMarcin Wojtas */ 3916aa9c3226SMarcin Wojtas rc = ena_enable_msix_and_set_admin_interrupts(adapter); 39173f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 39183fc5d816SMarcin Wojtas ena_log(pdev, ERR, 39199b8d05b8SZbigniew Bodek "Failed to enable and set the admin interrupts\n"); 3920c115a1e2SMarcin Wojtas goto err_io_free; 3921c115a1e2SMarcin Wojtas } 3922e2735b09SMarcin Wojtas /* By default all of allocated MSIX vectors are actively used */ 3923e2735b09SMarcin Wojtas adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC; 3924e2735b09SMarcin Wojtas 3925e2735b09SMarcin Wojtas /* initialize rings basic information */ 3926e2735b09SMarcin Wojtas ena_init_io_rings(adapter); 3927c115a1e2SMarcin Wojtas 3928f97993adSOsama Abboud rc = ena_com_allocate_customer_metrics_buffer(ena_dev); 3929f97993adSOsama Abboud if (rc) { 3930f97993adSOsama Abboud ena_log(pdev, ERR, "Failed to allocate customer metrics buffer.\n"); 3931f97993adSOsama Abboud goto err_msix_free; 3932f97993adSOsama Abboud } 3933f97993adSOsama Abboud 3934f97993adSOsama Abboud rc = ena_sysctl_allocate_customer_metrics_buffer(adapter); 3935f97993adSOsama Abboud if (unlikely(rc)){ 3936f97993adSOsama Abboud ena_log(pdev, ERR, "Failed to allocate sysctl customer metrics buffer.\n"); 3937f97993adSOsama Abboud goto err_metrics_buffer_destroy; 3938f97993adSOsama Abboud } 3939f97993adSOsama Abboud 3940b9e80b52SOsama Abboud /* Initialize statistics */ 3941b9e80b52SOsama Abboud ena_alloc_counters((counter_u64_t *)&adapter->dev_stats, 3942b9e80b52SOsama Abboud sizeof(struct ena_stats_dev)); 3943b9e80b52SOsama Abboud ena_alloc_counters((counter_u64_t *)&adapter->hw_stats, 3944b9e80b52SOsama Abboud sizeof(struct ena_hw_stats)); 3945b9e80b52SOsama Abboud ena_sysctl_add_nodes(adapter); 3946b9e80b52SOsama Abboud 3947c115a1e2SMarcin Wojtas /* setup network interface */ 3948aa386085SZhenlei Huang ena_setup_ifnet(pdev, adapter, &get_feat_ctx); 39499b8d05b8SZbigniew Bodek 3950081169f2SZbigniew Bodek /* Initialize reset task queue */ 3951081169f2SZbigniew Bodek TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter); 3952081169f2SZbigniew Bodek adapter->reset_tq = taskqueue_create("ena_reset_enqueue", 3953081169f2SZbigniew Bodek M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq); 395482e558eaSDawid Gorecki taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, "%s rstq", 395582e558eaSDawid Gorecki device_get_nameunit(adapter->pdev)); 3956081169f2SZbigniew Bodek 3957b899a02aSDawid Gorecki /* Initialize metrics task queue */ 3958b899a02aSDawid Gorecki TASK_INIT(&adapter->metrics_task, 0, ena_metrics_task, adapter); 3959b899a02aSDawid Gorecki adapter->metrics_tq = taskqueue_create("ena_metrics_enqueue", 3960b899a02aSDawid Gorecki M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->metrics_tq); 396182e558eaSDawid Gorecki taskqueue_start_threads(&adapter->metrics_tq, 1, PI_NET, "%s metricsq", 396282e558eaSDawid Gorecki device_get_nameunit(adapter->pdev)); 3963b899a02aSDawid Gorecki 3964d17b7d87SMarcin Wojtas #ifdef DEV_NETMAP 3965d17b7d87SMarcin Wojtas rc = ena_netmap_attach(adapter); 3966d17b7d87SMarcin Wojtas if (rc != 0) { 
39673fc5d816SMarcin Wojtas ena_log(pdev, ERR, "netmap attach failed: %d\n", rc); 3968d17b7d87SMarcin Wojtas goto err_detach; 3969d17b7d87SMarcin Wojtas } 3970d17b7d87SMarcin Wojtas #endif /* DEV_NETMAP */ 3971d17b7d87SMarcin Wojtas 39729b8d05b8SZbigniew Bodek /* Tell the stack that the interface is not active */ 39739b8d05b8SZbigniew Bodek if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 3974fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 39759b8d05b8SZbigniew Bodek 397678554d0cSDawid Gorecki /* Run the timer service */ 397778554d0cSDawid Gorecki ENA_TIMER_RESET(adapter); 397878554d0cSDawid Gorecki 39799b8d05b8SZbigniew Bodek return (0); 39809b8d05b8SZbigniew Bodek 3981d17b7d87SMarcin Wojtas #ifdef DEV_NETMAP 3982d17b7d87SMarcin Wojtas err_detach: 3983d17b7d87SMarcin Wojtas ether_ifdetach(adapter->ifp); 3984449496ebSOsama Abboud ifmedia_removeall(&adapter->media); 3985f97993adSOsama Abboud free(adapter->customer_metrics_array, M_DEVBUF); 39865517ca84SOsama Abboud #endif /* DEV_NETMAP */ 3987f97993adSOsama Abboud err_metrics_buffer_destroy: 3988f97993adSOsama Abboud ena_com_delete_customer_metrics_buffer(ena_dev); 3989c115a1e2SMarcin Wojtas err_msix_free: 3990b9e80b52SOsama Abboud ena_free_stats(adapter); 3991c115a1e2SMarcin Wojtas ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR); 3992c115a1e2SMarcin Wojtas ena_free_mgmnt_irq(adapter); 3993c115a1e2SMarcin Wojtas ena_disable_msix(adapter); 3994cd5d5804SMarcin Wojtas err_io_free: 39959b8d05b8SZbigniew Bodek ena_free_all_io_rings_resources(adapter); 39969b8d05b8SZbigniew Bodek ena_free_rx_dma_tag(adapter); 3997cd5d5804SMarcin Wojtas err_tx_tag_free: 39989b8d05b8SZbigniew Bodek ena_free_tx_dma_tag(adapter); 3999cd5d5804SMarcin Wojtas err_com_free: 40009b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 40019b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 4002cd5d5804SMarcin Wojtas ena_com_mmio_reg_read_request_destroy(ena_dev); 40039b8d05b8SZbigniew Bodek err_bus_free: 40049b8d05b8SZbigniew Bodek free(ena_dev->bus, M_DEVBUF); 40051c808fcdSMichal Krawczyk err_pci_free: 40069b8d05b8SZbigniew Bodek ena_free_pci_resources(adapter); 40074fa9e02dSMarcin Wojtas err_dev_free: 40084fa9e02dSMarcin Wojtas free(ena_dev, M_DEVBUF); 4009cd5d5804SMarcin Wojtas 40109b8d05b8SZbigniew Bodek return (rc); 40119b8d05b8SZbigniew Bodek } 40129b8d05b8SZbigniew Bodek 40139b8d05b8SZbigniew Bodek /** 40149b8d05b8SZbigniew Bodek * ena_detach - Device Removal Routine 40159b8d05b8SZbigniew Bodek * @pdev: device information struct 40169b8d05b8SZbigniew Bodek * 40179b8d05b8SZbigniew Bodek * ena_detach is called by the device subsystem to alert the driver 40189b8d05b8SZbigniew Bodek * that it should release a PCI device. 
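 *
 * Teardown roughly mirrors ena_attach() in reverse: the ifnet is
 * detached, the timer service is drained, the metrics and reset
 * taskqueues are cancelled and freed, the device is brought down and
 * destroyed gracefully, and only then are the DMA tags, IRQs, PCI
 * resources and the ifnet itself released.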
40199b8d05b8SZbigniew Bodek **/ 40209b8d05b8SZbigniew Bodek static int 40219b8d05b8SZbigniew Bodek ena_detach(device_t pdev) 40229b8d05b8SZbigniew Bodek { 40239b8d05b8SZbigniew Bodek struct ena_adapter *adapter = device_get_softc(pdev); 40249b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 40259b8d05b8SZbigniew Bodek int rc; 40269b8d05b8SZbigniew Bodek 40279b8d05b8SZbigniew Bodek /* Make sure VLANS are not using driver */ 40287583c633SJustin Hibbits if (if_vlantrunkinuse(adapter->ifp)) { 40293fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "VLAN is in use, detach first\n"); 40309b8d05b8SZbigniew Bodek return (EBUSY); 40319b8d05b8SZbigniew Bodek } 40329b8d05b8SZbigniew Bodek 4033*d412c076SJohn Baldwin rc = bus_generic_detach(pdev); 4034*d412c076SJohn Baldwin if (rc != 0) 4035*d412c076SJohn Baldwin return (rc); 4036*d412c076SJohn Baldwin 40379151c55dSMarcin Wojtas ether_ifdetach(adapter->ifp); 40389151c55dSMarcin Wojtas 4039449496ebSOsama Abboud ifmedia_removeall(&adapter->media); 4040449496ebSOsama Abboud 40416959869eSMarcin Wojtas /* Stop timer service */ 404207aff471SArtur Rojek ENA_LOCK_LOCK(); 404378554d0cSDawid Gorecki ENA_TIMER_DRAIN(adapter); 404407aff471SArtur Rojek ENA_LOCK_UNLOCK(); 40456959869eSMarcin Wojtas 4046b899a02aSDawid Gorecki /* Release metrics task */ 4047b899a02aSDawid Gorecki while (taskqueue_cancel(adapter->metrics_tq, &adapter->metrics_task, NULL)) 4048b899a02aSDawid Gorecki taskqueue_drain(adapter->metrics_tq, &adapter->metrics_task); 4049b899a02aSDawid Gorecki taskqueue_free(adapter->metrics_tq); 4050b899a02aSDawid Gorecki 40516959869eSMarcin Wojtas /* Release reset task */ 40529b8d05b8SZbigniew Bodek while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL)) 40539b8d05b8SZbigniew Bodek taskqueue_drain(adapter->reset_tq, &adapter->reset_task); 40549b8d05b8SZbigniew Bodek taskqueue_free(adapter->reset_tq); 40559b8d05b8SZbigniew Bodek 405607aff471SArtur Rojek ENA_LOCK_LOCK(); 40579b8d05b8SZbigniew Bodek ena_down(adapter); 405832f63fa7SMarcin Wojtas ena_destroy_device(adapter, true); 405907aff471SArtur Rojek ENA_LOCK_UNLOCK(); 40609b8d05b8SZbigniew Bodek 40610e7d31f6SMarcin Wojtas /* Restore unregistered sysctl queue nodes. 
*/ 40620e7d31f6SMarcin Wojtas ena_sysctl_update_queue_node_nb(adapter, adapter->num_io_queues, 40630e7d31f6SMarcin Wojtas adapter->max_num_io_queues); 40640e7d31f6SMarcin Wojtas 4065d17b7d87SMarcin Wojtas #ifdef DEV_NETMAP 4066d17b7d87SMarcin Wojtas netmap_detach(adapter->ifp); 4067d17b7d87SMarcin Wojtas #endif /* DEV_NETMAP */ 4068d17b7d87SMarcin Wojtas 4069b9e80b52SOsama Abboud ena_free_stats(adapter); 40709b8d05b8SZbigniew Bodek 40719b8d05b8SZbigniew Bodek rc = ena_free_rx_dma_tag(adapter); 40723f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 40733fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 40749b8d05b8SZbigniew Bodek "Unmapped RX DMA tag associations\n"); 40759b8d05b8SZbigniew Bodek 40769b8d05b8SZbigniew Bodek rc = ena_free_tx_dma_tag(adapter); 40773f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 40783fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 40799b8d05b8SZbigniew Bodek "Unmapped TX DMA tag associations\n"); 40809b8d05b8SZbigniew Bodek 40819b8d05b8SZbigniew Bodek ena_free_irqs(adapter); 40829b8d05b8SZbigniew Bodek 40839b8d05b8SZbigniew Bodek ena_free_pci_resources(adapter); 40849b8d05b8SZbigniew Bodek 40856d1ef2abSArtur Rojek if (adapter->rss_indir != NULL) 40866d1ef2abSArtur Rojek free(adapter->rss_indir, M_DEVBUF); 40876d1ef2abSArtur Rojek 408832f63fa7SMarcin Wojtas if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) 408932f63fa7SMarcin Wojtas ena_com_rss_destroy(ena_dev); 409032f63fa7SMarcin Wojtas 409132f63fa7SMarcin Wojtas ena_com_delete_host_info(ena_dev); 409232f63fa7SMarcin Wojtas 4093f97993adSOsama Abboud free(adapter->customer_metrics_array, M_DEVBUF); 4094f97993adSOsama Abboud 4095f97993adSOsama Abboud ena_com_delete_customer_metrics_buffer(ena_dev); 4096f97993adSOsama Abboud 40979151c55dSMarcin Wojtas if_free(adapter->ifp); 40989151c55dSMarcin Wojtas 40999b8d05b8SZbigniew Bodek free(ena_dev->bus, M_DEVBUF); 41009b8d05b8SZbigniew Bodek 41019b8d05b8SZbigniew Bodek free(ena_dev, M_DEVBUF); 41029b8d05b8SZbigniew Bodek 4103*d412c076SJohn Baldwin return (0); 41049b8d05b8SZbigniew Bodek } 41059b8d05b8SZbigniew Bodek 41069b8d05b8SZbigniew Bodek /****************************************************************************** 41079b8d05b8SZbigniew Bodek ******************************** AENQ Handlers ******************************* 41089b8d05b8SZbigniew Bodek *****************************************************************************/ 41099b8d05b8SZbigniew Bodek /** 41109b8d05b8SZbigniew Bodek * ena_update_on_link_change: 41119b8d05b8SZbigniew Bodek * Notify the network interface about the change in link status 41129b8d05b8SZbigniew Bodek **/ 41139b8d05b8SZbigniew Bodek static void 41149b8d05b8SZbigniew Bodek ena_update_on_link_change(void *adapter_data, 41159b8d05b8SZbigniew Bodek struct ena_admin_aenq_entry *aenq_e) 41169b8d05b8SZbigniew Bodek { 41179b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 41189b8d05b8SZbigniew Bodek struct ena_admin_aenq_link_change_desc *aenq_desc; 41199b8d05b8SZbigniew Bodek int status; 41209b8d05b8SZbigniew Bodek if_t ifp; 41219b8d05b8SZbigniew Bodek 41229b8d05b8SZbigniew Bodek aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; 41239b8d05b8SZbigniew Bodek ifp = adapter->ifp; 41249b8d05b8SZbigniew Bodek status = aenq_desc->flags & 41259b8d05b8SZbigniew Bodek ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; 41269b8d05b8SZbigniew Bodek 41279b8d05b8SZbigniew Bodek if (status != 0) { 41283fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, "link is UP\n"); 4129fd43fd2aSMarcin Wojtas 
ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter);
413032f63fa7SMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter))
413132f63fa7SMarcin Wojtas if_link_state_change(ifp, LINK_STATE_UP);
413232f63fa7SMarcin Wojtas } else {
41333fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, "link is DOWN\n");
41349b8d05b8SZbigniew Bodek if_link_state_change(ifp, LINK_STATE_DOWN);
4135fd43fd2aSMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter);
41369b8d05b8SZbigniew Bodek }
41379b8d05b8SZbigniew Bodek }
41389b8d05b8SZbigniew Bodek
413982e558eaSDawid Gorecki static void
414082e558eaSDawid Gorecki ena_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
414140621d71SMarcin Wojtas {
414240621d71SMarcin Wojtas struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
414340621d71SMarcin Wojtas struct ena_admin_ena_hw_hints *hints;
414440621d71SMarcin Wojtas
414582e558eaSDawid Gorecki ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
414682e558eaSDawid Gorecki adapter->ena_dev, "Invalid group(%x) expected %x\n",
414782e558eaSDawid Gorecki aenq_e->aenq_common_desc.group, ENA_ADMIN_NOTIFICATION);
414840621d71SMarcin Wojtas
41499eb1615fSMarcin Wojtas switch (aenq_e->aenq_common_desc.syndrome) {
415040621d71SMarcin Wojtas case ENA_ADMIN_UPDATE_HINTS:
415140621d71SMarcin Wojtas hints =
415240621d71SMarcin Wojtas (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4);
415340621d71SMarcin Wojtas ena_update_hints(adapter, hints);
415440621d71SMarcin Wojtas break;
415540621d71SMarcin Wojtas default:
41563fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR,
415740621d71SMarcin Wojtas "Invalid aenq notification syndrome %d\n",
41589eb1615fSMarcin Wojtas aenq_e->aenq_common_desc.syndrome);
415940621d71SMarcin Wojtas }
416040621d71SMarcin Wojtas }
416140621d71SMarcin Wojtas
416207aff471SArtur Rojek static void
416307aff471SArtur Rojek ena_lock_init(void *arg)
416407aff471SArtur Rojek {
416507aff471SArtur Rojek ENA_LOCK_INIT();
416607aff471SArtur Rojek }
416707aff471SArtur Rojek SYSINIT(ena_lock_init, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_init, NULL);
416807aff471SArtur Rojek
416907aff471SArtur Rojek static void
417007aff471SArtur Rojek ena_lock_uninit(void *arg)
417107aff471SArtur Rojek {
417207aff471SArtur Rojek ENA_LOCK_DESTROY();
417307aff471SArtur Rojek }
417407aff471SArtur Rojek SYSUNINIT(ena_lock_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_uninit, NULL);
417507aff471SArtur Rojek
41769b8d05b8SZbigniew Bodek /**
41779b8d05b8SZbigniew Bodek * This handler will be called for an unknown event group or unimplemented handlers
41789b8d05b8SZbigniew Bodek **/
41799b8d05b8SZbigniew Bodek static void
4180e6de9a83SMarcin Wojtas unimplemented_aenq_handler(void *adapter_data,
41819b8d05b8SZbigniew Bodek struct ena_admin_aenq_entry *aenq_e)
41829b8d05b8SZbigniew Bodek {
4183e6de9a83SMarcin Wojtas struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4184e6de9a83SMarcin Wojtas
41853fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR,
4186e6de9a83SMarcin Wojtas "Unknown event was received or event with unimplemented handler\n");
41879b8d05b8SZbigniew Bodek }
41889b8d05b8SZbigniew Bodek
41898cd86b51SOsama Abboud static void ena_conf_notification(void *adapter_data,
41908cd86b51SOsama Abboud struct ena_admin_aenq_entry *aenq_e)
41918cd86b51SOsama Abboud {
41928cd86b51SOsama Abboud struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
41938cd86b51SOsama Abboud struct ena_admin_aenq_conf_notifications_desc *desc;
41948cd86b51SOsama Abboud u64 bitmap,
bit; 41958cd86b51SOsama Abboud 41968cd86b51SOsama Abboud desc = (struct ena_admin_aenq_conf_notifications_desc *)aenq_e; 41978cd86b51SOsama Abboud bitmap = desc->notifications_bitmap; 41988cd86b51SOsama Abboud 41998cd86b51SOsama Abboud if (bitmap == 0) { 42008cd86b51SOsama Abboud ena_log(adapter->pdev, INFO, 42018cd86b51SOsama Abboud "Empty configuration notification bitmap\n"); 42028cd86b51SOsama Abboud return; 42038cd86b51SOsama Abboud } 42048cd86b51SOsama Abboud 42058cd86b51SOsama Abboud for (bit = ffsll(bitmap); bit != 0; bit = ffsll(bitmap)) { 42068cd86b51SOsama Abboud bit--; 42078cd86b51SOsama Abboud ena_log(adapter->pdev, INFO, 42088cd86b51SOsama Abboud "Sub-optimal configuration notification code: %" PRIu64 " Refer to AWS ENA documentation for additional details and mitigation options.\n", 42098cd86b51SOsama Abboud bit + 1); 42108cd86b51SOsama Abboud // Clear the processed bit 42118cd86b51SOsama Abboud bitmap &= ~(1UL << bit); 42128cd86b51SOsama Abboud } 42138cd86b51SOsama Abboud } 42148cd86b51SOsama Abboud 421570587942SOsama Abboud static void ena_admin_device_request_reset(void *adapter_data, 421670587942SOsama Abboud struct ena_admin_aenq_entry *aenq_e) 421770587942SOsama Abboud { 421870587942SOsama Abboud struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 421970587942SOsama Abboud ena_log(adapter->pdev, WARN, 422070587942SOsama Abboud "The device has detected an unhealthy state, reset is requested\n"); 422170587942SOsama Abboud ena_trigger_reset(adapter, ENA_REGS_RESET_DEVICE_REQUEST); 422270587942SOsama Abboud } 422370587942SOsama Abboud 42249b8d05b8SZbigniew Bodek static struct ena_aenq_handlers aenq_handlers = { 42259b8d05b8SZbigniew Bodek .handlers = { 42269b8d05b8SZbigniew Bodek [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, 422740621d71SMarcin Wojtas [ENA_ADMIN_NOTIFICATION] = ena_notification, 42289b8d05b8SZbigniew Bodek [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd, 42298cd86b51SOsama Abboud [ENA_ADMIN_CONF_NOTIFICATIONS] = ena_conf_notification, 423070587942SOsama Abboud [ENA_ADMIN_DEVICE_REQUEST_RESET] = ena_admin_device_request_reset, 42319b8d05b8SZbigniew Bodek }, 42329b8d05b8SZbigniew Bodek .unimplemented_handler = unimplemented_aenq_handler 42339b8d05b8SZbigniew Bodek }; 42349b8d05b8SZbigniew Bodek 42359b8d05b8SZbigniew Bodek /********************************************************************* 42369b8d05b8SZbigniew Bodek * FreeBSD Device Interface Entry Points 42379b8d05b8SZbigniew Bodek *********************************************************************/ 42389b8d05b8SZbigniew Bodek 423982e558eaSDawid Gorecki static device_method_t ena_methods[] = { /* Device interface */ 42409b8d05b8SZbigniew Bodek DEVMETHOD(device_probe, ena_probe), 42419b8d05b8SZbigniew Bodek DEVMETHOD(device_attach, ena_attach), 424282e558eaSDawid Gorecki DEVMETHOD(device_detach, ena_detach), DEVMETHOD_END 42439b8d05b8SZbigniew Bodek }; 42449b8d05b8SZbigniew Bodek 42459b8d05b8SZbigniew Bodek static driver_t ena_driver = { 424682e558eaSDawid Gorecki "ena", 424782e558eaSDawid Gorecki ena_methods, 424882e558eaSDawid Gorecki sizeof(struct ena_adapter), 42499b8d05b8SZbigniew Bodek }; 42509b8d05b8SZbigniew Bodek 42511dc1476cSJohn Baldwin DRIVER_MODULE(ena, pci, ena_driver, 0, 0); 425240abe76bSWarner Losh MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array, 4253329e817fSWarner Losh nitems(ena_vendor_info_array) - 1); 42549b8d05b8SZbigniew Bodek MODULE_DEPEND(ena, pci, 1, 1, 1); 42559b8d05b8SZbigniew Bodek MODULE_DEPEND(ena, ether, 1, 1, 1); 
4256d17b7d87SMarcin Wojtas #ifdef DEV_NETMAP 4257d17b7d87SMarcin Wojtas MODULE_DEPEND(ena, netmap, 1, 1, 1); 4258d17b7d87SMarcin Wojtas #endif /* DEV_NETMAP */ 42599b8d05b8SZbigniew Bodek 42609b8d05b8SZbigniew Bodek /*********************************************************************/ 4261