Lines Matching +full:irq +full:- +full:device
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
210 device_t pdev = adapter->pdev;
215 maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
217 dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
232 &dma->tag);
238 error = bus_dma_tag_set_domain(dma->tag, domain);
245 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
246 BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
253 dma->paddr = 0;
254 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
255 ena_dmamap_callback, &dma->paddr, mapflags);
256 if (unlikely((error != 0) || (dma->paddr == 0))) {
261 bus_dmamap_sync(dma->tag, dma->map,
267 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
269 bus_dma_tag_destroy(dma->tag);
271 dma->tag = NULL;
272 dma->vaddr = NULL;
273 dma->paddr = 0;
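/*
 * Editor's note: a minimal sketch of the bus_dmamap_load() callback used
 * above. busdma reports the physical segments through a caller-supplied
 * callback; for a single-segment coherent allocation the callback stores
 * segs[0].ds_addr into the bus_addr_t passed as its argument (here,
 * &dma->paddr, which the caller pre-zeroes so failure is detectable).
 * The body is an assumption based on the standard busdma idiom and the
 * call sites above; it relies on ena.c's existing includes.
 */
static void
ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = arg;

	if (error != 0)
		return;		/* *paddr stays 0, so the caller sees the failure */
	KASSERT(nseg == 1, ("unexpected segment count: %d", nseg));
	*paddr = segs[0].ds_addr;
}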
281 device_t pdev = adapter->pdev;
283 if (adapter->memory != NULL) {
285 PCIR_BAR(ENA_MEM_BAR), adapter->memory);
288 if (adapter->registers != NULL) {
290 PCIR_BAR(ENA_REG_BAR), adapter->registers);
293 if (adapter->msix != NULL) {
294 bus_release_resource(pdev, SYS_RES_MEMORY, adapter->msix_rid,
295 adapter->msix);
310 while (ent->vendor_id != 0) {
311 if ((pci_vendor_id == ent->vendor_id) &&
312 (pci_device_id == ent->device_id)) {
313 ena_log_raw(DBG, "vendor=%x device=%x\n", pci_vendor_id,
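/*
 * Editor's note: sketch of the ID table the probe loop above walks; the
 * loop stops at the zero-vendor terminator, and MODULE_PNP_INFO at the
 * bottom of this file describes each entry as "U16:vendor;U16:device".
 * The concrete device IDs below are the ones the Linux ena driver uses
 * (PF 0x0ec2, VF 0xec20) and are an assumption here.
 */
typedef struct ena_vendor_info {
	uint16_t vendor_id;
	uint16_t device_id;
} ena_vendor_info_t;

static ena_vendor_info_t ena_vendor_info_array[] = {
	{ 0x1d0f, 0x0ec2 },	/* Amazon ENA PF */
	{ 0x1d0f, 0xec20 },	/* Amazon ENA VF */
	{ 0, 0 }		/* terminator: ent->vendor_id == 0 */
};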
330 device_t pdev = adapter->pdev;
333 if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
335 new_mtu, adapter->max_mtu, ENA_MIN_MTU);
339 rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
381 ring->qid = qid;
382 ring->adapter = adapter;
383 ring->ena_dev = adapter->ena_dev;
384 atomic_store_8(&ring->first_interrupt, 0);
385 ring->no_interrupt_event_cnt = 0;
396 ena_dev = adapter->ena_dev;
398 for (i = 0; i < adapter->num_io_queues; i++) {
399 txr = &adapter->tx_ring[i];
400 rxr = &adapter->rx_ring[i];
407 txr->tx_max_header_size = ena_dev->tx_max_header_size;
408 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
410 que = &adapter->que[i];
411 que->adapter = adapter;
412 que->id = i;
413 que->tx_ring = txr;
414 que->rx_ring = rxr;
416 txr->que = que;
417 rxr->que = que;
419 rxr->empty_rx_queue = 0;
420 rxr->rx_mbuf_sz = ena_mbuf_sz;
430 for (i = 0; i < adapter->num_io_queues; i++) {
431 txr = &adapter->tx_ring[i];
432 rxr = &adapter->rx_ring[i];
435 txr->buf_ring_size = adapter->buf_ring_size;
436 txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF, M_WAITOK,
437 &txr->ring_mtx);
440 ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
441 sizeof(txr->tx_stats));
442 txr->tx_last_cleanup_ticks = ticks;
445 ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
446 sizeof(rxr->rx_stats));
449 snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
450 device_get_nameunit(adapter->pdev), i);
451 snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
452 device_get_nameunit(adapter->pdev), i);
454 mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
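/*
 * Editor's note: minimal sketch of how the per-ring buf_ring and mutex
 * initialized above are consumed by a multiqueue transmit path. The
 * function name is illustrative (the driver's real consumers are
 * ena_mq_start/ena_deferred_mq_start, the latter visible below); the
 * sketch drops packets instead of programming descriptors.
 */
static int
example_mq_start(if_t ifp, struct ena_ring *txr, struct mbuf *m)
{
	int error;

	/* Producer side: stage the mbuf on the lockless drbr ring. */
	error = drbr_enqueue(ifp, txr->br, m);
	if (error != 0)
		return (error);

	/* Consumer side: drain the ring under the per-ring mutex. */
	if (mtx_trylock(&txr->ring_mtx)) {
		while ((m = drbr_dequeue(ifp, txr->br)) != NULL)
			m_freem(m);	/* real code maps m and posts Tx descriptors */
		mtx_unlock(&txr->ring_mtx);
	}
	return (0);
}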
474 struct ena_ring *txr = &adapter->tx_ring[qid];
475 struct ena_ring *rxr = &adapter->rx_ring[qid];
477 ena_free_counters((counter_u64_t *)&txr->tx_stats,
478 sizeof(txr->tx_stats));
479 ena_free_counters((counter_u64_t *)&rxr->rx_stats,
480 sizeof(rxr->rx_stats));
483 drbr_free(txr->br, M_DEVBUF);
486 mtx_destroy(&txr->ring_mtx);
494 for (i = 0; i < adapter->num_io_queues; i++)
504 ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
506 ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
510 adapter->max_tx_sgl_size - 1, /* nsegments */
515 &adapter->tx_buf_tag);
525 ret = bus_dma_tag_destroy(adapter->tx_buf_tag);
528 adapter->tx_buf_tag = NULL;
539 ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */
541 ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
545 adapter->max_rx_sgl_size, /* nsegments */
550 &adapter->rx_buf_tag);
560 ret = bus_dma_tag_destroy(adapter->rx_buf_tag);
563 adapter->rx_buf_tag = NULL;
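/*
 * Editor's note: the two tag creations above elide most arguments; for
 * reference, this is the full bus_dma_tag_create(9) parameter order,
 * filled in with the Rx values visible in the excerpt. MJUM16BYTES for
 * maxsize/maxsegsz is an assumption (the driver maps jumbo-cluster
 * mbufs, see m_getjcl() in ena_alloc_rx_mbuf below).
 */
ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */
    1, 0,				  /* alignment, boundary */
    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
    BUS_SPACE_MAXADDR,			  /* highaddr of excl window */
    NULL, NULL,				  /* filter, filterarg (unused) */
    MJUM16BYTES,			  /* maxsize */
    adapter->max_rx_sgl_size,		  /* nsegments */
    MJUM16BYTES,			  /* maxsegsz */
    0,					  /* flags */
    NULL, NULL,				  /* lockfunc, lockfuncarg */
    &adapter->rx_buf_tag);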
571 struct ena_adapter *adapter = tx_ring->adapter;
577 ena_log(adapter->pdev, ERR,
579 req_id, tx_ring->qid);
581 ena_log_nm(adapter->pdev, WARN,
583 req_id, tx_ring->qid);
584 counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);
597 struct ena_adapter *adapter = tx_ring->adapter;
599 bus_dma_tag_t tx_tag = adapter->tx_buf_tag;
606 for (i = 0; i < tx_ring->ring_size; ++i) {
607 tx_info = &tx_ring->tx_buffer_info[i];
609 if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
610 nm_info = &tx_info->nm_info;
612 if (nm_info->map_seg[j] != NULL) {
614 nm_info->map_seg[j]);
615 nm_info->map_seg[j] = NULL;
620 if (tx_info->dmamap != NULL) {
621 bus_dmamap_destroy(tx_tag, tx_info->dmamap);
622 tx_info->dmamap = NULL;
628 * ena_setup_tx_resources - allocate Tx resources (Descriptors)
629 * @adapter: network interface device structure
637 device_t pdev = adapter->pdev;
639 struct ena_que *que = &adapter->que[qid];
640 struct ena_ring *tx_ring = que->tx_ring;
650 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
652 tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
653 if (unlikely(tx_ring->tx_buffer_info == NULL))
656 size = sizeof(uint16_t) * tx_ring->ring_size;
657 tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
658 if (unlikely(tx_ring->free_tx_ids == NULL))
661 size = tx_ring->tx_max_header_size;
662 tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
664 if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
668 for (i = 0; i < tx_ring->ring_size; i++)
669 tx_ring->free_tx_ids[i] = i;
672 ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
673 sizeof(tx_ring->tx_stats));
675 tx_ring->next_to_use = 0;
676 tx_ring->next_to_clean = 0;
677 tx_ring->acum_pkts = 0;
681 drbr_flush(adapter->ifp, tx_ring->br);
685 for (i = 0; i < tx_ring->ring_size; i++) {
686 err = bus_dmamap_create(adapter->tx_buf_tag, 0,
687 &tx_ring->tx_buffer_info[i].dmamap);
695 if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
696 map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
698 err = bus_dmamap_create(adapter->tx_buf_tag, 0,
712 TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
713 tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
714 taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
715 if (unlikely(tx_ring->enqueue_tq == NULL)) {
718 i = tx_ring->ring_size;
722 tx_ring->running = true;
725 cpu_mask = &que->cpu_mask;
727 device_get_nameunit(adapter->pdev), que->cpu);
730 device_get_nameunit(adapter->pdev), que->id);
732 taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET,
740 free(tx_ring->free_tx_ids, M_DEVBUF);
741 tx_ring->free_tx_ids = NULL;
743 free(tx_ring->tx_buffer_info, M_DEVBUF);
744 tx_ring->tx_buffer_info = NULL;
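/*
 * Editor's note: lifecycle recap for the enqueue taskqueue created above.
 * taskqueue_create_fast() with taskqueue_thread_enqueue gives a queue
 * drained by dedicated kernel threads, and taskqueue_start_threads_cpuset()
 * pins its worker to the queue's CPU mask. The teardown in
 * ena_free_tx_resources() (next function) is the standard pairing:
 * taskqueue_cancel() is retried while it returns non-zero (the task is
 * currently running), with taskqueue_drain() waiting out the running
 * instance, so the task can never fire after taskqueue_free().
 */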
750 * ena_free_tx_resources - Free Tx Resources per Queue
751 * @adapter: network interface device structure
759 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
765 while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL))
766 taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
768 taskqueue_free(tx_ring->enqueue_tq);
772 drbr_flush(adapter->ifp, tx_ring->br);
775 for (int i = 0; i < tx_ring->ring_size; i++) {
776 bus_dmamap_sync(adapter->tx_buf_tag,
777 tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
778 bus_dmamap_unload(adapter->tx_buf_tag,
779 tx_ring->tx_buffer_info[i].dmamap);
780 bus_dmamap_destroy(adapter->tx_buf_tag,
781 tx_ring->tx_buffer_info[i].dmamap);
784 if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
785 nm_info = &tx_ring->tx_buffer_info[i].nm_info;
787 if (nm_info->socket_buf_idx[j] != 0) {
788 bus_dmamap_sync(adapter->tx_buf_tag,
789 nm_info->map_seg[j],
792 nm_info->map_seg[j]);
794 bus_dmamap_destroy(adapter->tx_buf_tag,
795 nm_info->map_seg[j]);
796 nm_info->socket_buf_idx[j] = 0;
801 m_freem(tx_ring->tx_buffer_info[i].mbuf);
802 tx_ring->tx_buffer_info[i].mbuf = NULL;
807 free(tx_ring->tx_buffer_info, M_DEVBUF);
808 tx_ring->tx_buffer_info = NULL;
810 free(tx_ring->free_tx_ids, M_DEVBUF);
811 tx_ring->free_tx_ids = NULL;
813 free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
814 tx_ring->push_buf_intermediate_buf = NULL;
818 * ena_setup_all_tx_resources - allocate Tx resources for all queues
819 * @adapter: network interface device structure
828 for (i = 0; i < adapter->num_io_queues; i++) {
831 ena_log(adapter->pdev, ERR,
841 while (i--)
847 * ena_free_all_tx_resources - Free Tx Resources for All Queues
848 * @adapter: network interface device structure
857 for (i = 0; i < adapter->num_io_queues; i++)
862 * ena_setup_rx_resources - allocate Rx resources (Descriptors)
863 * @adapter: network interface device structure
871 device_t pdev = adapter->pdev;
872 struct ena_que *que = &adapter->que[qid];
873 struct ena_ring *rx_ring = que->rx_ring;
876 size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;
880 rx_ring->initialized = false;
889 rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
891 size = sizeof(uint16_t) * rx_ring->ring_size;
892 rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);
894 for (i = 0; i < rx_ring->ring_size; i++)
895 rx_ring->free_rx_ids[i] = i;
898 ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
899 sizeof(rx_ring->rx_stats));
901 rx_ring->next_to_clean = 0;
902 rx_ring->next_to_use = 0;
905 for (i = 0; i < rx_ring->ring_size; i++) {
906 err = bus_dmamap_create(adapter->rx_buf_tag, 0,
907 &(rx_ring->rx_buffer_info[i].map));
916 if ((if_getcapenable(adapter->ifp) & IFCAP_LRO) != 0) {
917 int err = tcp_lro_init(&rx_ring->lro);
924 rx_ring->lro.ifp = adapter->ifp;
931 while (i--) {
932 bus_dmamap_destroy(adapter->rx_buf_tag,
933 rx_ring->rx_buffer_info[i].map);
936 free(rx_ring->free_rx_ids, M_DEVBUF);
937 rx_ring->free_rx_ids = NULL;
938 free(rx_ring->rx_buffer_info, M_DEVBUF);
939 rx_ring->rx_buffer_info = NULL;
944 * ena_free_rx_resources - Free Rx Resources
945 * @adapter: network interface device structure
953 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
956 for (int i = 0; i < rx_ring->ring_size; i++) {
957 bus_dmamap_sync(adapter->rx_buf_tag,
958 rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
959 m_freem(rx_ring->rx_buffer_info[i].mbuf);
960 rx_ring->rx_buffer_info[i].mbuf = NULL;
961 bus_dmamap_unload(adapter->rx_buf_tag,
962 rx_ring->rx_buffer_info[i].map);
963 bus_dmamap_destroy(adapter->rx_buf_tag,
964 rx_ring->rx_buffer_info[i].map);
968 tcp_lro_free(&rx_ring->lro);
971 free(rx_ring->rx_buffer_info, M_DEVBUF);
972 rx_ring->rx_buffer_info = NULL;
974 free(rx_ring->free_rx_ids, M_DEVBUF);
975 rx_ring->free_rx_ids = NULL;
979 * ena_setup_all_rx_resources - allocate Rx resources for all queues
980 * @adapter: network interface device structure
989 for (i = 0; i < adapter->num_io_queues; i++) {
992 ena_log(adapter->pdev, ERR,
1001 while (i--)
1007 * ena_free_all_rx_resources - Free Rx resources for all queues
1008 * @adapter: network interface device structure
1017 for (i = 0; i < adapter->num_io_queues; i++)
1025 device_t pdev = adapter->pdev;
1032 if (unlikely(rx_info->mbuf != NULL))
1036 rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1037 rx_ring->rx_mbuf_sz);
1039 if (unlikely(rx_info->mbuf == NULL)) {
1040 counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
1041 rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1042 if (unlikely(rx_info->mbuf == NULL)) {
1043 counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
1048 mlen = rx_ring->rx_mbuf_sz;
1051 rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;
1056 adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
1057 error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
1058 rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
1062 counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
1066 bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);
1068 ena_buf = &rx_info->ena_buf;
1069 ena_buf->paddr = segs[0].ds_addr;
1070 ena_buf->len = mlen;
1074 rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);
1079 m_freem(rx_info->mbuf);
1080 rx_info->mbuf = NULL;
1088 if (rx_info->mbuf == NULL) {
1089 ena_log(adapter->pdev, WARN,
1094 bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
1096 bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
1097 m_freem(rx_info->mbuf);
1098 rx_info->mbuf = NULL;
1102 * ena_refill_rx_bufs - Refills ring with descriptors
1105 * Refills the ring with newly allocated DMA-mapped mbufs for receiving
1110 struct ena_adapter *adapter = rx_ring->adapter;
1111 device_t pdev = adapter->pdev;
1116 ena_log_io(adapter->pdev, DBG, "refill qid: %d\n", rx_ring->qid);
1118 next_to_use = rx_ring->next_to_use;
1123 ena_log_io(pdev, DBG, "RX buffer - next to use: %d\n",
1126 req_id = rx_ring->free_rx_ids[next_to_use];
1127 rx_info = &rx_ring->rx_buffer_info[req_id];
1129 if (ena_rx_ring_in_netmap(adapter, rx_ring->qid))
1138 rx_ring->qid);
1141 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
1142 &rx_info->ena_buf, req_id);
1146 rx_ring->qid);
1150 rx_ring->ring_size);
1154 counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
1157 rx_ring->qid, i, num);
1161 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
1163 rx_ring->next_to_use = next_to_use;
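/*
 * Editor's note: skeleton of the refill loop above with the elided lines
 * summarized; ENA_RX_RING_IDX_NEXT() is an assumption for the masked
 * ring-index advance the excerpt does not show.
 */
for (i = 0; i < num; i++) {
	req_id = rx_ring->free_rx_ids[next_to_use];
	rx_info = &rx_ring->rx_buffer_info[req_id];
	if (ena_alloc_rx_mbuf(adapter, rx_ring, rx_info) != 0)
		break;			/* partial refill, counted below */
	if (ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
	    &rx_info->ena_buf, req_id) != 0)
		break;
	next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, rx_ring->ring_size);
}
if (i < num)
	counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
/* Make the new descriptors visible to the device before updating state. */
ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
rx_ring->next_to_use = next_to_use;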
1173 netmap_detach(adapter->ifp);
1176 ena_log(adapter->pdev, ERR, "netmap attach failed: %d\n", rc);
1190 old_buf_ring_size = adapter->buf_ring_size;
1191 adapter->buf_ring_size = new_buf_ring_size;
1214 ena_log(adapter->pdev, ERR,
1215 "Failed to configure device after setting new drbr size: %u. Reverting old value: %u and triggering the reset\n",
1219 adapter->buf_ring_size = old_buf_ring_size;
1245 old_tx_size = adapter->requested_tx_ring_size;
1246 old_rx_size = adapter->requested_rx_ring_size;
1247 adapter->requested_tx_ring_size = new_tx_size;
1248 adapter->requested_rx_ring_size = new_rx_size;
1264 ena_log(adapter->pdev, ERR,
1265 "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n",
1269 adapter->requested_tx_ring_size = old_tx_size;
1270 adapter->requested_rx_ring_size = old_rx_size;
1281 ena_log(adapter->pdev, ERR,
1282 "Failed to revert old queue sizes. Triggering device reset.\n");
1285 * wrong. After reset, the device should try to
1304 ena_com_rss_destroy(adapter->ena_dev);
1306 adapter->num_io_queues = num;
1318 old_num = adapter->irq_cpu_base;
1322 adapter->irq_cpu_base = new_num;
1327 ena_log(adapter->pdev, ERR,
1328 "Failed to configure device %d IRQ base CPU. "
1332 adapter->irq_cpu_base = old_num;
1336 ena_log(adapter->pdev, ERR,
1338 "Triggering device reset.\n");
1357 old_num = adapter->irq_cpu_stride;
1361 adapter->irq_cpu_stride = new_num;
1366 ena_log(adapter->pdev, ERR,
1367 "Failed to configure device %d IRQ CPU stride. "
1371 adapter->irq_cpu_stride = old_num;
1375 ena_log(adapter->pdev, ERR,
1377 "Triggering device reset.\n");
1397 old_num = adapter->num_io_queues;
1410 ena_log(adapter->pdev, ERR,
1411 "Failed to configure device with %u IO queues. "
1424 ena_log(adapter->pdev, ERR,
1426 "queues. Triggering device reset.\n");
1441 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
1444 for (i = 0; i < rx_ring->ring_size; i++) {
1445 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
1447 if (rx_info->mbuf != NULL)
1450 if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) &&
1451 (if_getcapenable(adapter->ifp) & IFCAP_NETMAP)) {
1452 if (rx_info->netmap_buf_idx != 0)
1461 * ena_refill_all_rx_bufs - refill Rx buffers for all queues
1462 * @adapter: network interface device structure
1471 for (i = 0; i < adapter->num_io_queues; i++) {
1472 rx_ring = &adapter->rx_ring[i];
1473 bufs_num = rx_ring->ring_size - 1;
1476 ena_log_io(adapter->pdev, WARN,
1481 rx_ring->initialized = true;
1491 for (i = 0; i < adapter->num_io_queues; i++)
1496 * ena_free_tx_bufs - Free Tx Buffers per Queue
1497 * @adapter: network interface device structure
1504 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
1507 for (int i = 0; i < tx_ring->ring_size; i++) {
1508 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
1510 if (tx_info->mbuf == NULL)
1514 ena_log(adapter->pdev, WARN,
1519 ena_log(adapter->pdev, DBG,
1524 bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
1526 bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap);
1528 m_free(tx_info->mbuf);
1529 tx_info->mbuf = NULL;
1537 for (int i = 0; i < adapter->num_io_queues; i++)
1547 for (i = 0; i < adapter->num_io_queues; i++) {
1549 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1559 for (i = 0; i < adapter->num_io_queues; i++) {
1561 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1571 for (i = 0; i < adapter->num_io_queues; i++) {
1572 queue = &adapter->que[i];
1573 while (taskqueue_cancel(queue->cleanup_tq, &queue->cleanup_task, NULL))
1574 taskqueue_drain(queue->cleanup_tq, &queue->cleanup_task);
1575 taskqueue_free(queue->cleanup_tq);
1585 struct ena_com_dev *ena_dev = adapter->ena_dev;
1595 for (i = 0; i < adapter->num_io_queues; i++) {
1598 ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1600 ctx.queue_size = adapter->requested_tx_ring_size;
1603 ctx.numa_node = adapter->que[i].domain;
1607 ena_log(adapter->pdev, ERR,
1611 ring = &adapter->tx_ring[i];
1613 &ring->ena_com_io_sq, &ring->ena_com_io_cq);
1615 ena_log(adapter->pdev, ERR,
1624 ena_com_update_numa_node(ring->ena_com_io_cq,
1630 for (i = 0; i < adapter->num_io_queues; i++) {
1635 ctx.queue_size = adapter->requested_rx_ring_size;
1638 ctx.numa_node = adapter->que[i].domain;
1642 ena_log(adapter->pdev, ERR,
1647 ring = &adapter->rx_ring[i];
1649 &ring->ena_com_io_sq, &ring->ena_com_io_cq);
1651 ena_log(adapter->pdev, ERR,
1660 ena_com_update_numa_node(ring->ena_com_io_cq,
1665 for (i = 0; i < adapter->num_io_queues; i++) {
1666 queue = &adapter->que[i];
1668 NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
1669 queue->cleanup_tq = taskqueue_create_fast("ena cleanup",
1670 M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq);
1673 cpu_mask = &queue->cpu_mask;
1675 taskqueue_start_threads_cpuset(&queue->cleanup_tq, 1, PI_NET,
1677 device_get_nameunit(adapter->pdev), i);
1683 while (i--)
1685 i = adapter->num_io_queues;
1687 while (i--)
1700 * ena_intr_msix_mgmnt - MSIX Interrupt Handler for admin/async queue
1708 ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1710 ena_com_aenq_intr_handler(adapter->ena_dev, arg);
1714 * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx
1721 struct ena_adapter *adapter = queue->adapter;
1722 if_t ifp = adapter->ifp;
1727 taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task);
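/*
 * Editor's note: the I/O vector handler assembled from the fragments
 * above. It is registered as a filter via bus_setup_intr() (see
 * ena_request_io_irq below), so it runs in interrupt context and does no
 * work beyond kicking the per-queue cleanup taskqueue; the
 * IFF_DRV_RUNNING guard is an assumption.
 */
static int
ena_handle_msix(void *arg)
{
	struct ena_que *queue = arg;
	struct ena_adapter *adapter = queue->adapter;
	if_t ifp = adapter->ifp;

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return (FILTER_STRAY);

	taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task);

	return (FILTER_HANDLED);
}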
1735 device_t dev = adapter->pdev;
1740 ena_log(dev, ERR, "Error, MSI-X is already enabled\n");
1745 msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
1747 adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry),
1750 ena_log(dev, DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs);
1753 adapter->msix_entries[i].entry = i;
1755 adapter->msix_entries[i].vector = i + 1;
1771 "Not enough number of MSI-x allocated: %d\n",
1778 "Enable only %d MSI-x (out of %d), reduce "
1783 adapter->msix_vecs = msix_vecs;
1789 free(adapter->msix_entries, M_DEVBUF);
1790 adapter->msix_entries = NULL;
1798 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, ENA_IRQNAME_SIZE,
1799 "ena-mgmnt@pci:%s", device_get_nameunit(adapter->pdev));
1804 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL;
1805 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
1806 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
1807 adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
1821 if (adapter->msix_entries == NULL)
1825 if (adapter->first_bind < 0) {
1826 adapter->first_bind = last_bind;
1827 last_bind = (last_bind + adapter->num_io_queues) % num_buckets;
1829 cur_bind = adapter->first_bind;
1832 for (int i = 0; i < adapter->num_io_queues; i++) {
1835 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
1836 "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i);
1837 adapter->irq_tbl[irq_idx].handler = ena_handle_msix;
1838 adapter->irq_tbl[irq_idx].data = &adapter->que[i];
1839 adapter->irq_tbl[irq_idx].vector =
1840 adapter->msix_entries[irq_idx].vector;
1841 ena_log(adapter->pdev, DBG, "ena_setup_io_intr vector: %d\n",
1842 adapter->msix_entries[irq_idx].vector);
1844 if (adapter->irq_cpu_base > ENA_BASE_CPU_UNSPECIFIED) {
1845 adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1846 (unsigned)(adapter->irq_cpu_base +
1847 i * adapter->irq_cpu_stride) % (unsigned)mp_ncpus;
1848 CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask);
1852 adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1855 CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask);
1858 if (CPU_ISSET(adapter->que[i].cpu, &cpuset_domain[idx]))
1861 adapter->que[i].domain = idx;
1863 adapter->que[i].domain = -1;
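/*
 * Editor's note: worked example of the affinity arithmetic above. With
 * irq_cpu_base = 4, irq_cpu_stride = 2 and mp_ncpus = 8, queues 0..3 land
 * on CPUs 4, 6, 0, 2. The cpuset_domain scan then records the NUMA domain
 * of the chosen CPU, or -1 when it appears in no domain mask.
 */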
1873 device_t pdev = adapter->pdev;
1874 struct ena_irq *irq;
1880 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1881 irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
1882 &irq->vector, flags);
1884 if (unlikely(irq->res == NULL)) {
1885 ena_log(pdev, ERR, "could not allocate irq vector: %d\n",
1886 irq->vector);
1890 rc = bus_setup_intr(adapter->pdev, irq->res,
1891 INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, irq->data,
1892 &irq->cookie);
1895 "failed to register interrupt handler for irq %ju: %d\n",
1896 rman_get_start(irq->res), rc);
1899 irq->requested = true;
1904 ena_log(pdev, INFO, "releasing resource for irq %d\n", irq->vector);
1905 rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector,
1906 irq->res);
1909 "dev has no parent while releasing res for irq: %d\n",
1910 irq->vector);
1911 irq->res = NULL;
1919 device_t pdev = adapter->pdev;
1920 struct ena_irq *irq;
1926 "failed to request I/O IRQ: MSI-X is not enabled\n");
1932 for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1933 irq = &adapter->irq_tbl[i];
1935 if (unlikely(irq->requested))
1938 irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
1939 &irq->vector, flags);
1940 if (unlikely(irq->res == NULL)) {
1943 "could not allocate irq vector: %d\n", irq->vector);
1947 rc = bus_setup_intr(adapter->pdev, irq->res,
1948 INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, irq->data,
1949 &irq->cookie);
1952 "failed to register interrupt handler for irq %ju: %d\n",
1953 rman_get_start(irq->res), rc);
1956 irq->requested = true;
1958 if (adapter->rss_enabled || adapter->irq_cpu_base > ENA_BASE_CPU_UNSPECIFIED) {
1959 rc = bus_bind_intr(adapter->pdev, irq->res, irq->cpu);
1962 "failed to bind interrupt handler for irq %ju to cpu %d: %d\n",
1963 rman_get_start(irq->res), irq->cpu, rc);
1967 ena_log(pdev, INFO, "queue %d - cpu %d\n",
1968 i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
1975 for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) {
1976 irq = &adapter->irq_tbl[i];
1979 /* Once we entered err: section and irq->requested is true we
1981 if (irq->requested) {
1982 rcc = bus_teardown_intr(adapter->pdev, irq->res,
1983 irq->cookie);
1986 "could not release irq: %d, error: %d\n",
1987 irq->vector, rcc);
1990 /* If we entered err: section without irq->requested set we know
1995 if (irq->res != NULL) {
1996 rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1997 irq->vector, irq->res);
2001 "dev has no parent while releasing res for irq: %d\n",
2002 irq->vector);
2003 irq->requested = false;
2004 irq->res = NULL;
2013 device_t pdev = adapter->pdev;
2014 struct ena_irq *irq;
2017 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
2018 if (irq->requested) {
2019 ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector);
2020 rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
2022 ena_log(pdev, ERR, "failed to tear down irq: %d\n",
2023 irq->vector);
2024 irq->requested = false;
2027 if (irq->res != NULL) {
2028 ena_log(pdev, DBG, "release resource irq: %d\n", irq->vector);
2029 rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
2030 irq->vector, irq->res);
2031 irq->res = NULL;
2034 "dev has no parent while releasing res for irq: %d\n",
2035 irq->vector);
2042 device_t pdev = adapter->pdev;
2043 struct ena_irq *irq;
2046 for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
2047 irq = &adapter->irq_tbl[i];
2048 if (irq->requested) {
2049 ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector);
2050 rc = bus_teardown_intr(adapter->pdev, irq->res,
2051 irq->cookie);
2054 "failed to tear down irq: %d\n",
2055 irq->vector);
2057 irq->requested = false;
2060 if (irq->res != NULL) {
2061 ena_log(pdev, DBG, "release resource irq: %d\n",
2062 irq->vector);
2063 rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
2064 irq->vector, irq->res);
2065 irq->res = NULL;
2068 "dev has no parent while releasing res for irq: %d\n",
2069 irq->vector);
2088 pci_release_msi(adapter->pdev);
2091 adapter->msix_vecs = 0;
2092 free(adapter->msix_entries, M_DEVBUF);
2093 adapter->msix_entries = NULL;
2106 for (i = 0; i < adapter->num_io_queues; i++) {
2108 io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
2110 tx_ring = &adapter->tx_ring[i];
2111 counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1);
2124 ena_log(adapter->pdev, ERR,
2130 rc = ena_change_mtu(adapter->ifp, if_getmtu(adapter->ifp));
2135 ena_reset_counters((counter_u64_t *)&adapter->hw_stats,
2136 sizeof(adapter->hw_stats));
2146 for (i = 0; i < adapter->num_io_queues; i++) {
2147 adapter->tx_ring[i].ring_size = new_tx_size;
2148 adapter->rx_ring[i].ring_size = new_rx_size;
2155 device_t pdev = adapter->pdev;
2164 set_io_rings_size(adapter, adapter->requested_tx_ring_size,
2165 adapter->requested_rx_ring_size);
2206 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2207 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2249 if (unlikely(device_is_attached(adapter->pdev) == 0)) {
2250 ena_log(adapter->pdev, ERR, "device is not attached!\n");
2257 ena_log(adapter->pdev, INFO, "device is going UP\n");
2262 ena_log(adapter->pdev, ERR, "error setting up IO interrupt\n");
2267 ena_log(adapter->pdev, ERR, "failed to request I/O IRQs\n");
2271 ena_log(adapter->pdev, INFO,
2273 adapter->num_io_queues,
2274 adapter->requested_rx_ring_size,
2275 adapter->requested_tx_ring_size,
2276 (adapter->ena_dev->tx_mem_queue_type ==
2281 ena_log(adapter->pdev, ERR,
2287 if_link_state_change(adapter->ifp, LINK_STATE_UP);
2293 counter_u64_add(adapter->dev_stats.interface_up, 1);
2297 if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2322 stats = &adapter->hw_stats;
2326 return (counter_u64_fetch(stats->rx_packets));
2328 return (counter_u64_fetch(stats->tx_packets));
2330 return (counter_u64_fetch(stats->rx_bytes));
2332 return (counter_u64_fetch(stats->tx_bytes));
2334 return (counter_u64_fetch(stats->rx_drops));
2336 return (counter_u64_fetch(stats->tx_drops));
2353 ena_log(adapter->pdev, DBG, "Media status update\n");
2357 ifmr->ifm_status = IFM_AVALID;
2358 ifmr->ifm_active = IFM_ETHER;
2362 ena_log(adapter->pdev, INFO, "Link is down\n");
2366 ifmr->ifm_status |= IFM_ACTIVE;
2367 ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX;
2400 if (if_getmtu(ifp) == ifr->ifr_mtu)
2405 ena_change_mtu(ifp, ifr->ifr_mtu);
2416 ena_log(adapter->pdev, INFO,
2439 rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
2446 if (ifr->ifr_reqcap != if_getcapenable(ifp)) {
2447 if_setcapenable(ifp, ifr->ifr_reqcap);
2474 if ((feat->offload.tx &
2480 if ((feat->offload.tx &
2485 if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0)
2488 if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0)
2491 if ((feat->offload.rx_supported &
2496 if ((feat->offload.rx_supported &
2508 host_info->supported_network_features[0] = (uint32_t)if_getcapabilities(ifp);
2514 if_t ifp = adapter->ifp;
2515 uint32_t feat = adapter->tx_offload_cap;
2550 ifp = adapter->ifp = if_gethandle(IFT_ETHER);
2562 if_setsendqlen(ifp, adapter->requested_tx_ring_size);
2575 if_sethwtsomax(ifp, ENA_TSO_MAXSIZE -
2577 if_sethwtsomaxsegcount(ifp, adapter->max_tx_sgl_size - 1);
2587 ifmedia_init(&adapter->media, IFM_IMASK, ena_media_change,
2589 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2590 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2592 ether_ifattach(ifp, adapter->mac_addr);
2605 ena_log(adapter->pdev, INFO, "device is going DOWN\n");
2608 if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2613 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2615 ena_log(adapter->pdev, ERR, "Device reset failed\n");
2625 counter_u64_add(adapter->dev_stats.interface_down, 1);
2635 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2637 &get_feat_ctx->max_queue_ext.max_queue_ext;
2638 io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
2639 max_queue_ext->max_rx_cq_num);
2641 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
2642 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
2645 &get_feat_ctx->max_queues;
2646 io_tx_sq_num = max_queues->max_sq_num;
2647 io_tx_cq_num = max_queues->max_cq_num;
2652 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
2653 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
2659 /* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */
2661 pci_msix_count(pdev) - 1);
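/*
 * Editor's note: the IO-queue budget implied by the comment above,
 * written out as one function. min_t() and pci_msix_count() appear
 * elsewhere in this file; ENA_MAX_NUM_IO_QUEUES and the exact min_t
 * composition are assumptions consistent with the truncated call.
 */
static uint32_t
example_calc_max_io_queues(device_t pdev, int io_rx_num, int io_tx_sq_num,
    int io_tx_cq_num)
{
	uint32_t n;

	n = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
	n = min_t(uint32_t, n, io_rx_num);
	n = min_t(uint32_t, n, io_tx_sq_num);
	n = min_t(uint32_t, n, io_tx_cq_num);
	/* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */
	n = min_t(uint32_t, n, pci_msix_count(pdev) - 1);
	return (n);
}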
2701 if (!(ena_dev->supported_features & llq_feature_mask)) {
2704 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2708 if (ena_dev->mem_bar == NULL) {
2710 "LLQ is advertised as supported but device doesn't expose mem bar.\n");
2711 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2718 "Failed to configure the device mode. "
2720 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2734 adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid,
2736 if (unlikely(adapter->memory == NULL)) {
2743 rc = ena_enable_wc(adapter->pdev, adapter->memory);
2750 * Save virtual address of the device's memory region
2753 ena_dev->mem_bar = rman_get_virtual(adapter->memory);
2762 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
2763 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
2764 llq_config->llq_num_decs_before_header =
2766 if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0) {
2769 llq->entry_size_recommended == ENA_ADMIN_LIST_ENTRY_SIZE_256B)) {
2770 llq_config->llq_ring_entry_size =
2772 llq_config->llq_ring_entry_size_value = 256;
2773 adapter->llq_policy = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
2776 llq_config->llq_ring_entry_size =
2778 llq_config->llq_ring_entry_size_value = 128;
2779 adapter->llq_policy = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
2786 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
2787 struct ena_com_dev *ena_dev = ctx->ena_dev;
2793 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2795 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
2797 max_queue_ext->max_rx_cq_depth,
2798 max_queue_ext->max_rx_sq_depth);
2799 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
2801 if (ena_dev->tx_mem_queue_type ==
2804 llq->max_llq_depth);
2807 max_queue_ext->max_tx_sq_depth);
2809 ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2810 max_queue_ext->max_per_packet_tx_descs);
2811 ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2812 max_queue_ext->max_per_packet_rx_descs);
2815 &ctx->get_feat_ctx->max_queues;
2816 max_rx_queue_size = min_t(uint32_t, max_queues->max_cq_depth,
2817 max_queues->max_sq_depth);
2818 max_tx_queue_size = max_queues->max_cq_depth;
2820 if (ena_dev->tx_mem_queue_type ==
2823 llq->max_llq_depth);
2826 max_queues->max_sq_depth);
2828 ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2829 max_queues->max_packet_tx_descs);
2830 ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2831 max_queues->max_packet_rx_descs);
2834 if (adapter->llq_policy == ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
2835 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2836 if (llq->max_wide_llq_depth != max_tx_queue_size) {
2837 if (llq->max_wide_llq_depth == 0) {
2838 /* if there is no large llq max depth from device, we divide
2844 max_tx_queue_size = llq->max_wide_llq_depth;
2846 ena_log(ctx->pdev, INFO,
2850 ena_log(ctx->pdev, INFO, "Using large LLQ headers\n");
2853 ena_log(ctx->pdev, WARN,
2854 "Using large headers failed: LLQ is disabled or device does not support large headers\n");
2859 max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1);
2860 max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);
2867 tx_queue_size = 1 << (flsl(tx_queue_size) - 1);
2868 rx_queue_size = 1 << (flsl(rx_queue_size) - 1);
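/*
 * Editor's note: flsl() returns the 1-based index of the most significant
 * set bit, so "1 << (flsl(x) - 1)" rounds x down to a power of two, e.g.
 * x = 1000 gives flsl(x) = 10 and 1 << 9 = 512. Ring sizes want to be
 * powers of two because the driver wraps ring indices by masking with
 * (ring_size - 1).
 */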
2870 ctx->max_tx_queue_size = max_tx_queue_size;
2871 ctx->max_rx_queue_size = max_rx_queue_size;
2872 ctx->tx_queue_size = tx_queue_size;
2873 ctx->rx_queue_size = rx_queue_size;
2892 host_info = ena_dev->host_attr.host_info;
2895 host_info->bdf = rid;
2896 host_info->os_type = ENA_ADMIN_OS_FREEBSD;
2897 host_info->kernel_ver = osreldate;
2899 sprintf(host_info->kernel_ver_str, "%d", osreldate);
2900 host_info->os_dist = 0;
2901 strncpy(host_info->os_dist_str, osrelease,
2902 sizeof(host_info->os_dist_str) - 1);
2904 host_info->driver_version = (ENA_DRV_MODULE_VER_MAJOR) |
2907 host_info->num_cpus = mp_ncpus;
2908 host_info->driver_supported_features =
2933 struct ena_com_dev *ena_dev = adapter->ena_dev;
2954 ena_log(pdev, ERR, "Cannot reset device\n");
2960 ena_log(pdev, ERR, "device version is too low\n");
2970 adapter->dma_width = dma_width;
2976 "Can not initialize ena admin queue with device\n");
2989 /* Get Device Attributes */
2993 "Cannot get attribute for ena device rc: %d\n", rc);
3005 aenq_groups &= get_feat_ctx->aenq.supported_groups;
3014 ena_set_llq_configurations(&llq_config, &get_feat_ctx->llq, adapter);
3016 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
3037 struct ena_com_dev *ena_dev = adapter->ena_dev;
3042 ena_log(adapter->pdev, ERR, "Error with MSI-X enablement\n");
3050 ena_log(adapter->pdev, ERR, "Cannot setup mgmnt queue intr\n");
3078 rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
3079 tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
3080 counter_u64_zero(adapter->hw_stats.rx_drops);
3081 counter_u64_add(adapter->hw_stats.rx_drops, rx_drops);
3082 counter_u64_zero(adapter->hw_stats.tx_drops);
3083 counter_u64_add(adapter->hw_stats.tx_drops, tx_drops);
3086 atomic_store_rel_64(&adapter->keep_alive_timestamp, stime);
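/*
 * Editor's note: the AENQ keep-alive descriptor carries its 64-bit drop
 * counters split into 32-bit halves, reassembled by the shift-or above.
 * The zero-then-add pairs are the counter(9) idiom for "set": counter_u64
 * has no atomic store, only counter_u64_zero() and counter_u64_add().
 */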
3096 if (adapter->wd_active == 0)
3099 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3102 timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp);
3103 time = getsbinuptime() - timestamp;
3104 if (unlikely(time > adapter->keep_alive_timeout)) {
3105 ena_log(adapter->pdev, ERR, "Keep alive watchdog timeout.\n");
3106 if (ena_com_aenq_has_keep_alive(adapter->ena_dev))
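/*
 * Editor's note: the keep-alive timestamp uses sbintime_t end to end:
 * ena_keep_alive_wd() above publishes getsbinuptime() with
 * atomic_store_rel_64(), and this check reads it with
 * atomic_load_acq_64(), so the acquire/release pair orders the timestamp
 * read against the AENQ handler's store before comparing the elapsed
 * time with keep_alive_timeout.
 */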
3118 if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == false)) {
3119 ena_log(adapter->pdev, ERR,
3121 counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
3122 if (ena_com_get_missing_admin_interrupt(adapter->ena_dev))
3133 if (likely(atomic_load_8(&rx_ring->first_interrupt)))
3136 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3139 rx_ring->no_interrupt_event_cnt++;
3141 if (rx_ring->no_interrupt_event_cnt ==
3143 ena_log(adapter->pdev, ERR,
3144 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
3145 rx_ring->qid);
3157 device_t pdev = adapter->pdev;
3161 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id);
3166 tx_ring->qid);
3173 tx_ring->qid);
3183 device_t pdev = adapter->pdev;
3195 for (i = 0; i < tx_ring->ring_size; i++) {
3196 tx_buf = &tx_ring->tx_buffer_info[i];
3198 if (bintime_isset(&tx_buf->timestamp) == 0)
3202 bintime_sub(&time, &tx_buf->timestamp);
3205 if (unlikely(!atomic_load_8(&tx_ring->first_interrupt) &&
3206 time_offset > 2 * adapter->missing_tx_timeout)) {
3213 "Reset the device\n",
3214 tx_ring->qid);
3221 if (unlikely(time_offset > adapter->missing_tx_timeout)) {
3223 if (tx_buf->print_once) {
3224 time_since_last_cleanup = TICKS_2_MSEC(ticks -
3225 tx_ring->tx_last_cleanup_ticks);
3227 adapter->missing_tx_timeout);
3231 tx_ring->qid, i, time_since_last_cleanup,
3237 tx_buf->print_once = false;
3242 if (unlikely(missed_tx > adapter->missing_tx_threshold)) {
3245 "(%d > %d). Reset the device\n",
3246 missed_tx, adapter->missing_tx_threshold);
3254 cleanup_scheduled = !!(atomic_load_16(&tx_ring->que->cleanup_task.ta_pending));
3255 cleanup_running = !!(atomic_load_8((&tx_ring->cleanup_running)));
3259 adapter->reset_reason = reset_reason;
3263 counter_u64_add(tx_ring->tx_stats.missing_tx_comp, new_missed_tx);
3281 /* Make sure another process isn't bringing the device up or down in the meantime */
3290 if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3293 budget = adapter->missing_tx_max_queues;
3295 for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) {
3296 tx_ring = &adapter->tx_ring[i];
3297 rx_ring = &adapter->rx_ring[i];
3307 budget--;
3314 adapter->next_monitored_tx_qid = i % adapter->num_io_queues;
3319 /* For the rare case where the device runs out of Rx descriptors and the
3323 * The device won't send interrupts since all the new Rx packets will be dropped
3324 * The msix handler won't allocate new Rx descriptors so the device won't be
3327 * When such a situation is detected - execute rx cleanup task in another thread
3341 for (i = 0; i < adapter->num_io_queues; i++) {
3342 rx_ring = &adapter->rx_ring[i];
3345 rx_ring->ena_com_io_sq);
3346 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3347 rx_ring->empty_rx_queue++;
3349 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3350 counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
3353 ena_log(adapter->pdev, WARN,
3357 taskqueue_enqueue(rx_ring->que->cleanup_tq,
3358 &rx_ring->que->cleanup_task);
3359 rx_ring->empty_rx_queue = 0;
3362 rx_ring->empty_rx_queue = 0;
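/*
 * Editor's note: "refill_required == ring_size - 1" means the device
 * holds no Rx buffers at all: ring_size - 1 is the most the driver ever
 * posts (see ena_refill_all_rx_bufs above, which refills bufs_num =
 * ring_size - 1), the usual convention that keeps one slot free so a
 * full ring is distinguishable from an empty one.
 */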
3371 struct ena_com_dev *ena_dev = adapter->ena_dev;
3373 if (hints->admin_completion_tx_timeout)
3374 ena_dev->admin_queue.completion_timeout =
3375 hints->admin_completion_tx_timeout * 1000;
3377 if (hints->mmio_read_timeout)
3379 ena_dev->mmio_read.reg_read_to = hints->mmio_read_timeout * 1000;
3381 if (hints->missed_tx_completion_count_threshold_to_reset)
3382 adapter->missing_tx_threshold =
3383 hints->missed_tx_completion_count_threshold_to_reset;
3385 if (hints->missing_tx_completion_timeout) {
3386 if (hints->missing_tx_completion_timeout ==
3388 adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3390 adapter->missing_tx_timeout = SBT_1MS *
3391 hints->missing_tx_completion_timeout;
3394 if (hints->driver_watchdog_timeout) {
3395 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3396 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3398 adapter->keep_alive_timeout = SBT_1MS *
3399 hints->driver_watchdog_timeout;
3404 * ena_copy_eni_metrics - Get and copy ENI metrics from the HW.
3405 * @adapter: ENA device adapter
3420 rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_metrics);
3425 ena_log(adapter->pdev, WARN,
3429 ena_log(adapter->pdev, DBG,
3433 ena_log(adapter->pdev, ERR,
3444 return ena_com_get_ena_srd_info(adapter->ena_dev, &adapter->ena_srd_info);
3454 dev = adapter->ena_dev;
3460 rc = ena_com_get_customer_metrics(adapter->ena_dev,
3461 (char *)(adapter->customer_metrics_array), len);
3471 adapter->ena_dev->host_attr.host_info;
3490 if ((adapter->metrics_sample_interval != 0) &&
3491 (++adapter->metrics_sample_interval_cnt >=
3492 adapter->metrics_sample_interval)) {
3493 taskqueue_enqueue(adapter->metrics_tq, &adapter->metrics_task);
3494 adapter->metrics_sample_interval_cnt = 0;
3499 ena_update_host_info(host_info, adapter->ifp);
3503 * Timeout when validating version indicates that the device
3507 if (ena_com_validate_version(adapter->ena_dev) ==
3509 ena_log(adapter->pdev, WARN,
3514 ena_log(adapter->pdev, WARN, "Trigger reset is on\n");
3515 taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
3528 if_t ifp = adapter->ifp;
3529 struct ena_com_dev *ena_dev = adapter->ena_dev;
3551 * Stop the device from sending AENQ events (if the device was up, and
3552 * the trigger reset was on, ena_down already performs device reset)
3555 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3563 * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX
3564 * vectors. The amount of MSIX vectors after destroy-restore may be
3578 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3588 if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr,
3590 ena_log(adapter->pdev, ERR, "Error, mac addresses differ\n");
3594 if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) {
3595 ena_log(adapter->pdev, ERR,
3596 "Error, device max mtu is smaller than ifp MTU\n");
3607 struct ena_com_dev *ena_dev = adapter->ena_dev;
3608 if_t ifp = adapter->ifp;
3609 device_t dev = adapter->pdev;
3617 ena_log(dev, ERR, "Cannot initialize device\n");
3624 if (adapter->wd_active != 0)
3625 adapter->wd_active = wd_active;
3629 ena_log(dev, ERR, "Validation of device parameters failed\n");
3640 ena_log(dev, ERR, "Enable MSI-X failed\n");
3649 if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues)
3650 adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3652 /* Re-initialize rings basic information */
3664 /* Indicate that device is running again and ready to work */
3673 adapter->keep_alive_timestamp = getsbinuptime();
3692 ena_log(dev, ERR, "Reset attempt failed. Can not reset the device\n");
3704 if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_CUSTOMER_METRICS))
3706 else if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS))
3709 if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENA_SRD_INFO))
3726 ena_log(adapter->pdev, INFO,
3727 "Device reset completed successfully, Driver info: %s\n",
3736 ena_free_counters((counter_u64_t *)&adapter->hw_stats,
3738 ena_free_counters((counter_u64_t *)&adapter->dev_stats,
3743 * ena_attach - Device Initialization Routine
3744 * @pdev: device information struct
3748 * ena_attach initializes an adapter identified by a device structure.
3765 adapter->pdev = pdev;
3766 adapter->first_bind = -1;
3769 * Set up the timer service - driver is responsible for avoiding
3773 adapter->keep_alive_timeout = ENA_DEFAULT_KEEP_ALIVE_TO;
3774 adapter->missing_tx_timeout = ENA_DEFAULT_TX_CMP_TO;
3775 adapter->missing_tx_max_queues = ENA_DEFAULT_TX_MONITORED_QUEUES;
3776 adapter->missing_tx_threshold = ENA_DEFAULT_TX_CMP_THRESHOLD;
3778 adapter->irq_cpu_base = ENA_BASE_CPU_UNSPECIFIED;
3779 adapter->irq_cpu_stride = 0;
3782 adapter->rss_enabled = 1;
3792 adapter->ena_dev = ena_dev;
3793 ena_dev->dmadev = pdev;
3796 adapter->memory = NULL;
3797 adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid,
3799 if (unlikely(adapter->registers == NULL)) {
3809 adapter->msix = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
3811 if (unlikely(adapter->msix == NULL)) {
3817 adapter->msix_rid = msix_rid;
3820 ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
3824 ((struct ena_bus *)(ena_dev->bus))->reg_bar_t = rman_get_bustag(
3825 adapter->registers);
3826 ((struct ena_bus *)(ena_dev->bus))->reg_bar_h = rman_get_bushandle(
3827 adapter->registers);
3829 if (unlikely(((struct ena_bus *)(ena_dev->bus))->reg_bar_h == 0)) {
3841 ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;
3846 /* Device initialization */
3847 rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
3849 ena_log(pdev, ERR, "ENA device init failed! (err: %d)\n", rc);
3854 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3855 adapter->disable_meta_caching = !!(
3859 adapter->keep_alive_timestamp = getsbinuptime();
3861 adapter->tx_offload_cap = get_feat_ctx.offload.tx;
3863 memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
3879 adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
3880 adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
3881 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
3882 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
3883 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
3884 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
3886 adapter->max_num_io_queues = max_num_io_queues;
3888 adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE;
3890 adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
3892 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3923 adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3941 ena_alloc_counters((counter_u64_t *)&adapter->dev_stats,
3943 ena_alloc_counters((counter_u64_t *)&adapter->hw_stats,
3951 TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
3952 adapter->reset_tq = taskqueue_create("ena_reset_enqueue",
3953 M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq);
3954 taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, "%s rstq",
3955 device_get_nameunit(adapter->pdev));
3958 TASK_INIT(&adapter->metrics_task, 0, ena_metrics_task, adapter);
3959 adapter->metrics_tq = taskqueue_create("ena_metrics_enqueue",
3960 M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->metrics_tq);
3961 taskqueue_start_threads(&adapter->metrics_tq, 1, PI_NET, "%s metricsq",
3962 device_get_nameunit(adapter->pdev));
3973 if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
3983 ether_ifdetach(adapter->ifp);
3984 ifmedia_removeall(&adapter->media);
3985 free(adapter->customer_metrics_array, M_DEVBUF);
3991 ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR);
4004 free(ena_dev->bus, M_DEVBUF);
4014 * ena_detach - Device Removal Routine
4015 * @pdev: device information struct
4017 * ena_detach is called by the device subsystem to alert the driver
4018 * that it should release a PCI device.
4024 struct ena_com_dev *ena_dev = adapter->ena_dev;
4028 if (if_vlantrunkinuse(adapter->ifp)) {
4029 ena_log(adapter->pdev, ERR, "VLAN is in use, detach first\n");
4037 ether_ifdetach(adapter->ifp);
4039 ifmedia_removeall(&adapter->media);
4047 while (taskqueue_cancel(adapter->metrics_tq, &adapter->metrics_task, NULL))
4048 taskqueue_drain(adapter->metrics_tq, &adapter->metrics_task);
4049 taskqueue_free(adapter->metrics_tq);
4052 while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL))
4053 taskqueue_drain(adapter->reset_tq, &adapter->reset_task);
4054 taskqueue_free(adapter->reset_tq);
4062 ena_sysctl_update_queue_node_nb(adapter, adapter->num_io_queues,
4063 adapter->max_num_io_queues);
4066 netmap_detach(adapter->ifp);
4073 ena_log(adapter->pdev, WARN,
4078 ena_log(adapter->pdev, WARN,
4085 if (adapter->rss_indir != NULL)
4086 free(adapter->rss_indir, M_DEVBUF);
4093 free(adapter->customer_metrics_array, M_DEVBUF);
4097 if_free(adapter->ifp);
4099 free(ena_dev->bus, M_DEVBUF);
4123 ifp = adapter->ifp;
4124 status = aenq_desc->flags &
4128 ena_log(adapter->pdev, INFO, "link is UP\n");
4133 ena_log(adapter->pdev, INFO, "link is DOWN\n");
4145 ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
4146 adapter->ena_dev, "Invalid group(%x) expected %x\n",
4147 aenq_e->aenq_common_desc.group, ENA_ADMIN_NOTIFICATION);
4149 switch (aenq_e->aenq_common_desc.syndrome) {
4152 (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4);
4156 ena_log(adapter->pdev, ERR,
4158 aenq_e->aenq_common_desc.syndrome);
4185 ena_log(adapter->pdev, ERR,
4197 bitmap = desc->notifications_bitmap;
4200 ena_log(adapter->pdev, INFO,
4206 bit--;
4207 ena_log(adapter->pdev, INFO,
4208 "Sub-optimal configuration notification code: %" PRIu64 " Refer to AWS ENA documentation for additional details and mitigation options.\n",
4219 ena_log(adapter->pdev, WARN,
4220 "The device has detected an unhealthy state, reset is requested\n");
4236 * FreeBSD Device Interface Entry Points
4239 static device_method_t ena_methods[] = { /* Device interface */
4252 MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
4253 nitems(ena_vendor_info_array) - 1);