Lines Matching +full:firmware +full:- +full:initialized

1 /* SPDX-License-Identifier: BSD-3-Clause */
252 * scctx->isc_tx_tso_size_max + the VLAN header is a valid size.
256 * DMA tag. However, scctx->isc_tx_tso_segsize_max is used to set the
285 * IFLIB_SKIP_MSIX allows the driver to handle allocating MSI-X
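A minimal sketch of opting out, assuming iflib's standard shared-context fields (the structure name and field subset here are illustrative, not the driver's actual table):

static struct if_shared_ctx ice_sctx_sketch = {
	.isc_magic = IFLIB_MAGIC,
	.isc_flags = IFLIB_SKIP_MSIX,	/* driver allocates MSI-X itself */
	/* remaining fields elided in this sketch */
};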
308 /* Static driver-wide sysctls */
312 * ice_pci_mapping - Map PCI BAR memory
323 rc = ice_map_bar(sc->dev, &sc->bar0, 0);
331 * ice_free_pci_mapping - Release PCI BAR memory
340 ice_free_bar(sc->dev, &sc->bar0);
348 * ice_register - register device method callback
360 * ice_setup_scctx - Setup the iflib softc context structure
369 if_softc_ctx_t scctx = sc->scctx;
370 struct ice_hw *hw = &sc->hw;
371 device_t dev = sc->dev;
374 safe_mode = ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE);
375 recovery_mode = ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE);
382 scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
383 scctx->isc_ntxqsets_max = 1;
384 scctx->isc_nrxqsets_max = 1;
390 * sysctl value is when setting up MSI-X vectors.
392 sc->ifc_sysctl_ntxqs = scctx->isc_ntxqsets;
393 sc->ifc_sysctl_nrxqs = scctx->isc_nrxqsets;
395 if (scctx->isc_ntxqsets == 0)
396 scctx->isc_ntxqsets = hw->func_caps.common_cap.rss_table_size;
397 if (scctx->isc_nrxqsets == 0)
398 scctx->isc_nrxqsets = hw->func_caps.common_cap.rss_table_size;
400 scctx->isc_ntxqsets_max = hw->func_caps.common_cap.num_txq;
401 scctx->isc_nrxqsets_max = hw->func_caps.common_cap.num_rxq;
407 if (sc->ifc_sysctl_ntxqs > scctx->isc_ntxqsets_max)
408 sc->ifc_sysctl_ntxqs = scctx->isc_ntxqsets_max;
409 if (sc->ifc_sysctl_nrxqs > scctx->isc_nrxqsets_max)
410 sc->ifc_sysctl_nrxqs = scctx->isc_nrxqsets_max;
413 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
415 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
418 scctx->isc_tx_nsegments = ICE_MAX_TX_SEGS;
419 scctx->isc_tx_tso_segments_max = ICE_MAX_TSO_SEGS;
420 scctx->isc_tx_tso_size_max = ICE_TSO_SIZE;
421 scctx->isc_tx_tso_segsize_max = ICE_MAX_DMA_SEG_SIZE;
423 scctx->isc_msix_bar = pci_msix_table_bar(dev);
424 scctx->isc_rss_table_size = hw->func_caps.common_cap.rss_table_size;
430 scctx->isc_txrx = &ice_recovery_txrx;
432 scctx->isc_txrx = &ice_txrx;
439 scctx->isc_capenable = ICE_SAFE_CAPS;
440 scctx->isc_tx_csum_flags = 0;
442 scctx->isc_capenable = ICE_FULL_CAPS;
443 scctx->isc_tx_csum_flags = ICE_CSUM_OFFLOAD;
446 scctx->isc_capabilities = scctx->isc_capenable;
450 * ice_if_attach_pre - Early device attach logic
470 ice_set_state(&sc->state, ICE_STATE_ATTACHING);
472 sc->ctx = ctx;
473 sc->media = iflib_get_media(ctx);
474 sc->sctx = iflib_get_sctx(ctx);
475 sc->iflib_ctx_lock = iflib_ctx_lock_get(ctx);
476 sc->ifp = iflib_get_ifp(ctx);
478 dev = sc->dev = iflib_get_dev(ctx);
479 scctx = sc->scctx = iflib_get_softc_ctx(ctx);
481 hw = &sc->hw;
482 hw->back = sc;
484 snprintf(sc->admin_mtx_name, sizeof(sc->admin_mtx_name),
486 mtx_init(&sc->admin_mtx, sc->admin_mtx_name, NULL, MTX_DEF);
487 callout_init_mtx(&sc->admin_timer, &sc->admin_mtx, 0);
509 device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
536 ice_aq_str(hw->adminq.sq_last_status));
544 ice_set_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN);
546 /* Notify firmware of the device driver version */
569 * and firmware, this will force them to use single VLAN mode.
576 ice_aq_str(hw->adminq.sq_last_status));
583 iflib_set_mac(ctx, hw->port_info->mac.lan_addr);
589 err = ice_resmgr_init(&sc->tx_qmgr, hw->func_caps.common_cap.num_txq);
597 err = ice_resmgr_init(&sc->rx_qmgr, hw->func_caps.common_cap.num_rxq);
611 sc->num_available_vsi = min(ICE_MAX_VSI_AVAILABLE,
612 hw->func_caps.guar_num_vsi);
614 if (!sc->num_available_vsi) {
621 sc->all_vsi = (struct ice_vsi **)
622 malloc(sizeof(struct ice_vsi *) * sc->num_available_vsi,
624 if (!sc->all_vsi) {
636 ice_alloc_vsi_qmap(&sc->pf_vsi, scctx->isc_ntxqsets_max,
637 scctx->isc_nrxqsets_max);
639 /* Allocate MSI-X vectors (due to isc_flags IFLIB_SKIP_MSIX) */
648 ice_release_vsi(&sc->pf_vsi);
649 free(sc->all_vsi, M_ICE);
650 sc->all_vsi = NULL;
654 ice_resmgr_destroy(&sc->rx_qmgr);
656 ice_resmgr_destroy(&sc->tx_qmgr);
662 mtx_lock(&sc->admin_mtx);
663 callout_stop(&sc->admin_timer);
664 mtx_unlock(&sc->admin_mtx);
665 mtx_destroy(&sc->admin_mtx);
670 * ice_attach_pre_recovery_mode - Limited driver attach_pre for FW recovery
673 * Loads the device driver in limited Firmware Recovery mode, intended to
674 * allow users to update the firmware to attempt to recover the device.
676 * @remark We may enter recovery mode in case either (a) the firmware is
677 * detected to be in an invalid state and must be re-programmed, or (b) the
678 * driver detects that the loaded firmware has a non-compatible API version
684 ice_set_state(&sc->state, ICE_STATE_RECOVERY_MODE);
690 sc->pf_vsi.sc = sc;
693 * We still need to allocate MSI-X vectors since we need one vector to
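A hedged sketch of how attach_pre can divert into this limited path; ice_get_fw_mode() and ICE_FW_MODE_REC appear elsewhere in the driver, but the exact call site shown here is illustrative:

	/* sketch: take the limited recovery attach path and return early */
	if (ice_get_fw_mode(hw) == ICE_FW_MODE_REC)
		return (ice_attach_pre_recovery_mode(sc));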
700 * ice_update_link_status - notify OS of link state change
713 struct ice_hw *hw = &sc->hw;
717 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
721 if (!ice_testandset_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED)) {
722 if (sc->link_up) { /* link is up */
723 uint64_t baudrate = ice_aq_speed_to_rate(sc->hw.port_info);
725 if (!(hw->port_info->phy.link_info_old.link_info & ICE_AQ_LINK_UP))
728 iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate);
733 iflib_link_state_change(sc->ctx, LINK_STATE_DOWN, 0);
740 if (update_media && !ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
741 status = ice_add_media_types(sc, sc->media);
743 device_printf(sc->dev, "Error adding device media types: %s aq_err %s\n",
745 ice_aq_str(hw->adminq.sq_last_status));
750 * ice_if_attach_post - Late device attach logic
767 /* We don't yet support loading if MSI-X is not supported */
768 if (sc->scctx->isc_intr != IFLIB_INTR_MSIX) {
769 device_printf(sc->dev, "The ice driver does not support loading without MSI-X\n");
773 /* The ifnet structure hasn't yet been initialized when the attach_pre
777 sc->scctx->isc_max_frame_size = if_getmtu(ifp) +
784 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
789 sc->pf_vsi.max_frame_size = sc->scctx->isc_max_frame_size;
791 err = ice_initialize_vsi(&sc->pf_vsi);
793 device_printf(sc->dev, "Unable to initialize Main VSI: %s\n",
802 err = ice_config_rss(&sc->pf_vsi);
804 device_printf(sc->dev,
828 status = ice_aq_set_pfc_mode(&sc->hw, ICE_AQC_PFC_VLAN_BASED_PFC, NULL);
830 device_printf(sc->dev, "Setting pfc mode failed, status %s\n", ice_status_str(status));
844 ice_enable_intr(&sc->hw, sc->irqvs[0].me);
851 mtx_lock(&sc->admin_mtx);
852 callout_reset(&sc->admin_timer, hz/2, ice_admin_timer, sc);
853 mtx_unlock(&sc->admin_mtx);
855 if (ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) &&
856 !ice_test_state(&sc->state, ICE_STATE_NO_MEDIA))
857 ice_set_state(&sc->state, ICE_STATE_FIRST_INIT_LINK);
859 ice_clear_state(&sc->state, ICE_STATE_ATTACHING);
865 * ice_attach_post_recovery_mode - Limited driver attach_post for FW recovery
869 * firmware is in recovery mode.
878 ice_enable_intr(&sc->hw, sc->irqvs[0].me);
881 mtx_lock(&sc->admin_mtx);
882 callout_reset(&sc->admin_timer, hz/2, ice_admin_timer, sc);
883 mtx_unlock(&sc->admin_mtx);
885 ice_clear_state(&sc->state, ICE_STATE_ATTACHING);
889 * ice_free_irqvs - Free IRQ vector memory
897 struct ice_vsi *vsi = &sc->pf_vsi;
898 if_ctx_t ctx = sc->ctx;
902 if (sc->irqvs == NULL)
906 for (i = 0; i < sc->num_irq_vectors; i++)
907 iflib_irq_free(ctx, &sc->irqvs[i].irq);
910 for (i = 0; i < vsi->num_rx_queues; i++)
911 vsi->rx_queues[i].irqv = NULL;
913 for (i = 0; i < vsi->num_tx_queues; i++)
914 vsi->tx_queues[i].irqv = NULL;
917 free(sc->irqvs, M_ICE);
918 sc->irqvs = NULL;
919 sc->num_irq_vectors = 0;
923 * ice_if_detach - Device driver detach logic
936 struct ice_vsi *vsi = &sc->pf_vsi;
943 ice_set_state(&sc->state, ICE_STATE_DETACHING);
946 mtx_lock(&sc->admin_mtx);
947 callout_stop(&sc->admin_timer);
948 mtx_unlock(&sc->admin_mtx);
949 mtx_destroy(&sc->admin_mtx);
952 if (sc->mirr_if)
957 ifmedia_removeall(sc->media);
967 /* Release MSI-X resources */
970 for (i = 0; i < sc->num_available_vsi; i++) {
971 if (sc->all_vsi[i])
972 ice_release_vsi(sc->all_vsi[i]);
975 if (sc->all_vsi) {
976 free(sc->all_vsi, M_ICE);
977 sc->all_vsi = NULL;
980 /* Release MSI-X memory */
981 pci_release_msi(sc->dev);
983 if (sc->msix_table != NULL) {
984 bus_release_resource(sc->dev, SYS_RES_MEMORY,
985 rman_get_rid(sc->msix_table),
986 sc->msix_table);
987 sc->msix_table = NULL;
993 ice_resmgr_destroy(&sc->tx_qmgr);
994 ice_resmgr_destroy(&sc->rx_qmgr);
996 if (!ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
997 ice_deinit_hw(&sc->hw);
1000 status = ice_reset(&sc->hw, ICE_RESET_PFR);
1003 device_printf(sc->dev, "device PF reset failed, err %s\n",
1013 * ice_if_tx_queues_alloc - Allocate Tx queue memory
1029 struct ice_vsi *vsi = &sc->pf_vsi;
1034 MPASS(sc->scctx->isc_ntxd[0] <= ICE_MAX_DESC_COUNT);
1038 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
1042 if (!(vsi->tx_queues =
1044 device_printf(sc->dev, "Unable to allocate Tx queue memory\n");
1049 for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
1050 if (!(txq->tx_rsq =
1051 (uint16_t *) malloc(sizeof(uint16_t) * sc->scctx->isc_ntxd[0], M_ICE, M_NOWAIT))) {
1052 device_printf(sc->dev, "Unable to allocate tx_rsq memory\n");
1057 for (j = 0; j < sc->scctx->isc_ntxd[0]; j++)
1058 txq->tx_rsq[j] = QIDX_INVALID;
1062 err = ice_resmgr_assign_contiguous(&sc->tx_qmgr, vsi->tx_qmap, ntxqsets);
1064 device_printf(sc->dev, "Unable to assign PF queues: %s\n",
1068 vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS;
1073 for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
1075 txq->me = txq->q_handle = i;
1076 txq->vsi = vsi;
1079 txq->desc_count = sc->scctx->isc_ntxd[0];
1082 txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]);
1083 txq->tx_base = (struct ice_tx_desc *)vaddrs[i];
1084 txq->tx_paddr = paddrs[i];
1089 vsi->num_tx_queues = ntxqsets;
1094 for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
1095 if (txq->tx_rsq != NULL) {
1096 free(txq->tx_rsq, M_ICE);
1097 txq->tx_rsq = NULL;
1100 free(vsi->tx_queues, M_ICE);
1101 vsi->tx_queues = NULL;
1106 * ice_if_rx_queues_alloc - Allocate Rx queue memory
1121 struct ice_vsi *vsi = &sc->pf_vsi;
1126 MPASS(sc->scctx->isc_nrxd[0] <= ICE_MAX_DESC_COUNT);
1130 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
1134 if (!(vsi->rx_queues =
1136 device_printf(sc->dev, "Unable to allocate Rx queue memory\n");
1141 err = ice_resmgr_assign_contiguous(&sc->rx_qmgr, vsi->rx_qmap, nrxqsets);
1143 device_printf(sc->dev, "Unable to assign PF queues: %s\n",
1147 vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS;
1152 for (i = 0, rxq = vsi->rx_queues; i < nrxqsets; i++, rxq++) {
1153 rxq->me = i;
1154 rxq->vsi = vsi;
1157 rxq->desc_count = sc->scctx->isc_nrxd[0];
1160 rxq->tail = QRX_TAIL(vsi->rx_qmap[i]);
1161 rxq->rx_base = (union ice_32b_rx_flex_desc *)vaddrs[i];
1162 rxq->rx_paddr = paddrs[i];
1167 vsi->num_rx_queues = nrxqsets;
1172 free(vsi->rx_queues, M_ICE);
1173 vsi->rx_queues = NULL;
1178 * ice_if_queues_free - Free queue memory
1198 struct ice_vsi *vsi = &sc->pf_vsi;
1210 /* Release MSI-X IRQ vectors, if not yet released in ice_if_detach */
1213 if (vsi->tx_queues != NULL) {
1215 for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) {
1216 if (txq->tx_rsq != NULL) {
1217 free(txq->tx_rsq, M_ICE);
1218 txq->tx_rsq = NULL;
1221 free(vsi->tx_queues, M_ICE);
1222 vsi->tx_queues = NULL;
1223 vsi->num_tx_queues = 0;
1225 if (vsi->rx_queues != NULL) {
1226 free(vsi->rx_queues, M_ICE);
1227 vsi->rx_queues = NULL;
1228 vsi->num_rx_queues = 0;
1233 * ice_msix_que - Fast interrupt handler for MSI-X receive queues
1236 * Interrupt filter function for iflib MSI-X interrupts. Called by iflib when
1237 * an MSI-X interrupt for a given queue is triggered. Currently this just asks
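As a hedged sketch, such an iflib queue filter usually has this shape; FILTER_SCHEDULE_THREAD is the standard sys/bus.h return value, and the body shown is illustrative:

static int
ice_msix_que_sketch(void *arg)
{
	/* sketch: defer the real work to the iflib queue task */
	return (FILTER_SCHEDULE_THREAD);
}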
1251 * ice_msix_admin - Fast interrupt handler for MSI-X admin interrupt
1262 struct ice_hw *hw = &sc->hw;
1263 device_t dev = sc->dev;
1270 * vector will not be re-enabled until after we exit this function,
1289 ice_set_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING);
1292 ice_set_state(&sc->state, ICE_STATE_VFLR_PENDING);
1296 ice_set_state(&sc->state, ICE_STATE_MDD_PENDING);
1306 sc->soft_stats.corer_count++;
1308 sc->soft_stats.globr_count++;
1310 sc->soft_stats.empr_count++;
1316 * happen. Second, we set hw->reset_ongoing to indicate that
1325 if (!ice_testandset_state(&sc->state, ICE_STATE_RESET_OICR_RECV)) {
1326 hw->reset_ongoing = true;
1329 * goes down and then up. The below if-statement prevents a second
1332 if (if_getflags(sc->ifp) & IFF_UP)
1333 ice_set_state(&sc->state, ICE_STATE_FIRST_INIT_LINK);
1339 ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
1351 ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
1358 * ice_allocate_msix - Allocate MSI-X vectors for the interface
1361 * Map the MSI-X bar, and then request MSI-X vectors in a two-stage process.
1373 * IFLIB_SKIP_MSIX flag indicating that the driver will manage MSI-X vectors
1376 * @remark This driver will only use MSI-X vectors. If this is not possible,
1379 * @remark if it exists, os_imgr is initialized here for keeping track of
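The two-stage request reduces to roughly this hedged sketch, using the standard pci_alloc_msix(9) API; "requested" and the trim step are illustrative:

	vectors = requested;
	if (pci_alloc_msix(dev, &vectors) != 0) {
		device_printf(dev, "Unable to allocate any MSI-X vectors\n");
		goto err;
	}
	if (vectors < requested) {
		/* stage two: trim RDMA vectors first, then queue
		 * vectors, until the granted count is sufficient */
	}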
1391 if_softc_ctx_t scctx = sc->scctx;
1392 device_t dev = sc->dev;
1398 /* Allocate the MSI-X bar */
1399 bar = scctx->isc_msix_bar;
1400 sc->msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar, RF_ACTIVE);
1401 if (!sc->msix_table) {
1402 device_printf(dev, "Unable to map MSI-X table\n");
1407 if (sc->ifc_sysctl_ntxqs || sc->ifc_sysctl_nrxqs)
1439 queues = imin(queues, sc->ifc_sysctl_ntxqs ?: scctx->isc_ntxqsets);
1440 queues = imin(queues, sc->ifc_sysctl_nrxqs ?: scctx->isc_nrxqsets);
1442 if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_RDMA)) {
1463 if_ctx_t ctx = sc->ctx;
1470 device_printf(dev, "Failed to allocate %d MSI-X vectors, err %s\n",
1477 int diff = requested - vectors;
1479 device_printf(dev, "Requested %d MSI-X vectors, but got only %d\n",
1492 rdma -= diff;
1496 ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
1497 diff -= rdma;
1509 device_printf(dev, "Unable to allocate sufficient MSI-X vectors\n");
1514 queues -= diff;
1519 device_printf(dev, "Reserving %d MSI-X interrupts for iRDMA\n",
1521 device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
1525 scctx->isc_vectors = vectors;
1526 scctx->isc_nrxqsets = queues;
1527 scctx->isc_ntxqsets = queues;
1528 scctx->isc_intr = IFLIB_INTR_MSIX;
1530 sc->irdma_vectors = rdma;
1535 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
1539 sc->lan_vectors = vectors - rdma;
1540 sc->lan_vectors -= extra_vectors;
1541 err = ice_resmgr_assign_contiguous(&sc->dev_imgr, sc->pf_imap, sc->lan_vectors);
1547 err = ice_resmgr_assign_contiguous(&sc->dev_imgr, sc->rdma_imap, rdma);
1553 sc->extra_vectors = extra_vectors;
1558 err = ice_resmgr_init(&sc->os_imgr, sc->extra_vectors);
1562 ice_resmgr_release_map(&sc->dev_imgr, sc->rdma_imap,
1569 ice_resmgr_release_map(&sc->dev_imgr, sc->pf_imap,
1570 sc->lan_vectors);
1574 if (sc->msix_table != NULL) {
1575 bus_release_resource(sc->dev, SYS_RES_MEMORY,
1576 rman_get_rid(sc->msix_table),
1577 sc->msix_table);
1578 sc->msix_table = NULL;
1585 * ice_if_msix_intr_assign - Assign MSI-X interrupt vectors to queues
1589 * Called by iflib to assign MSI-X vectors to queues. Currently requires that
1599 struct ice_vsi *vsi = &sc->pf_vsi;
1604 if (vsi->num_rx_queues != vsi->num_tx_queues) {
1605 device_printf(sc->dev,
1607 vsi->num_tx_queues, vsi->num_rx_queues);
1611 if (msix < (vsi->num_rx_queues + 1)) {
1612 device_printf(sc->dev,
1613 "Not enough MSI-X vectors to assign one vector to each queue pair\n");
1618 sc->num_irq_vectors = vsi->num_rx_queues + 1;
1621 if (!(sc->irqvs =
1622 (struct ice_irq_vector *) malloc(sizeof(struct ice_irq_vector) * (sc->num_irq_vectors),
1624 device_printf(sc->dev,
1630 err = iflib_irq_alloc_generic(ctx, &sc->irqvs[0].irq, 1, IFLIB_INTR_ADMIN,
1633 device_printf(sc->dev,
1638 sc->irqvs[0].me = 0;
1641 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
1645 for (i = 0, vector = 1; i < vsi->num_rx_queues; i++, vector++) {
1646 struct ice_rx_queue *rxq = &vsi->rx_queues[i];
1647 struct ice_tx_queue *txq = &vsi->tx_queues[i];
1653 err = iflib_irq_alloc_generic(ctx, &sc->irqvs[vector].irq, rid,
1655 rxq, rxq->me, irq_name);
1657 device_printf(sc->dev,
1660 vector--;
1661 i--;
1664 sc->irqvs[vector].me = vector;
1665 rxq->irqv = &sc->irqvs[vector];
1670 iflib_softirq_alloc_generic(ctx, &sc->irqvs[vector].irq,
1672 txq->me, irq_name);
1673 txq->irqv = &sc->irqvs[vector];
1677 sc->last_rid = rid + sc->irdma_vectors;
1681 for (; i >= 0; i--, vector--)
1682 iflib_irq_free(ctx, &sc->irqvs[vector].irq);
1683 iflib_irq_free(ctx, &sc->irqvs[0].irq);
1685 free(sc->irqvs, M_ICE);
1686 sc->irqvs = NULL;
1691 * ice_if_mtu_set - Set the device MTU
1707 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
1713 sc->scctx->isc_max_frame_size = mtu +
1716 sc->pf_vsi.max_frame_size = sc->scctx->isc_max_frame_size;
1722 * ice_if_intr_enable - Enable device interrupts
1731 struct ice_vsi *vsi = &sc->pf_vsi;
1732 struct ice_hw *hw = &sc->hw;
1737 ice_enable_intr(hw, sc->irqvs[0].me);
1740 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
1744 for (int i = 0; i < vsi->num_rx_queues; i++)
1745 ice_enable_intr(hw, vsi->rx_queues[i].irqv->me);
1749 * ice_if_intr_disable - Disable device interrupts
1758 struct ice_hw *hw = &sc->hw;
1772 for (i = 1; i < hw->func_caps.common_cap.num_msix_vectors; i++)
1777 * ice_if_rx_queue_intr_enable - Enable a specific Rx queue interrupt
1789 struct ice_vsi *vsi = &sc->pf_vsi;
1790 struct ice_hw *hw = &sc->hw;
1793 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
1796 ice_enable_intr(hw, vsi->rx_queues[rxqid].irqv->me);
1801 * ice_if_tx_queue_intr_enable - Enable a specific Tx queue interrupt
1813 struct ice_vsi *vsi = &sc->pf_vsi;
1814 struct ice_hw *hw = &sc->hw;
1817 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
1820 ice_enable_intr(hw, vsi->tx_queues[txqid].irqv->me);
1825 * ice_set_default_promisc_mask - Set default config for promisc settings
1831 * non-VLAN-tagged/VLAN 0 traffic.
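A hedged sketch of such a default mask, assuming the ICE_PROMISC_* flag names and bitmap helpers used elsewhere in this driver (the exact flag coverage is illustrative):

	ice_zero_bitmap(promisc_mask, ICE_PROMISC_MAX);
	ice_set_bit(ICE_PROMISC_UCAST_TX, promisc_mask);
	ice_set_bit(ICE_PROMISC_UCAST_RX, promisc_mask);
	ice_set_bit(ICE_PROMISC_MCAST_TX, promisc_mask);
	ice_set_bit(ICE_PROMISC_MCAST_RX, promisc_mask);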
1844 * ice_if_promisc_set - Set device promiscuous mode
1856 struct ice_hw *hw = &sc->hw;
1857 device_t dev = sc->dev;
1864 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
1873 status = ice_set_vsi_promisc(hw, sc->pf_vsi.idx,
1879 ice_aq_str(hw->adminq.sq_last_status));
1883 status = ice_clear_vsi_promisc(hw, sc->pf_vsi.idx,
1889 ice_aq_str(hw->adminq.sq_last_status));
1898 * ice_if_media_change - Change device media
1909 device_printf(sc->dev, "Media change is not supported.\n");
1914 * ice_if_media_status - Report current device media
1925 struct ice_link_status *li = &sc->hw.port_info->phy.link_info;
1927 ifmr->ifm_status = IFM_AVALID;
1928 ifmr->ifm_active = IFM_ETHER;
1931 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
1934 if (!sc->link_up)
1937 ifmr->ifm_status |= IFM_ACTIVE;
1938 ifmr->ifm_active |= IFM_FDX;
1940 if (li->phy_type_low)
1941 ifmr->ifm_active |= ice_get_phy_type_low(li->phy_type_low);
1942 else if (li->phy_type_high)
1943 ifmr->ifm_active |= ice_get_phy_type_high(li->phy_type_high);
1945 ifmr->ifm_active |= IFM_UNKNOWN;
1948 if (li->an_info & ICE_AQ_LINK_PAUSE_TX)
1949 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1950 if (li->an_info & ICE_AQ_LINK_PAUSE_RX)
1951 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1955 * ice_init_tx_tracking - Initialize Tx queue software tracking values
1968 for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) {
1970 txq->tx_rs_cidx = txq->tx_rs_pidx = 0;
1974 * off-by-one error in ice_ift_txd_credits_update for the
1977 txq->tx_cidx_processed = txq->desc_count - 1;
1979 for (j = 0; j < txq->desc_count; j++)
1980 txq->tx_rsq[j] = QIDX_INVALID;
1985 * ice_update_rx_mbuf_sz - Update the Rx buffer size for all queues
1994 uint32_t mbuf_sz = iflib_get_rx_mbuf_sz(sc->ctx);
1995 struct ice_vsi *vsi = &sc->pf_vsi;
1998 vsi->mbuf_sz = mbuf_sz;
2002 * ice_if_init - Initialize the device
2015 device_t dev = sc->dev;
2030 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
2033 if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) {
2034 device_printf(sc->dev, "request to start interface cannot be completed as the device failed to reset\n");
2038 if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
2039 device_printf(sc->dev, "request to start interface while device is prepared for impending reset\n");
2055 ice_init_tx_tracking(&sc->pf_vsi);
2057 err = ice_cfg_vsi_for_tx(&sc->pf_vsi);
2065 err = ice_cfg_vsi_for_rx(&sc->pf_vsi);
2073 err = ice_control_all_rx_queues(&sc->pf_vsi, true);
2092 ice_configure_all_rxq_interrupts(&sc->pf_vsi);
2093 ice_configure_rx_itr(&sc->pf_vsi);
2096 ice_if_promisc_set(ctx, if_getflags(sc->ifp));
2098 if (!ice_testandclear_state(&sc->state, ICE_STATE_FIRST_INIT_LINK))
2099 if (!sc->link_up && ((if_getflags(sc->ifp) & IFF_UP) ||
2100 ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN)))
2105 ice_set_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED);
2107 if (sc->mirr_if && ice_testandclear_state(&mif->state, ICE_STATE_SUBIF_NEEDS_REINIT)) {
2108 ice_clear_state(&mif->state, ICE_STATE_DRIVER_INITIALIZED);
2109 iflib_request_reset(sc->mirr_if->subctx);
2110 iflib_admin_intr_deferred(sc->mirr_if->subctx);
2116 ice_control_all_rx_queues(&sc->pf_vsi, false);
2118 ice_vsi_disable_tx(&sc->pf_vsi);
2122 * ice_poll_for_media_avail - Re-enable link if media is detected
2126 * sends the Get Link Status AQ command and re-enables HW link if the
2136 struct ice_hw *hw = &sc->hw;
2137 struct ice_port_info *pi = hw->port_info;
2144 ice_test_state(&sc->state, ICE_STATE_PHY_FW_INIT_PENDING)) {
2146 ice_clear_state(&sc->state, ICE_STATE_PHY_FW_INIT_PENDING);
2151 if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA)) {
2152 pi->phy.get_link_info = true;
2153 ice_get_link_status(pi, &sc->link_up);
2155 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2158 /* Re-enable link and re-apply user link settings */
2159 if (ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) ||
2160 (if_getflags(sc->ifp) & IFF_UP)) {
2164 status = ice_add_media_types(sc, sc->media);
2166 device_printf(sc->dev,
2169 ice_aq_str(hw->adminq.sq_last_status));
2172 ice_clear_state(&sc->state, ICE_STATE_NO_MEDIA);
2178 * ice_if_timer - called by iflib periodically
2191 uint64_t prev_link_xoff_rx = sc->stats.cur.link_xoff_rx;
2197 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
2208 if (sc->stats.cur.link_xoff_rx != prev_link_xoff_rx)
2209 sc->scctx->isc_pause_frames = 1;
2212 ice_update_vsi_hw_stats(&sc->pf_vsi);
2215 if (sc->mirr_if && sc->mirr_if->if_attached)
2216 ice_update_vsi_hw_stats(sc->mirr_if->vsi);
2220 * ice_admin_timer - called periodically to trigger the admin task
2250 iflib_admin_intr_deferred(sc->ctx);
2253 callout_schedule(&sc->admin_timer, hz/2);
2257 * ice_transition_recovery_mode - Transition to recovery mode
2260 * Called when the driver detects that the firmware has entered recovery mode
2266 struct ice_vsi *vsi = &sc->pf_vsi;
2269 device_printf(sc->dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
2272 iflib_link_state_change(sc->ctx, LINK_STATE_DOWN, 0);
2274 /* Request that the device be re-initialized */
2278 ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
2280 ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
2281 ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
2286 for (i = 0; i < sc->num_available_vsi; i++) {
2287 if (sc->all_vsi[i])
2288 ice_release_vsi(sc->all_vsi[i]);
2290 sc->num_available_vsi = 0;
2292 if (sc->all_vsi) {
2293 free(sc->all_vsi, M_ICE);
2294 sc->all_vsi = NULL;
2298 ice_resmgr_destroy(&sc->dev_imgr);
2300 ice_resmgr_destroy(&sc->tx_qmgr);
2301 ice_resmgr_destroy(&sc->rx_qmgr);
2303 ice_deinit_hw(&sc->hw);
2307 * ice_transition_safe_mode - Transition to safe mode
2322 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap);
2323 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en);
2326 ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
2328 ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
2329 ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
2331 ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
2332 ice_clear_bit(ICE_FEATURE_RSS, sc->feat_en);
2336 * ice_if_update_admin_status - update admin status
2355 /* Check if the firmware entered recovery mode at run time */
2356 fw_mode = ice_get_fw_mode(&sc->hw);
2358 if (!ice_testandset_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
2366 if (!ice_testandset_state(&sc->state, ICE_STATE_ROLLBACK_MODE)) {
2370 ice_print_rollback_msg(&sc->hw);
2383 if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED) ||
2384 ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET) ||
2385 ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
2391 } else if (ice_testandclear_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING)) {
2396 if (ice_is_generic_mac(&sc->hw)) {
2415 * ourselves. Otherwise, we can just re-enable the interrupt. We'll be
2419 ice_set_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING);
2422 ice_enable_intr(&sc->hw, sc->irqvs[0].me);
2427 * ice_prepare_for_reset - Prepare device for an impending reset
2438 struct ice_hw *hw = &sc->hw;
2441 if (ice_testandset_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET))
2444 log(LOG_INFO, "%s: preparing to reset device logic\n", if_name(sc->ifp));
2446 /* In recovery mode, hardware is not initialized */
2447 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
2456 ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap,
2457 sc->pf_vsi.num_tx_queues);
2458 ice_resmgr_release_map(&sc->rx_qmgr, sc->pf_vsi.rx_qmap,
2459 sc->pf_vsi.num_rx_queues);
2460 if (sc->mirr_if) {
2461 ice_resmgr_release_map(&sc->tx_qmgr, sc->mirr_if->vsi->tx_qmap,
2462 sc->mirr_if->num_irq_vectors);
2463 ice_resmgr_release_map(&sc->rx_qmgr, sc->mirr_if->vsi->rx_qmap,
2464 sc->mirr_if->num_irq_vectors);
2469 if (hw->port_info)
2476 * ice_rebuild_pf_vsi_qmap - Rebuild the main PF VSI queue mapping
2485 struct ice_vsi *vsi = &sc->pf_vsi;
2490 /* Re-assign Tx queues from PF space to the main VSI */
2491 err = ice_resmgr_assign_contiguous(&sc->tx_qmgr, vsi->tx_qmap,
2492 vsi->num_tx_queues);
2494 device_printf(sc->dev, "Unable to re-assign PF Tx queues: %s\n",
2499 /* Re-assign Rx queues from PF space to this VSI */
2500 err = ice_resmgr_assign_contiguous(&sc->rx_qmgr, vsi->rx_qmap,
2501 vsi->num_rx_queues);
2503 device_printf(sc->dev, "Unable to re-assign PF Rx queues: %s\n",
2508 vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS;
2510 /* Re-assign Tx queue tail pointers */
2511 for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++)
2512 txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]);
2514 /* Re-assign Rx queue tail pointers */
2515 for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++)
2516 rxq->tail = QRX_TAIL(vsi->rx_qmap[i]);
2521 ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap,
2522 sc->pf_vsi.num_tx_queues);
2531 * ice_rebuild_recovery_mode - Rebuild driver state while in recovery mode
2540 device_t dev = sc->dev;
2549 ice_enable_intr(&sc->hw, sc->irqvs[0].me);
2552 ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
2554 log(LOG_INFO, "%s: device rebuild successful\n", if_name(sc->ifp));
2559 * the iflib core, we also want to re-run the admin task so that iflib
2568 * ice_rebuild - Rebuild driver state post reset
2572 * the hardware port, and re-enable the VSIs.
2577 struct ice_hw *hw = &sc->hw;
2578 device_t dev = sc->dev;
2583 sc->rebuild_ticks = ticks;
2586 ice_clear_state(&sc->state, ICE_STATE_RESET_FAILED);
2589 * If the firmware is in recovery mode, only restore the limited
2592 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
2602 device_printf(dev, "failed to re-init controlqs, err %s\n",
2613 ice_aq_str(hw->adminq.sq_last_status));
2617 /* Re-enable FW logging. Keep going even if this fails */
2619 if (hw->pf_id == 0)
2620 status = ice_fwlog_set(hw, &hw->fwlog_cfg);
2626 * enabled pre-rebuild.
2628 if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
2631 device_printf(dev, "failed to re-register fw logging, err %s aq_err %s\n",
2633 ice_aq_str(hw->adminq.sq_last_status));
2638 ice_aq_str(hw->adminq.sq_last_status));
2669 status = ice_sched_init_port(hw->port_info);
2677 if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE)) {
2678 pkg_state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
2689 device_printf(sc->dev, "Unable to re-assign main VSI queues, err %s\n",
2693 err = ice_initialize_vsi(&sc->pf_vsi);
2695 device_printf(sc->dev, "Unable to re-initialize Main VSI, err %s\n",
2705 /* Re-enable FW health event reporting */
2709 err = ice_config_rss(&sc->pf_vsi);
2711 device_printf(sc->dev,
2717 if (hw->port_info->qos_cfg.is_sw_lldp)
2723 ice_clear_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED);
2726 /* RDMA interface will be restarted by the stack re-init */
2732 ice_enable_intr(&sc->hw, sc->irqvs[0].me);
2735 ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
2738 if (sc->mirr_if) {
2744 log(LOG_INFO, "%s: device rebuild successful\n", if_name(sc->ifp));
2749 * the iflib core, we also want to re-run the admin task so that iflib
2754 if (hw->port_info->qos_cfg.is_sw_lldp)
2762 ice_deinit_vsi(&sc->pf_vsi);
2764 ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap,
2765 sc->pf_vsi.num_tx_queues);
2766 ice_resmgr_release_map(&sc->rx_qmgr, sc->pf_vsi.rx_qmap,
2767 sc->pf_vsi.num_rx_queues);
2772 ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
2773 ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
2778 * ice_handle_reset_event - Handle reset events triggered by OICR
2783 * firmware.
2791 struct ice_hw *hw = &sc->hw;
2793 device_t dev = sc->dev;
2800 if (!ice_testandclear_state(&sc->state, ICE_STATE_RESET_OICR_RECV))
2823 ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
2824 ice_clear_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
2825 ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
2831 sc->hw.reset_ongoing = false;
2838 if (ice_testandclear_state(&sc->state, ICE_STATE_RESET_PFR_REQ))
2843 * ice_handle_pf_reset_request - Initiate PF reset requested by software
2857 struct ice_hw *hw = &sc->hw;
2861 if (!ice_testandclear_state(&sc->state, ICE_STATE_RESET_PFR_REQ))
2875 device_printf(sc->dev, "device PF reset failed, err %s\n",
2877 ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
2881 sc->soft_stats.pfr_count++;
2886 * ice_init_device_features - Init device driver features
2895 struct ice_hw *hw = &sc->hw;
2898 ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
2899 ice_set_bit(ICE_FEATURE_RSS, sc->feat_cap);
2900 ice_set_bit(ICE_FEATURE_RDMA, sc->feat_cap);
2901 ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_cap);
2902 ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_1, sc->feat_cap);
2903 ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_2, sc->feat_cap);
2904 ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
2905 ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_cap);
2906 ice_set_bit(ICE_FEATURE_HAS_PBA, sc->feat_cap);
2907 ice_set_bit(ICE_FEATURE_DCB, sc->feat_cap);
2908 ice_set_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap);
2909 ice_set_bit(ICE_FEATURE_PHY_STATISTICS, sc->feat_cap);
2912 ice_set_bit(ICE_FEATURE_PHY_STATISTICS, sc->feat_en);
2915 ice_set_bit(ICE_FEATURE_DUAL_NAC, sc->feat_cap);
2917 if (!hw->func_caps.common_cap.rss_table_size)
2918 ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
2919 if (!hw->func_caps.common_cap.iwarp || !ice_enable_irdma)
2920 ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
2921 if (!hw->func_caps.common_cap.dcb)
2922 ice_clear_bit(ICE_FEATURE_DCB, sc->feat_cap);
2923 /* Disable features due to firmware limitations... */
2925 ice_clear_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
2927 ice_clear_bit(ICE_FEATURE_FW_LOGGING, sc->feat_cap);
2928 if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
2929 if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_FW_LOGGING))
2930 ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_en);
2936 ice_disable_unsupported_features(sc->feat_cap);
2939 if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_RSS))
2940 ice_set_bit(ICE_FEATURE_RSS, sc->feat_en);
2944 ice_clear_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap);
2946 if (hw->dev_caps.supported_sensors & ICE_SENSOR_SUPPORT_E810_INT_TEMP) {
2947 ice_set_bit(ICE_FEATURE_TEMP_SENSOR, sc->feat_cap);
2948 ice_set_bit(ICE_FEATURE_TEMP_SENSOR, sc->feat_en);
2951 if (hw->func_caps.common_cap.next_cluster_id_support ||
2952 hw->dev_caps.common_cap.next_cluster_id_support) {
2953 ice_set_bit(ICE_FEATURE_NEXT_CLUSTER_ID, sc->feat_cap);
2954 ice_set_bit(ICE_FEATURE_NEXT_CLUSTER_ID, sc->feat_en);
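Hedged usage sketch: other paths gate on the negotiated bits, mirroring the ice_is_bit_set()/ice_config_rss() calls seen above (the combined call site here is illustrative):

	if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_RSS))
		err = ice_config_rss(&sc->pf_vsi);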
2959 * ice_if_multi_set - Callback to update Multicast filters in HW
2977 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
2982 device_printf(sc->dev,
2990 * ice_if_vlan_register - Register a VLAN with the hardware
3007 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
3010 status = ice_add_vlan_hw_filter(&sc->pf_vsi, vtag);
3012 device_printf(sc->dev,
3015 ice_aq_str(sc->hw.adminq.sq_last_status));
3020 * ice_if_vlan_unregister - Remove a VLAN filter from the hardware
3037 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
3040 status = ice_remove_vlan_hw_filter(&sc->pf_vsi, vtag);
3042 device_printf(sc->dev,
3045 ice_aq_str(sc->hw.adminq.sq_last_status));
3050 * ice_if_stop - Stop the device
3075 * been initialized. It will be set when ice_if_init is called, and
3078 if (!ice_testandclear_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED))
3081 if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) {
3082 device_printf(sc->dev, "request to stop interface cannot be completed as the device failed to reset\n");
3086 if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
3087 device_printf(sc->dev, "request to stop interface while device is prepared for impending reset\n");
3101 ice_flush_txq_interrupts(&sc->pf_vsi);
3102 ice_flush_rxq_interrupts(&sc->pf_vsi);
3105 ice_vsi_disable_tx(&sc->pf_vsi);
3106 ice_control_all_rx_queues(&sc->pf_vsi, false);
3108 if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) &&
3109 !(if_getflags(sc->ifp) & IFF_UP) && sc->link_up)
3112 if (sc->mirr_if && ice_test_state(&mif->state, ICE_STATE_SUBIF_NEEDS_REINIT)) {
3113 ice_subif_if_stop(sc->mirr_if->subctx);
3114 device_printf(sc->dev, "The subinterface also comes down and up after reset\n");
3119 * ice_if_get_counter - Get current value of an ifnet statistic
3133 return ice_get_ifnet_counter(&sc->pf_vsi, counter);
3137 * ice_request_stack_reinit - Request that iflib re-initialize
3140 * Request that the device be brought down and up, to re-initialize. For
3142 * queues need to be re-initialized.
3145 * re-initialized if we need to restart Tx and Rx queues.
3150 if (CTX_ACTIVE(sc->ctx)) {
3151 iflib_request_reset(sc->ctx);
3152 iflib_admin_intr_deferred(sc->ctx);
3157 * ice_driver_is_detaching - Check if the driver is detaching/unloading
3168 * detach-based race conditions as it is possible for a thread to race with
3174 return (ice_test_state(&sc->state, ICE_STATE_DETACHING) ||
3175 iflib_in_detach(sc->ctx));
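Hedged usage sketch for long-running entry points that may race with detach (ESHUTDOWN as the conventional errno; the call site is illustrative):

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);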
3179 * ice_if_priv_ioctl - Device private ioctl handler
3193 device_t dev = sc->dev;
3226 switch (ifd->ifd_cmd) {
3237 * ice_if_i2c_req - I2C request handler for iflib
3243 * @remark The iflib-only part is pretty simple.
3254 * ice_if_suspend - PCI device suspend handler for iflib
3268 * either via FLR or during the D3->D0 transition.
3270 ice_clear_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
3278 * ice_if_resume - PCI device resume handler for iflib
3297 * ice_if_needs_restart - Tell iflib when the driver needs to be reinitialized
3312 if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) &&
3313 !(if_getflags(sc->ifp) & IFF_UP))
3321 * ice_init_link - Do link configuration and link status reporting
3330 struct ice_hw *hw = &sc->hw;
3331 device_t dev = sc->dev;
3338 ice_set_state(&sc->state, ICE_STATE_PHY_FW_INIT_PENDING);
3345 ice_clear_state(&sc->state, ICE_STATE_PHY_FW_INIT_PENDING);
3407 * - isc_admin_intrcnt is set to 0
3408 * - Uses subif iflib driver methods
3409 * - Flagged as a VF for iflib
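A hedged sketch of shared-context settings matching the list above (field and flag names from iflib; the structure name and values are illustrative):

static struct if_shared_ctx ice_subif_sctx_sketch = {
	.isc_magic = IFLIB_MAGIC,
	.isc_admin_intrcnt = 0,		/* no admin interrupt */
	.isc_flags = IFLIB_IS_VF,	/* flagged as a VF for iflib */
};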
3454 if_softc_ctx_t scctx = mif->subscctx;
3456 scctx->isc_txrx = &ice_subif_txrx;
3458 scctx->isc_capenable = ICE_FULL_CAPS;
3459 scctx->isc_tx_csum_flags = ICE_CSUM_OFFLOAD;
3461 scctx->isc_ntxqsets = 4;
3462 scctx->isc_nrxqsets = 4;
3463 scctx->isc_vectors = scctx->isc_nrxqsets;
3465 scctx->isc_ntxqsets_max = 256;
3466 scctx->isc_nrxqsets_max = 256;
3468 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
3470 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
3473 scctx->isc_tx_nsegments = ICE_MAX_TX_SEGS;
3474 scctx->isc_tx_tso_segments_max = ICE_MAX_TSO_SEGS;
3475 scctx->isc_tx_tso_size_max = ICE_TSO_SIZE;
3476 scctx->isc_tx_tso_segsize_max = ICE_MAX_DMA_SEG_SIZE;
3485 mif->subctx = ctx;
3486 mif->subdev = dev;
3487 mif->subscctx = iflib_get_softc_ctx(ctx);
3502 * ice_destroy_mirror_interface - destroy mirror interface
3508 * @pre: Mirror interface already exists and is initialized.
3513 struct ice_mirr_if *mif = sc->mirr_if;
3514 struct ice_vsi *vsi = mif->vsi;
3518 is_locked = sx_xlocked(sc->iflib_ctx_lock);
3522 if (mif->ifp) {
3523 ret = iflib_device_deregister(mif->subctx);
3525 device_printf(sc->dev,
3532 ret = device_delete_child(sc->dev, mif->subdev);
3535 device_printf(sc->dev,
3543 if (mif->if_imap) {
3544 free(mif->if_imap, M_ICE);
3545 mif->if_imap = NULL;
3547 if (mif->os_imap) {
3548 free(mif->os_imap, M_ICE);
3549 mif->os_imap = NULL;
3554 * - rx_irqvs
3555 * - tx_queues
3556 * - rx_queues
3561 sc->mirr_if = NULL;
3566 * ice_setup_mirror_vsi - Initialize mirror VSI
3577 struct ice_softc *sc = mif->back;
3578 device_t dev = sc->dev;
3588 mif->vsi = vsi;
3592 vsi->num_tx_queues = vsi->num_rx_queues = ICE_DEFAULT_VF_QUEUES;
3595 ret = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap,
3596 vsi->num_tx_queues);
3603 ret = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap,
3604 vsi->num_rx_queues);
3610 vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED;
3611 vsi->max_frame_size = ICE_MAX_FRAME_SIZE;
3630 vsi->mirror_src_vsi = sc->pf_vsi.idx;
3632 ice_debug(&sc->hw, ICE_DBG_INIT,
3634 vsi->mirror_src_vsi, vsi->idx);
3635 ice_debug(&sc->hw, ICE_DBG_INIT, "(HW num: VSI %d to %d)\n",
3636 ice_get_hw_vsi_num(&sc->hw, vsi->mirror_src_vsi),
3637 ice_get_hw_vsi_num(&sc->hw, vsi->idx));
3651 mif->vsi = NULL;
3656 * ice_create_mirror_interface - Initialize mirror interface
3671 device_t dev = sc->dev;
3684 sc->mirr_if = mif;
3685 mif->back = sc;
3688 * - ice_subif_if_tx_queues_alloc
3689 * - ice_subif_if_rx_queues_alloc
3706 mif->subdev = device_add_child(dev, sbuf_data(sb), 0);
3709 if (!mif->subdev) {
3713 sc->mirr_if = NULL;
3718 device_set_driver(mif->subdev, &ice_subif_driver);
3721 * has an initialized softc to pass to iflib
3723 ret = iflib_device_register(mif->subdev, mif, &ice_subif_sctx, &mif->subctx);
3728 mif->ifp = iflib_get_ifp(mif->subctx);
3729 if_setflagbits(mif->ifp, IFF_MONITOR, 0);
3732 media = iflib_get_media(mif->subctx);
3737 device_get_nameunit(mif->subdev), if_name(mif->ifp));
3739 ice_add_vsi_sysctls(mif->vsi);
3745 mif->if_attached = true;
3764 * driver needs to get MSI-X resources from the parent device.
3775 struct ice_softc *sc = mif->back;
3776 struct ice_hw *hw = &sc->hw;
3777 struct ice_vsi *vsi = mif->vsi;
3778 device_t dev = mif->subdev;
3781 if_ctx_t ctx = mif->subctx;
3783 ice_debug(hw, ICE_DBG_INIT, "%s: Last rid: %d\n", __func__, sc->last_rid);
3785 rid = sc->last_rid + 1;
3786 for (i = 0; i < vsi->num_rx_queues; i++, rid++) {
3787 struct ice_rx_queue *rxq = &vsi->rx_queues[i];
3788 struct ice_tx_queue *txq = &vsi->tx_queues[i];
3794 err = iflib_irq_alloc_generic_subctx(sc->ctx, ctx,
3795 &mif->rx_irqvs[i].irq, rid, IFLIB_INTR_RXTX, ice_msix_que,
3796 rxq, rxq->me, irq_name);
3801 i--;
3804 MPASS(rid - 1 > 0);
3806 mif->rx_irqvs[i].me = rid - 1;
3807 rxq->irqv = &mif->rx_irqvs[i];
3811 iflib_softirq_alloc_generic(ctx, &mif->rx_irqvs[i].irq,
3812 IFLIB_INTR_TX, txq, txq->me, irq_name);
3813 txq->irqv = &mif->rx_irqvs[i];
3816 sc->last_rid = rid - 1;
3819 sc->last_rid);
3824 for (; i >= 0; i--)
3825 iflib_irq_free(ctx, &mif->rx_irqvs[i].irq);
3830 * ice_subif_rebuild - Rebuild subinterface post reset
3839 struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(sc->ctx);
3840 struct ice_vsi *vsi = sc->mirr_if->vsi;
3845 device_printf(sc->dev, "Unable to re-assign mirror VSI queues, err %s\n",
3852 device_printf(sc->dev, "Unable to re-initialize mirror VSI, err %s\n",
3859 device_printf(sc->dev,
3865 vsi->mirror_src_vsi = sc->pf_vsi.idx;
3869 device_printf(sc->dev,
3875 ice_set_state(&mif->state, ICE_STATE_SUBIF_NEEDS_REINIT);
3882 ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap,
3883 sc->mirr_if->num_irq_vectors);
3884 ice_resmgr_release_map(&sc->rx_qmgr, vsi->rx_qmap,
3885 sc->mirr_if->num_irq_vectors);
3891 * ice_subif_rebuild_vsi_qmap - Rebuild the mirror VSI queue mapping
3900 struct ice_vsi *vsi = sc->mirr_if->vsi;
3905 err = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap, sc->mirr_if->num_irq_vectors);
3907 device_printf(sc->dev, "Unable to assign mirror VSI Tx queues: %s\n",
3912 err = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap, sc->mirr_if->num_irq_vectors);
3914 device_printf(sc->dev, "Unable to assign mirror VSI Rx queues: %s\n",
3919 vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED;
3921 /* Re-assign Tx queue tail pointers */
3922 for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++)
3923 txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]);
3925 /* Re-assign Rx queue tail pointers */
3926 for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++)
3927 rxq->tail = QRX_TAIL(vsi->rx_qmap[i]);
3932 ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap, vsi->num_tx_queues);
3938 * ice_subif_if_tx_queues_alloc - Allocate Tx queue memory for subinterfaces
3954 device_t dev = mif->subdev;
3960 MPASS(mif->subscctx->isc_ntxd[0] <= ICE_MAX_DESC_COUNT);
3962 vsi = mif->vsi;
3964 MPASS(vsi->num_tx_queues == ntxqsets);
3967 if (!(vsi->tx_queues =
3975 for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
3976 if (!(txq->tx_rsq =
3977 (uint16_t *)malloc(sizeof(uint16_t) * mif->subscctx->isc_ntxd[0], M_ICE, M_NOWAIT))) {
3984 for (j = 0; j < mif->subscctx->isc_ntxd[0]; j++)
3985 txq->tx_rsq[j] = QIDX_INVALID;
3991 for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
3993 txq->me = txq->q_handle = i;
3994 txq->vsi = vsi;
3997 txq->desc_count = mif->subscctx->isc_ntxd[0];
4000 txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]);
4001 txq->tx_base = (struct ice_tx_desc *)vaddrs[i];
4002 txq->tx_paddr = paddrs[i];
4010 for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
4011 if (txq->tx_rsq != NULL) {
4012 free(txq->tx_rsq, M_ICE);
4013 txq->tx_rsq = NULL;
4016 free(vsi->tx_queues, M_ICE);
4017 vsi->tx_queues = NULL;
4022 * ice_subif_if_rx_queues_alloc - Allocate Rx queue memory for subinterfaces
4038 device_t dev = mif->subdev;
4044 MPASS(mif->subscctx->isc_nrxd[0] <= ICE_MAX_DESC_COUNT);
4046 vsi = mif->vsi;
4048 MPASS(vsi->num_rx_queues == nrxqsets);
4051 if (!(vsi->rx_queues =
4061 for (i = 0, rxq = vsi->rx_queues; i < nrxqsets; i++, rxq++) {
4062 rxq->me = i;
4063 rxq->vsi = vsi;
4066 rxq->desc_count = mif->subscctx->isc_nrxd[0];
4069 rxq->tail = QRX_TAIL(vsi->rx_qmap[i]);
4070 rxq->rx_base = (union ice_32b_rx_flex_desc *)vaddrs[i];
4071 rxq->rx_paddr = paddrs[i];
4080 * ice_subif_if_msix_intr_assign - Assign MSI-X interrupts to new sub interface
4084 * Allocates and assigns driver private resources for MSI-X interrupt tracking.
4086 * @pre OS MSI-X resources have been pre-allocated by parent interface.
4092 struct ice_softc *sc = mif->back;
4093 struct ice_vsi *vsi = mif->vsi;
4095 device_t dev = mif->subdev;
4098 if (vsi->num_rx_queues != vsi->num_tx_queues) {
4101 vsi->num_tx_queues, vsi->num_rx_queues);
4105 if (msix > sc->extra_vectors) {
4107 "%s: Not enough spare (%d) msix vectors for new sub-interface requested (%d)\n",
4108 __func__, sc->extra_vectors, msix);
4111 device_printf(dev, "%s: Using %d vectors for sub-interface\n", __func__,
4115 mif->num_irq_vectors = vsi->num_rx_queues;
4116 mif->rx_irqvs = (struct ice_irq_vector *)
4117 malloc(sizeof(struct ice_irq_vector) * (mif->num_irq_vectors),
4119 if (!mif->rx_irqvs) {
4122 mif->num_irq_vectors);
4127 if (!(mif->if_imap =
4128 (u16 *)malloc(sizeof(u16) * mif->num_irq_vectors,
4134 ret = ice_resmgr_assign_contiguous(&sc->dev_imgr, mif->if_imap, mif->num_irq_vectors);
4141 if (!(mif->os_imap =
4142 (u16 *)malloc(sizeof(u16) * mif->num_irq_vectors,
4148 ret = ice_resmgr_assign_contiguous(&sc->os_imgr, mif->os_imap, mif->num_irq_vectors);
4158 free(mif->if_imap, M_ICE);
4159 mif->if_imap = NULL;
4161 free(mif->rx_irqvs, M_ICE);
4162 mif->rx_irqvs = NULL;
4167 * ice_subif_if_intr_enable - Enable device interrupts for a subinterface
4177 struct ice_softc *sc = mif->back;
4178 struct ice_vsi *vsi = mif->vsi;
4179 struct ice_hw *hw = &sc->hw;
4182 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
4186 for (int i = 0; i < vsi->num_rx_queues; i++)
4187 ice_enable_intr(hw, vsi->rx_queues[i].irqv->me);
4191 * ice_subif_if_rx_queue_intr_enable - Enable a specific Rx queue interrupt
4203 struct ice_softc *sc = mif->back;
4204 struct ice_vsi *vsi = mif->vsi;
4205 struct ice_hw *hw = &sc->hw;
4208 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
4211 ice_enable_intr(hw, vsi->rx_queues[rxqid].irqv->me);
4216 * ice_subif_if_tx_queue_intr_enable - Enable a specific Tx queue interrupt
4228 struct ice_softc *sc = mif->back;
4229 struct ice_vsi *vsi = mif->vsi;
4230 struct ice_hw *hw = &sc->hw;
4233 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
4236 ice_enable_intr(hw, vsi->tx_queues[txqid].irqv->me);
4241 * ice_subif_if_init - Initialize the subinterface
4253 struct ice_softc *sc = mif->back;
4254 struct ice_vsi *vsi = mif->vsi;
4255 device_t dev = mif->subdev;
4261 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
4264 if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) {
4267 device_get_nameunit(sc->dev));
4271 if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
4274 device_get_nameunit(sc->dev));
4279 vsi->mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
4311 ice_set_state(&mif->state, ICE_STATE_DRIVER_INITIALIZED);
4319 * ice_subif_if_stop - Stop the subinterface
4332 struct ice_softc *sc = mif->back;
4333 struct ice_vsi *vsi = mif->vsi;
4334 device_t dev = mif->subdev;
4336 if (!ice_testandclear_state(&mif->state, ICE_STATE_DRIVER_INITIALIZED))
4339 if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) {
4342 device_get_nameunit(sc->dev));
4346 if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
4349 device_get_nameunit(sc->dev));
4363 * ice_free_irqvs_subif - Free IRQ vector memory for subinterfaces
4371 struct ice_softc *sc = mif->back;
4372 struct ice_vsi *vsi = mif->vsi;
4373 if_ctx_t ctx = sc->ctx;
4377 if (mif->rx_irqvs == NULL)
4380 /* Free the IRQ vectors -- currently subinterfaces have number
4385 for (i = 0; i < vsi->num_rx_queues; i++)
4386 iflib_irq_free(ctx, &mif->rx_irqvs[i].irq);
4388 ice_resmgr_release_map(&sc->os_imgr, mif->os_imap,
4389 mif->num_irq_vectors);
4390 ice_resmgr_release_map(&sc->dev_imgr, mif->if_imap,
4391 mif->num_irq_vectors);
4393 sc->last_rid -= vsi->num_rx_queues;
4396 for (i = 0; i < vsi->num_rx_queues; i++)
4397 vsi->rx_queues[i].irqv = NULL;
4399 for (i = 0; i < vsi->num_tx_queues; i++)
4400 vsi->tx_queues[i].irqv = NULL;
4403 free(mif->rx_irqvs, M_ICE);
4404 mif->rx_irqvs = NULL;
4408 * ice_subif_if_queues_free - Free queue memory for subinterfaces
4418 struct ice_vsi *vsi = mif->vsi;
4428 /* Release MSI-X IRQ vectors */
4431 if (vsi->tx_queues != NULL) {
4433 for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) {
4434 if (txq->tx_rsq != NULL) {
4435 free(txq->tx_rsq, M_ICE);
4436 txq->tx_rsq = NULL;
4439 free(vsi->tx_queues, M_ICE);
4440 vsi->tx_queues = NULL;
4442 if (vsi->rx_queues != NULL) {
4443 free(vsi->rx_queues, M_ICE);
4444 vsi->rx_queues = NULL;
4449 * ice_subif_if_media_status - Report subinterface media
4461 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
4462 ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
4466 * ice_subif_if_promisc_set - Set subinterface promiscuous mode