
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 * Copyright (c) 2001-2024, Intel Corporation
 * Copyright (c) 2021-2024 Rubicon Communications, LLC (Netgate)
/* Intel(R) PRO/1000 Network Connection - igc */
    "Intel(R) Ethernet Controller I225-LM"),
    "Intel(R) Ethernet Controller I225-V"),
    "Intel(R) Ethernet Controller I225-K"),
    "Intel(R) Ethernet Controller I225-IT"),
    "Intel(R) Ethernet Controller I220-V"),
    "Intel(R) Ethernet Controller I225-K(2)"),
    "Intel(R) Ethernet Controller I225-LMvP(2)"),
    "Intel(R) Ethernet Controller I226-K"),
    "Intel(R) Ethernet Controller I226-LMvP"),
    "Intel(R) Ethernet Controller I225-IT(2)"),
    "Intel(R) Ethernet Controller I226-LM"),
    "Intel(R) Ethernet Controller I226-V"),
    "Intel(R) Ethernet Controller I226-IT"),
    "Intel(R) Ethernet Controller I221-V"),
/* MSI-X handlers */

/* Energy efficient ethernet - default to OFF */

    struct igc_hw *hw = &sc->hw;

    if_softc_ctx_t scctx = sc->shared;
    struct rx_ring *rxr = &rx_que->rxr;
    struct tx_ring *txr = &tx_que->txr;
    int ntxd = scctx->isc_ntxd[0];
    int nrxd = scctx->isc_nrxd[0];

    u32 staterr = le32toh(rxr->rx_base[j].wb.upper.status_error);
    u32 length = le32toh(rxr->rx_base[j].wb.upper.length);
        j, rxr->rx_base[j].read.buffer_addr, staterr, length);

    unsigned int *ptr = (unsigned int *)&txr->tx_base[j];
        j, ptr[0], ptr[1], ptr[2], ptr[3], buf->eop,
        buf->eop != -1 ?
        txr->tx_base[buf->eop].upper.fields.status &
    sc->ctx = sc->osdep.ctx = ctx;
    sc->dev = sc->osdep.dev = dev;
    scctx = sc->shared = iflib_get_softc_ctx(ctx);
    sc->media = iflib_get_media(ctx);
    hw = &sc->hw;

    sc->enable_aim = igc_enable_aim;
        &sc->enable_aim, 0,

    scctx->isc_tx_nsegments = IGC_MAX_SCATTER;
    scctx->isc_nrxqsets_max =
        scctx->isc_ntxqsets_max = igc_set_num_queues(ctx);
        scctx->isc_ntxqsets_max);
    scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] *
    scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] *
    scctx->isc_txd_size[0] = sizeof(union igc_adv_tx_desc);
    scctx->isc_rxd_size[0] = sizeof(union igc_adv_rx_desc);
    scctx->isc_txrx = &igc_txrx;
    scctx->isc_tx_tso_segments_max = IGC_MAX_SCATTER;
    scctx->isc_tx_tso_size_max = IGC_TSO_SIZE;
    scctx->isc_tx_tso_segsize_max = IGC_TSO_SEG_SIZE;
    scctx->isc_capabilities = scctx->isc_capenable = IGC_CAPS;
    scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_TSO |

    scctx->isc_msix_bar = PCIR_BAR(IGC_MSIX_BAR);
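
    /*
     * The check below guards against the MSI-X table living in the upper
     * half of a 64-bit BAR: if a 32-bit read of the BAR returns zero, the
     * table address presumably sits in the next BAR slot, so the offset
     * is bumped by 4 bytes.
     */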
    if (pci_read_config(dev, scctx->isc_msix_bar, 4) == 0)
        scctx->isc_msix_bar += 4;
    hw->mac.autoneg = DO_AUTO_NEG;
    hw->phy.autoneg_wait_to_complete = false;
    hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

    if (hw->phy.media_type == igc_media_type_copper) {
        hw->phy.mdix = AUTO_ALL_MODES;

    scctx->isc_max_frame_size = sc->hw.mac.max_frame_size =

    sc->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN *
    if (sc->mta == NULL) {

        device_printf(dev, "PHY reset is blocked"

    sc->hw.dev_spec._i225.eee_disable = igc_eee_setting;
    /*
    ** Some PCI-E parts fail the first check due to
    ** the link being in sleep state, call it again,
    ** if it fails a second time it is a real issue.
    */
    if (!igc_is_valid_ether_addr(hw->mac.addr)) {

     * Get Wake-on-Lan and Management info for later use

    scctx->isc_capenable &= ~IFCAP_WOL;
    if (sc->wol != 0)
        scctx->isc_capenable |= IFCAP_WOL_MAGIC;
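
    /*
     * Note that only magic-packet wake is advertised: WOL capability is
     * first cleared outright and then re-enabled as IFCAP_WOL_MAGIC only
     * when sc->wol is non-zero.
     */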
    iflib_set_mac(ctx, hw->mac.addr);

    free(sc->mta, M_DEVBUF);

    struct igc_hw *hw = &sc->hw;

    hw->mac.get_link_status = true;

    free(sc->mta, M_DEVBUF);

    igc_phy_hw_reset(&sc->hw);

    if (mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {

    scctx->isc_max_frame_size = sc->hw.mac.max_frame_size =

    if_softc_ctx_t scctx = sc->shared;

    bcopy(if_getlladdr(ifp), sc->hw.mac.addr,

    igc_rar_set(&sc->hw, sc->hw.mac.addr, 0);

    for (i = 0, tx_que = sc->tx_queues; i < sc->tx_num_queues;
        struct tx_ring *txr = &tx_que->txr;

        txr->tx_rs_cidx = txr->tx_rs_pidx;
        /*
         * Initialize the last processed descriptor to be the end of
         * the ring, rather than the start, so that we avoid an
         * off-by-one error when calculating how many descriptors are
         * done in the credits_update function.
         */
        txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
    IGC_WRITE_REG(&sc->hw, IGC_VET, ETHERTYPE_VLAN);

    sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);

    igc_clear_hw_cntrs_base_generic(&sc->hw);

    if (sc->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */

    IGC_READ_REG(&sc->hw, IGC_ICR);
    IGC_WRITE_REG(&sc->hw, IGC_ICS, IGC_ICS_LSC);

    igc_set_eee_i225(&sc->hw, true, true, true);
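
    /*
     * A reading note on the call above: the three boolean arguments to
     * igc_set_eee_i225() advertise EEE at 2.5G, 1G and 100M link speeds
     * respectively (mirroring the Linux igc driver's prototype), so all
     * three speeds are requested here.
     */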
    struct igc_hw *hw = &sc->hw;

    if ((txr->tx_bytes == 0) && (rxr->rx_bytes == 0))

    if (sc->enable_aim) {
        nextlatency = rxr->rx_nextlatency;

        /* Use half default (4K) ITR if sub-gig */
        if (sc->link_speed < 1000) {

        if (sc->shared->isc_max_frame_size * 2 > (sc->pba << 10)) {
            sc->enable_aim = 0;

        if (txr->tx_bytes && txr->tx_packets) {
            bytes = txr->tx_bytes;
            bytes_packets = txr->tx_bytes/txr->tx_packets;
            packets = txr->tx_packets;

        if (rxr->rx_bytes && rxr->rx_packets) {
            bytes = max(bytes, rxr->rx_bytes);
                rxr->rx_bytes/rxr->rx_packets);
            packets = max(packets, rxr->rx_packets);

        device_printf(sc->dev,

        if (sc->enable_aim == 1 && nextlatency == eitr_latency_lowest)

        rxr->rx_nextlatency = nextlatency;

        rxr->rx_nextlatency = nextlatency;

    if (neweitr != que->eitr_setting) {
        que->eitr_setting = neweitr;
        IGC_WRITE_REG(hw, IGC_EITR(que->msix), que->eitr_setting);
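
    /*
     * Summary of the AIM logic above (as reconstructible from the
     * excerpt): each pass classifies a queue's recent traffic into a
     * latency bucket from its bytes-per-packet profile, derives a new
     * EITR interval from that bucket, and writes EITR only when the
     * value actually changes, sparing a register access on this hot path.
     */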
 * Fast Legacy/MSI Combined Interrupt Service routine

    struct igc_hw *hw = &sc->hw;
    struct igc_rx_queue *que = &sc->rx_queues[0];
    struct tx_ring *txr = &sc->tx_queues[0].txr;
    struct rx_ring *rxr = &que->rxr;
    if_ctx_t ctx = sc->ctx;
    /*
     * Only MSI-X interrupts have one-shot behavior by taking advantage
     * of the EIAC register.  Thus, explicitly disable interrupts.  This
     * also works around the MSI message reordering errata on certain
     * systems.
     */
    sc->rx_overruns++;

    txr->tx_bytes = 0;
    txr->tx_packets = 0;
    rxr->rx_bytes = 0;
    rxr->rx_packets = 0;

    struct igc_rx_queue *rxq = &sc->rx_queues[rxqid];

    IGC_WRITE_REG(&sc->hw, IGC_EIMS, rxq->eims);

    struct igc_tx_queue *txq = &sc->tx_queues[txqid];

    IGC_WRITE_REG(&sc->hw, IGC_EIMS, txq->eims);

 * MSI-X RX Interrupt Service routine

    struct igc_softc *sc = que->sc;
    struct tx_ring *txr = &sc->tx_queues[que->msix].txr;
    struct rx_ring *rxr = &que->rxr;

    ++que->irqs;

    txr->tx_bytes = 0;
    txr->tx_packets = 0;
    rxr->rx_bytes = 0;
    rxr->rx_packets = 0;
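
    /*
     * The per-ring byte/packet counters are cleared once the EITR
     * recalculation has consumed them, so each moderation pass sees only
     * the traffic that arrived since the previous interrupt.
     */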
 * MSI-X Link Fast Interrupt Service routine

    ++sc->link_irq;
    MPASS(sc->hw.back != NULL);
    reg_icr = IGC_READ_REG(&sc->hw, IGC_ICR);

    sc->rx_overruns++;

    igc_handle_link(sc->ctx);

    IGC_WRITE_REG(&sc->hw, IGC_IMS, IGC_IMS_LSC);
    IGC_WRITE_REG(&sc->hw, IGC_EIMS, sc->link_mask);

    sc->hw.mac.get_link_status = true;

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!sc->link_active) {

    ifmr->ifm_status |= IFM_ACTIVE;

    switch (sc->link_speed) {
        ifmr->ifm_active |= IFM_10_T;
        ifmr->ifm_active |= IFM_100_TX;
        ifmr->ifm_active |= IFM_1000_T;
        ifmr->ifm_active |= IFM_2500_T;

    if (sc->link_duplex == FULL_DUPLEX)
        ifmr->ifm_active |= IFM_FDX;
        ifmr->ifm_active |= IFM_HDX;

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)

    sc->hw.mac.autoneg = DO_AUTO_NEG;

    switch (IFM_SUBTYPE(ifm->ifm_media)) {
        sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
        sc->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
        sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
        if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
            sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
            sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
        if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
            sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
            sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
        device_printf(sc->dev, "Unsupported media type\n");

    reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);
    IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
    IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
    IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);

    mta = sc->mta;

    reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);

    igc_update_mc_addr_list(&sc->hw, mta, mcnt);

    IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);

 * controller-specific hardware patting.
    struct igc_hw *hw = &sc->hw;

    /* Get the cached link value or read phy for real */
    switch (hw->phy.media_type) {
        if (hw->mac.get_link_status == true) {
            /* Do the work to read phy */
            link_check = !hw->mac.get_link_status;

        link_check = !hw->mac.get_link_status;

    if (link_check && (sc->link_active == 0)) {
        igc_get_speed_and_duplex(hw, &sc->link_speed,
            &sc->link_duplex);
            sc->link_speed,
            ((sc->link_duplex == FULL_DUPLEX) ?
        sc->link_active = 1;
            IF_Mbps(sc->link_speed));
    } else if (!link_check && (sc->link_active == 1)) {
        sc->link_speed = 0;
        sc->link_duplex = 0;
        sc->link_active = 0;

    sc->watchdog_events++;

    igc_reset_hw(&sc->hw);
    IGC_WRITE_REG(&sc->hw, IGC_WUC, 0);

    sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);

    sc->hw.vendor_id = pci_get_vendor(dev);
    sc->hw.device_id = pci_get_device(dev);
    sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
    sc->hw.subsystem_vendor_id =
    sc->hw.subsystem_device_id =

    if (igc_set_mac_type(&sc->hw)) {

    sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
    if (sc->memory == NULL) {

    sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
    sc->osdep.mem_bus_space_handle =
        rman_get_bushandle(sc->memory);
    sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;

    sc->hw.back = &sc->osdep;

 * Set up the MSI-X Interrupt handlers

    struct igc_rx_queue *rx_que = sc->rx_queues;
    struct igc_tx_queue *tx_que = sc->tx_queues;

    for (i = 0; i < sc->rx_num_queues; i++, rx_que++, vector++) {

        error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
            IFLIB_INTR_RXTX, igc_msix_que, rx_que, rx_que->me, buf);

            sc->rx_num_queues = i + 1;

        rx_que->msix = vector;

         * in IGC_IMS -- bits 20 and 21
         * NOTHING to do with the MSI-X vector
        rx_que->eims = 1 << vector;

    for (i = 0; i < sc->tx_num_queues; i++, tx_que++, vector++) {

        tx_que = &sc->tx_queues[i];
            &sc->rx_queues[i % sc->rx_num_queues].que_irq,
            IFLIB_INTR_TX, tx_que, tx_que->me, buf);

        tx_que->msix = (vector % sc->rx_num_queues);

         * in IGC_IMS -- bits 22 and 23
         * NOTHING to do with the MSI-X vector
        tx_que->eims = 1 << i;

    error = iflib_irq_alloc_generic(ctx, &sc->irq, rid, IFLIB_INTR_ADMIN,

    sc->linkvec = rx_vectors;

    iflib_irq_free(ctx, &sc->irq);
    rx_que = sc->rx_queues;
    for (int i = 0; i < sc->rx_num_queues; i++, rx_que++)
        iflib_irq_free(ctx, &rx_que->que_irq);
    struct igc_hw *hw = &sc->hw;

    /* Turn on MSI-X */
    for (int i = 0; i < sc->rx_num_queues; i++) {

        rx_que = &sc->rx_queues[i];

        ivar |= (rx_que->msix | IGC_IVAR_VALID) << 16;

        ivar |= rx_que->msix | IGC_IVAR_VALID;

    for (int i = 0; i < sc->tx_num_queues; i++) {

        tx_que = &sc->tx_queues[i];

        ivar |= (tx_que->msix | IGC_IVAR_VALID) << 24;

        ivar |= (tx_que->msix | IGC_IVAR_VALID) << 8;

        sc->que_mask |= tx_que->eims;

    ivar = (sc->linkvec | IGC_IVAR_VALID) << 8;
    sc->link_mask = 1 << sc->linkvec;

    for (int i = 0; i < sc->rx_num_queues; i++) {
        rx_que = &sc->rx_queues[i];
        IGC_WRITE_REG(hw, IGC_EITR(rx_que->msix), newitr);
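
    /*
     * IVAR layout note (assuming the register format igc shares with
     * igb): each 32-bit IVAR register holds four 8-bit entries, one per
     * interrupt cause, and IGC_IVAR_VALID (the top bit of each entry)
     * marks the entry as programmed -- hence the 8/16/24-bit shifts above.
     */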
    struct igc_rx_queue *que = sc->rx_queues;

    /* Release all MSI-X queue resources */
    if (sc->intr_type == IFLIB_INTR_MSIX)
        iflib_irq_free(ctx, &sc->irq);

    for (int i = 0; i < sc->rx_num_queues; i++, que++) {
        iflib_irq_free(ctx, &que->que_irq);

    if (sc->memory != NULL) {
        rman_get_rid(sc->memory), sc->memory);
        sc->memory = NULL;

    if (sc->flash != NULL) {
        rman_get_rid(sc->flash), sc->flash);
        sc->flash = NULL;

    if (sc->ioport != NULL) {
        rman_get_rid(sc->ioport), sc->ioport);
        sc->ioport = NULL;

/* Set up MSI or MSI-X */

    device_t dev = sc->dev;
    struct igc_hw *hw = &sc->hw;

    max_frame_size = sc->shared->isc_max_frame_size;

    if (sc->dmac == 0) { /* Disabling it */

    hwm = 64 * pba - max_frame_size / 16;
    if (hwm < 64 * (pba - 6))
        hwm = 64 * (pba - 6);

    dmac = pba - max_frame_size / 512;
    if (dmac < pba - 10)
        dmac = pba - 10;

    reg |= ((sc->dmac * 5) >> 6);

    reg |= (sc->dmac >> 5);
    ** in 2.5G operation the TTLX unit shrinks to 0.4 usec, so a 4 usec
    ** delay is 0xA register units (0x4 * 2.5, not 0x4 * 2); the delay
    ** itself is still 4 usec
    IGC_WRITE_REG(hw, IGC_DMCTXTH, (IGC_TXPBSIZE -
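
    /*
     * Assumption based on the shared igb lineage: DMCTXTH is in 64-byte
     * units, so the truncated expression above computes the free space
     * left in the TX packet buffer after reserving room for in-flight
     * frames, scaled down accordingly.
     */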
    struct igc_hw *hw = &sc->hw;

     * - High water mark should allow for at least two frames to be
     * - Low water mark works best when it is very near the high water
     * - The pause time is fairly large at 1000 x 512ns = 512 usec.

    hw->fc.high_water = rx_buffer_size -
        roundup2(sc->hw.mac.max_frame_size, 1024);
    /* 16-byte granularity */
    hw->fc.low_water = hw->fc.high_water - 16;

    if (sc->fc) /* locally set flow control value? */
        hw->fc.requested_mode = sc->fc;
        hw->fc.requested_mode = igc_fc_full;

    hw->fc.pause_time = IGC_FC_PAUSE_TIME;

    hw->fc.send_xon = true;

    /* and a re-init */

    sc->pba = pba;
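
    /*
     * Illustrative watermark arithmetic (numbers invented for the
     * example): with a 32 KB rx buffer and a 9018-byte jumbo frame
     * rounded up to 9216, high_water = 32768 - 9216 = 23552 and
     * low_water = 23552 - 16 = 23536, i.e. XOFF is sent while roughly
     * one full frame of headroom remains.
     */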
    struct igc_hw *hw = &sc->hw;

     * This just allocates buckets to queues using round-robin

            queue_id = queue_id % sc->rx_num_queues;

            queue_id = (i % sc->rx_num_queues);

    if_softc_ctx_t scctx = sc->shared;

    if (sc->tx_num_queues == 1) {
        if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1);

    ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);

    ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

    if_softc_ctx_t scctx = sc->shared;

    MPASS(sc->tx_num_queues > 0);
    MPASS(sc->tx_num_queues == ntxqsets);

    if (!(sc->tx_queues =
        sc->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {

    for (i = 0, que = sc->tx_queues; i < sc->tx_num_queues; i++, que++) {

        struct tx_ring *txr = &que->txr;
        txr->sc = que->sc = sc;
        que->me = txr->me = i;

        if (!(txr->tx_rsq = (qidx_t *) malloc(sizeof(qidx_t) *
            scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {

        for (j = 0; j < scctx->isc_ntxd[0]; j++)
            txr->tx_rsq[j] = QIDX_INVALID;
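
        /*
         * tx_rsq appears to be a ring of descriptor indices whose Report
         * Status bit was set; entries start as QIDX_INVALID and are later
         * consumed when counting how many transmit descriptors have
         * completed.
         */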
        txr->tx_base = (struct igc_tx_desc *)vaddrs[i*ntxqs];
        txr->tx_paddr = paddrs[i*ntxqs];

        "allocated for %d tx_queues\n", sc->tx_num_queues);

    MPASS(sc->rx_num_queues > 0);
    MPASS(sc->rx_num_queues == nrxqsets);

    if (!(sc->rx_queues =
        sc->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {

    for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {

        struct rx_ring *rxr = &que->rxr;
        rxr->sc = que->sc = sc;
        rxr->que = que;
        que->me = rxr->me = i;

        rxr->rx_base = (union igc_rx_desc_extended *)vaddrs[i*nrxqs];
        rxr->rx_paddr = paddrs[i*nrxqs];

        "allocated for %d rx_queues\n", sc->rx_num_queues);

    struct igc_tx_queue *tx_que = sc->tx_queues;
    struct igc_rx_queue *rx_que = sc->rx_queues;

    for (int i = 0; i < sc->tx_num_queues; i++, tx_que++) {
        struct tx_ring *txr = &tx_que->txr;
        if (txr->tx_rsq == NULL)

        free(txr->tx_rsq, M_DEVBUF);
        txr->tx_rsq = NULL;

    free(sc->tx_queues, M_DEVBUF);
    sc->tx_queues = NULL;

    free(sc->rx_queues, M_DEVBUF);
    sc->rx_queues = NULL;

    if (sc->mta != NULL) {
        free(sc->mta, M_DEVBUF);

    if_softc_ctx_t scctx = sc->shared;

    struct igc_hw *hw = &sc->hw;

    for (int i = 0; i < sc->tx_num_queues; i++, txr++) {

        que = &sc->tx_queues[i];
        txr = &que->txr;
        bus_addr = txr->tx_paddr;

        offp = (caddr_t)&txr->csum_flags;
        bzero(offp, endp - offp);
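
        /*
         * The bzero above wipes the ring state from csum_flags onward
         * (presumably up to the end of the ring structure), clearing any
         * cached checksum-offload context from a previous up/down cycle.
         */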
        scctx->isc_ntxd[0] * sizeof(struct igc_tx_desc));

        IGC_READ_REG(&sc->hw, IGC_TDBAL(i)),
        IGC_READ_REG(&sc->hw, IGC_TDLEN(i)));

    tctl = IGC_READ_REG(&sc->hw, IGC_TCTL);

    IGC_WRITE_REG(&sc->hw, IGC_TCTL, tctl);

#define BSIZEPKT_ROUNDUP ((1<<IGC_SRRCTL_BSIZEPKT_SHIFT)-1)

    if_softc_ctx_t scctx = sc->shared;

    struct igc_hw *hw = &sc->hw;

        (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

    if (sc->tx_num_queues > 1)

    if (sc->tx_num_queues > 1)

    if (sc->rx_num_queues > 1)

    psize = scctx->isc_max_frame_size;

    IGC_WRITE_REG(&sc->hw, IGC_RLPML, psize);

    srrctl |= (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
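
    /*
     * SRRCTL's BSIZEPKT field is in 1 KB units (IGC_SRRCTL_BSIZEPKT_SHIFT
     * is the divisor), so the mbuf buffer size is rounded up with
     * BSIZEPKT_ROUNDUP before the shift to avoid under-reporting the
     * buffer size to hardware.
     */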
    if ((sc->rx_num_queues > 1) &&
        (sc->fc == igc_fc_none ||
        sc->fc == igc_fc_rx_pause)) {

    for (i = 0, que = sc->rx_queues; i < sc->rx_num_queues; i++, que++) {
        struct rx_ring *rxr = &que->rxr;
        u64 bus_addr = rxr->rx_paddr;

        /* Configure for header split? -- ignore for now */
        rxr->hdr_split = igc_header_split;

        scctx->isc_nrxd[0] * sizeof(struct igc_rx_desc));

    struct igc_hw *hw = &sc->hw;

    struct igc_hw *hw = &sc->hw;

    if (__predict_true(sc->intr_type == IFLIB_INTR_MSIX)) {
        mask = (sc->que_mask | sc->link_mask);

    struct igc_hw *hw = &sc->hw;

    if (__predict_true(sc->intr_type == IFLIB_INTR_MSIX)) {

    if (sc->vf_ifp)

    ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
    IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT,

    ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
    IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT,

 ** to both system management and wake-on-lan for

    eeprom_data = IGC_READ_REG(&sc->hw, IGC_WUC);

    sc->wol = IGC_WUFC_LNKC;

    sc->wol &= ~IGC_WUFC_MAG;

    sc->wol &= ~IGC_WUFC_EX;

    sc->wol &= ~IGC_WUFC_MC;

    rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);

    IGC_WRITE_REG(&sc->hw, IGC_RCTL, rctl);

    if (!(sc->wol & (IGC_WUFC_EX | IGC_WUFC_MAG | IGC_WUFC_MC)))

    ctrl = IGC_READ_REG(&sc->hw, IGC_CTRL);

    IGC_WRITE_REG(&sc->hw, IGC_CTRL, ctrl);

    IGC_WRITE_REG(&sc->hw, IGC_WUC, IGC_WUC_PME_EN);
    IGC_WRITE_REG(&sc->hw, IGC_WUFC, sc->wol);
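
    /*
     * WUFC selects which events (magic packet, link change, etc.) may
     * wake the part, while WUC_PME_EN allows an armed wake event to
     * assert PME; both registers must be programmed for wake-on-LAN to
     * fire.
     */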
    u64 prev_xoffrxc = sc->stats.xoffrxc;

    sc->stats.crcerrs += IGC_READ_REG(&sc->hw, IGC_CRCERRS);
    sc->stats.mpc += IGC_READ_REG(&sc->hw, IGC_MPC);
    sc->stats.scc += IGC_READ_REG(&sc->hw, IGC_SCC);
    sc->stats.ecol += IGC_READ_REG(&sc->hw, IGC_ECOL);

    sc->stats.mcc += IGC_READ_REG(&sc->hw, IGC_MCC);
    sc->stats.latecol += IGC_READ_REG(&sc->hw, IGC_LATECOL);
    sc->stats.colc += IGC_READ_REG(&sc->hw, IGC_COLC);
    sc->stats.colc += IGC_READ_REG(&sc->hw, IGC_RERC);
    sc->stats.dc += IGC_READ_REG(&sc->hw, IGC_DC);
    sc->stats.rlec += IGC_READ_REG(&sc->hw, IGC_RLEC);
    sc->stats.xonrxc += IGC_READ_REG(&sc->hw, IGC_XONRXC);
    sc->stats.xontxc += IGC_READ_REG(&sc->hw, IGC_XONTXC);
    sc->stats.xoffrxc += IGC_READ_REG(&sc->hw, IGC_XOFFRXC);

    if (sc->stats.xoffrxc != prev_xoffrxc)
        sc->shared->isc_pause_frames = 1;
    sc->stats.xofftxc += IGC_READ_REG(&sc->hw, IGC_XOFFTXC);
    sc->stats.fcruc += IGC_READ_REG(&sc->hw, IGC_FCRUC);
    sc->stats.prc64 += IGC_READ_REG(&sc->hw, IGC_PRC64);
    sc->stats.prc127 += IGC_READ_REG(&sc->hw, IGC_PRC127);
    sc->stats.prc255 += IGC_READ_REG(&sc->hw, IGC_PRC255);
    sc->stats.prc511 += IGC_READ_REG(&sc->hw, IGC_PRC511);
    sc->stats.prc1023 += IGC_READ_REG(&sc->hw, IGC_PRC1023);
    sc->stats.prc1522 += IGC_READ_REG(&sc->hw, IGC_PRC1522);
    sc->stats.tlpic += IGC_READ_REG(&sc->hw, IGC_TLPIC);
    sc->stats.rlpic += IGC_READ_REG(&sc->hw, IGC_RLPIC);
    sc->stats.gprc += IGC_READ_REG(&sc->hw, IGC_GPRC);
    sc->stats.bprc += IGC_READ_REG(&sc->hw, IGC_BPRC);
    sc->stats.mprc += IGC_READ_REG(&sc->hw, IGC_MPRC);
    sc->stats.gptc += IGC_READ_REG(&sc->hw, IGC_GPTC);

    /* For the 64-bit byte counters the low dword must be read first. */

    sc->stats.gorc += IGC_READ_REG(&sc->hw, IGC_GORCL) +
        ((u64)IGC_READ_REG(&sc->hw, IGC_GORCH) << 32);
    sc->stats.gotc += IGC_READ_REG(&sc->hw, IGC_GOTCL) +
        ((u64)IGC_READ_REG(&sc->hw, IGC_GOTCH) << 32);

    sc->stats.rnbc += IGC_READ_REG(&sc->hw, IGC_RNBC);
    sc->stats.ruc += IGC_READ_REG(&sc->hw, IGC_RUC);
    sc->stats.rfc += IGC_READ_REG(&sc->hw, IGC_RFC);
    sc->stats.roc += IGC_READ_REG(&sc->hw, IGC_ROC);
    sc->stats.rjc += IGC_READ_REG(&sc->hw, IGC_RJC);

    sc->stats.mgprc += IGC_READ_REG(&sc->hw, IGC_MGTPRC);
    sc->stats.mgpdc += IGC_READ_REG(&sc->hw, IGC_MGTPDC);
    sc->stats.mgptc += IGC_READ_REG(&sc->hw, IGC_MGTPTC);

    sc->stats.tor += IGC_READ_REG(&sc->hw, IGC_TORH);
    sc->stats.tot += IGC_READ_REG(&sc->hw, IGC_TOTH);

    sc->stats.tpr += IGC_READ_REG(&sc->hw, IGC_TPR);
    sc->stats.tpt += IGC_READ_REG(&sc->hw, IGC_TPT);
    sc->stats.ptc64 += IGC_READ_REG(&sc->hw, IGC_PTC64);
    sc->stats.ptc127 += IGC_READ_REG(&sc->hw, IGC_PTC127);
    sc->stats.ptc255 += IGC_READ_REG(&sc->hw, IGC_PTC255);
    sc->stats.ptc511 += IGC_READ_REG(&sc->hw, IGC_PTC511);
    sc->stats.ptc1023 += IGC_READ_REG(&sc->hw, IGC_PTC1023);
    sc->stats.ptc1522 += IGC_READ_REG(&sc->hw, IGC_PTC1522);
    sc->stats.mptc += IGC_READ_REG(&sc->hw, IGC_MPTC);
    sc->stats.bptc += IGC_READ_REG(&sc->hw, IGC_BPTC);

    sc->stats.iac += IGC_READ_REG(&sc->hw, IGC_IAC);
    sc->stats.rxdmtc += IGC_READ_REG(&sc->hw, IGC_RXDMTC);

    sc->stats.algnerrc += IGC_READ_REG(&sc->hw, IGC_ALGNERRC);
    sc->stats.tncrs += IGC_READ_REG(&sc->hw, IGC_TNCRS);
    sc->stats.htdpmc += IGC_READ_REG(&sc->hw, IGC_HTDPMC);
    sc->stats.tsctc += IGC_READ_REG(&sc->hw, IGC_TSCTC);

    return (sc->stats.colc);

    return (sc->dropped_pkts + sc->stats.rxerrc +
        sc->stats.crcerrs + sc->stats.algnerrc +
        sc->stats.ruc + sc->stats.roc +
        sc->stats.mpc + sc->stats.htdpmc);

    return (sc->stats.ecol + sc->stats.latecol +
        sc->watchdog_events);
/* igc_if_needs_restart - Tell iflib when the driver needs to be reinitialized

/* Export a single 32-bit register via a read-only sysctl. */

    sc = oidp->oid_arg1;
    val = IGC_READ_REG(&sc->hw, oidp->oid_arg2);

    bool tx = oidp->oid_arg2;

    tque = oidp->oid_arg1;
    hw = &tque->sc->hw;
    reg = IGC_READ_REG(hw, IGC_EITR(tque->me));

    rque = oidp->oid_arg1;
    hw = &rque->sc->hw;
    reg = IGC_READ_REG(hw, IGC_EITR(rque->msix));

    if (error || !req->newptr)

    device_t dev = iflib_get_dev(sc->ctx);
    struct igc_tx_queue *tx_que = sc->tx_queues;
    struct igc_rx_queue *rx_que = sc->rx_queues;

    struct igc_hw_stats *stats = &sc->stats;

        CTLFLAG_RD, &sc->dropped_pkts,

        CTLFLAG_RD, &sc->link_irq,
        "Link MSI-X IRQ Handled");

        CTLFLAG_RD, &sc->rx_overruns,

        CTLFLAG_RD, &sc->watchdog_events,

        CTLFLAG_RD, &sc->hw.fc.high_water, 0,

        CTLFLAG_RD, &sc->hw.fc.low_water, 0,

    for (int i = 0; i < sc->tx_num_queues; i++, tx_que++) {
        struct tx_ring *txr = &tx_que->txr;

            IGC_TDH(txr->me), igc_sysctl_reg_handler, "IU",

            IGC_TDT(txr->me), igc_sysctl_reg_handler, "IU",

            CTLFLAG_RD, &txr->tx_irq,
            "Queue MSI-X Transmit Interrupts");

    for (int j = 0; j < sc->rx_num_queues; j++, rx_que++) {
        struct rx_ring *rxr = &rx_que->rxr;

            IGC_RDH(rxr->me), igc_sysctl_reg_handler, "IU",

            IGC_RDT(rxr->me), igc_sysctl_reg_handler, "IU",

            CTLFLAG_RD, &rxr->rx_irq,
            "Queue MSI-X Receive Interrupts");

        CTLFLAG_RD, &stats->ecol,
        CTLFLAG_RD, &stats->scc,
        CTLFLAG_RD, &stats->mcc,
        CTLFLAG_RD, &stats->latecol,
        CTLFLAG_RD, &stats->colc,
        CTLFLAG_RD, &sc->stats.symerrs,
        CTLFLAG_RD, &sc->stats.sec,
        CTLFLAG_RD, &sc->stats.dc,
        CTLFLAG_RD, &sc->stats.mpc,
        CTLFLAG_RD, &sc->stats.rlec,
        CTLFLAG_RD, &sc->stats.rnbc,
        CTLFLAG_RD, &sc->stats.ruc,
        CTLFLAG_RD, &sc->stats.rfc,
        CTLFLAG_RD, &sc->stats.roc,
        CTLFLAG_RD, &sc->stats.rjc,
        CTLFLAG_RD, &sc->stats.rxerrc,
        CTLFLAG_RD, &sc->stats.crcerrs,
        CTLFLAG_RD, &sc->stats.algnerrc,
        CTLFLAG_RD, &sc->stats.xonrxc,
        CTLFLAG_RD, &sc->stats.xontxc,
        CTLFLAG_RD, &sc->stats.xoffrxc,
        CTLFLAG_RD, &sc->stats.xofftxc,
        CTLFLAG_RD, &sc->stats.fcruc,
        CTLFLAG_RD, &sc->stats.mgprc,
        CTLFLAG_RD, &sc->stats.mgpdc,
        CTLFLAG_RD, &sc->stats.mgptc,
        CTLFLAG_RD, &sc->stats.tpr,
        CTLFLAG_RD, &sc->stats.gprc,
        CTLFLAG_RD, &sc->stats.bprc,
        CTLFLAG_RD, &sc->stats.mprc,
        CTLFLAG_RD, &sc->stats.prc64,
        CTLFLAG_RD, &sc->stats.prc127,
        "65-127 byte frames received");
        CTLFLAG_RD, &sc->stats.prc255,
        "128-255 byte frames received");
        CTLFLAG_RD, &sc->stats.prc511,
        "256-511 byte frames received");
        CTLFLAG_RD, &sc->stats.prc1023,
        "512-1023 byte frames received");
        CTLFLAG_RD, &sc->stats.prc1522,
2883 "1023-1522 byte frames received");
        CTLFLAG_RD, &sc->stats.gorc,
        CTLFLAG_RD, &sc->stats.gotc,
        CTLFLAG_RD, &sc->stats.tpt,
        CTLFLAG_RD, &sc->stats.gptc,
        CTLFLAG_RD, &sc->stats.bptc,
        CTLFLAG_RD, &sc->stats.mptc,
        CTLFLAG_RD, &sc->stats.ptc64,
        CTLFLAG_RD, &sc->stats.ptc127,
        "65-127 byte frames transmitted");
        CTLFLAG_RD, &sc->stats.ptc255,
        "128-255 byte frames transmitted");
        CTLFLAG_RD, &sc->stats.ptc511,
        "256-511 byte frames transmitted");
        CTLFLAG_RD, &sc->stats.ptc1023,
        "512-1023 byte frames transmitted");
        CTLFLAG_RD, &sc->stats.ptc1522,
        "1024-1522 byte frames transmitted");
        CTLFLAG_RD, &sc->stats.tsctc,

        CTLFLAG_RD, &sc->stats.iac,

        CTLFLAG_RD, &sc->stats.rxdmtc,

    struct igc_hw *hw = &sc->hw;
    struct igc_fw_version *fw_ver = &sc->fw_ver;

    if (fw_ver->eep_major || fw_ver->eep_minor || fw_ver->eep_build) {
        sbuf_printf(buf, "EEPROM V%d.%d-%d", fw_ver->eep_major,
            fw_ver->eep_minor, fw_ver->eep_build);

    if (fw_ver->invm_major || fw_ver->invm_minor ||
        fw_ver->invm_img_type) {
            space, fw_ver->invm_major, fw_ver->invm_minor,
            fw_ver->invm_img_type);

    if (fw_ver->or_valid) {
        sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
            space, fw_ver->or_major, fw_ver->or_build,
            fw_ver->or_patch);

    if (fw_ver->etrack_id)
        sbuf_printf(buf, "%seTrack 0x%08x", space, fw_ver->etrack_id);

    device_t dev = sc->dev;

    igc_sbuf_fw_version(&sc->fw_ver, buf);

    device_t dev = sc->dev;

    igc_sbuf_fw_version(&sc->fw_ver, buf);

    result = -1;

    if (error || !req->newptr)

     * first 32 16-bit words of the EEPROM to

    igc_read_nvm(&sc->hw, i, 1, &eeprom_data);

    sc = oidp->oid_arg1;
    switch (oidp->oid_arg2) {

    val = IGC_READ_REG(&sc->hw, reg);

    if (error != 0 || req->newptr == NULL)

    IGC_WRITE_REG(&sc->hw, reg, val);

 * 0 - off
 * 1 - rx pause
 * 2 - tx pause
 * 3 - full

    static int input = 3; /* default is full */

    error = sysctl_handle_int(oidp, &input, 0, req);

    if ((error) || (req->newptr == NULL))

    if (input == sc->fc) /* no change? */

    switch (input) {

    sc->hw.fc.requested_mode = input;
    sc->fc = input;

    sc->hw.fc.current_mode = sc->hw.fc.requested_mode;
    igc_force_mac_fc(&sc->hw);
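
/*
 * Hypothetical usage sketch for the handler above: the OID is created as
 * "fc" under the device's sysctl tree, so something like
 * `sysctl dev.igc.0.fc=3` would request full flow control and take effect
 * immediately via igc_force_mac_fc().
 */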
 * 0/1 - off/on
 * 250,500,1000-10000 in thousands

    error = sysctl_handle_int(oidp, &sc->dmac, 0, req);

    if ((error) || (req->newptr == NULL))

    switch (sc->dmac) {
        sc->dmac = 1000;

    /* Legal values - allow */

        sc->dmac = 0;

    igc_if_init(sc->ctx);

 * 0/1 - enabled/disabled

    value = sc->hw.dev_spec._i225.eee_disable;

    if (error || req->newptr == NULL)

    sc->hw.dev_spec._i225.eee_disable = (value != 0);
    igc_if_init(sc->ctx);

    result = -1;

    if (error || !req->newptr)

    if (error || !req->newptr || result != 1)

 * needed for debugging a problem. -jfv

    device_t dev = iflib_get_dev(sc->ctx);
    if_t ifp = iflib_get_ifp(sc->ctx);
    struct tx_ring *txr = &sc->tx_queues->txr;
    struct rx_ring *rxr = &sc->rx_queues->rxr;

    for (int i = 0; i < sc->tx_num_queues; i++, txr++) {
        device_printf(dev, "TX Queue %d ------\n", i);
            IGC_READ_REG(&sc->hw, IGC_TDH(i)),
            IGC_READ_REG(&sc->hw, IGC_TDT(i)));

    for (int j = 0; j < sc->rx_num_queues; j++, rxr++) {
        device_printf(dev, "RX Queue %d ------\n", j);
            IGC_READ_REG(&sc->hw, IGC_RDH(j)),
            IGC_READ_REG(&sc->hw, IGC_RDT(j)));