Lines Matching "hw-flow-ctrl"

3   Copyright (c) 2001-2017, Intel Corporation
48 static const char ixgbe_driver_version[] = "4.0.1-k";
93 "Intel(R) X520-T 82599 LOM"),
105 "Intel(R) X520-1 82599EN (SFP+)"),
107 "Intel(R) X520-4 82599 (Quad SFP+)"),
109 "Intel(R) X520-Q1 82599 (QSFP+)"),
111 "Intel(R) X540-AT2"),
112 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
113 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
114 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
120 "Intel(R) X552/X557-AT (10GBASE-T)"),
122 "Intel(R) X552 (1000BASE-T)"),
138 "Intel(R) X553/X557-AT (10GBASE-T)"),
144 "Intel(R) X540-T2 (Bypass)"),
340 /* Flow control setting, default to full */
343 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
360 * MSI-X should be the default for best performance,
366 "Enable MSI-X interrupts");
379 * Not sure if Flow Director is fully baked,
385 "Enable Flow Director");
387 /* Receive-Side Scaling */
391 "Enable Receive-Side Scaling (RSS)");
412 * For Flow Director: this is the number of TX packets we sample
457 if_softc_ctx_t scctx = sc->shared;
461 MPASS(sc->num_tx_queues > 0);
462 MPASS(sc->num_tx_queues == ntxqsets);
466 sc->tx_queues =
469 if (!sc->tx_queues) {
475 for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
476 struct tx_ring *txr = &que->txr;
478 /* In case SR-IOV is enabled, align the index properly */
479 txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
481 txr->sc = que->sc = sc;
484 txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
485 scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
486 if (txr->tx_rsq == NULL) {
490 for (j = 0; j < scctx->isc_ntxd[0]; j++)
491 txr->tx_rsq[j] = QIDX_INVALID;
493 txr->tail = IXGBE_TDT(txr->me);
494 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
495 txr->tx_paddr = paddrs[i];
497 txr->bytes = 0;
498 txr->total_packets = 0;
501 if (sc->feat_en & IXGBE_FEATURE_FDIR)
502 txr->atr_sample = atr_sample_rate;
507 sc->num_tx_queues);
528 MPASS(sc->num_rx_queues > 0);
529 MPASS(sc->num_rx_queues == nrxqsets);
533 sc->rx_queues =
536 if (!sc->rx_queues) {
542 for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
543 struct rx_ring *rxr = &que->rxr;
545 /* In case SR-IOV is enabled, align the index properly */
546 rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
548 rxr->sc = que->sc = sc;
550 /* get the virtual and physical address of the hw queues */
551 rxr->tail = IXGBE_RDT(rxr->me);
552 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
553 rxr->rx_paddr = paddrs[i];
554 rxr->bytes = 0;
555 rxr->que = que;
559 sc->num_rx_queues);
571 struct ix_tx_queue *tx_que = sc->tx_queues;
572 struct ix_rx_queue *rx_que = sc->rx_queues;
576 for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
577 struct tx_ring *txr = &tx_que->txr;
578 if (txr->tx_rsq == NULL)
581 free(txr->tx_rsq, M_IXGBE);
582 txr->tx_rsq = NULL;
585 free(sc->tx_queues, M_IXGBE);
586 sc->tx_queues = NULL;
589 free(sc->rx_queues, M_IXGBE);
590 sc->rx_queues = NULL;
600 struct ixgbe_hw *hw = &sc->hw;
606 if (sc->feat_en & IXGBE_FEATURE_RSS) {
617 switch (sc->hw.mac.type) {
632 if (j == sc->num_rx_queues)
635 if (sc->feat_en & IXGBE_FEATURE_RSS) {
642 queue_id = queue_id % sc->num_rx_queues;
654 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
656 IXGBE_WRITE_REG(hw,
657 IXGBE_ERETA((i >> 2) - 32), reta);
664 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
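A minimal sketch of the redirection-table packing performed by the writes above (lines 654-657): four 8-bit queue indices are accumulated per 32-bit RETA register, and entries beyond 128 spill into the ERETA range. Names are the driver's own; the loop body is illustrative, not the verbatim function.

    u32 reta = 0;
    for (i = 0; i < scctx->isc_rss_table_size; i++) {
        /* one byte per entry, round-robin over the RX queues */
        reta = (reta << 8) | (i % sc->num_rx_queues);
        if ((i & 3) == 3) {            /* four entries packed */
            if (i < 128)
                IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
            else
                IXGBE_WRITE_REG(hw,
                    IXGBE_ERETA((i >> 2) - 32), reta);
            reta = 0;
        }
    }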
667 if (sc->feat_en & IXGBE_FEATURE_RSS)
671 * Disable UDP - IP fragments aren't currently being handled
672 * and so we end up with a mix of 2-tuple and 4-tuple
702 mrqc |= ixgbe_get_mrqc(sc->iov_mode);
703 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
707 * ixgbe_initialize_receive_units - Setup receive registers and features.
709 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
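Assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT == 10 (1 KiB buffer units on this hardware), the rounding applied at line 746 below works out as in this sketch; the figures are illustrative arithmetic, not driver output:

    /*
     * bufsz = (rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT
     *
     *   rx_mbuf_sz = 2048: (2048 + 1023) >> 10 = 2   -> 2 KiB buffers
     *   rx_mbuf_sz = 4096: (4096 + 1023) >> 10 = 4   -> 4 KiB buffers
     *   rx_mbuf_sz = 9216: (9216 + 1023) >> 10 = 9   -> 9 KiB buffers
     *
     * Adding BSIZEPKT_ROUNDUP first keeps a size that is not a 1 KiB
     * multiple from being rounded down below the mbuf size.
     */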
715 if_softc_ctx_t scctx = sc->shared;
716 struct ixgbe_hw *hw = &sc->hw;
727 ixgbe_disable_rx(hw);
730 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
732 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
736 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
739 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
744 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
746 bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
750 for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
751 struct rx_ring *rxr = &que->rxr;
752 u64 rdba = rxr->rx_paddr;
754 j = rxr->me;
757 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
759 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
760 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
761 scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
764 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
771 * Set DROP_EN iff we have no flow control and >1 queue.
776 if (sc->num_rx_queues > 1 &&
777 sc->hw.fc.requested_mode == ixgbe_fc_none) {
783 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
785 /* Setup the HW Rx Head and Tail Descriptor Pointers */
786 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
787 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
790 rxr->tail = IXGBE_RDT(rxr->me);
793 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
798 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
801 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
805 if (sc->feat_en & IXGBE_FEATURE_RSS) {
817 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
822 * ixgbe_initialize_transmit_units - Enable transmit units.
828 struct ixgbe_hw *hw = &sc->hw;
829 if_softc_ctx_t scctx = sc->shared;
834 for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
836 struct tx_ring *txr = &que->txr;
837 u64 tdba = txr->tx_paddr;
839 int j = txr->me;
841 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
843 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
844 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
845 scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
847 /* Setup the HW Tx Head and Tail descriptor pointers */
848 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
849 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
852 txr->tail = IXGBE_TDT(txr->me);
854 txr->tx_rs_cidx = txr->tx_rs_pidx;
855 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
856 for (int k = 0; k < scctx->isc_ntxd[0]; k++)
857 txr->tx_rsq[k] = QIDX_INVALID;
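The base-address writes above (lines 841-843, truncated in this listing) split each ring's 64-bit DMA address across a 32-bit register pair; a minimal sketch, assuming the usual low-word mask:

    u64 tdba = txr->tx_paddr;
    IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), (u32)(tdba & 0xffffffffULL));
    IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (u32)(tdba >> 32));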
865 switch (hw->mac.type) {
867 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
871 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
875 switch (hw->mac.type) {
877 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
880 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j),
887 if (hw->mac.type != ixgbe_mac_82598EB) {
890 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
892 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
894 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
896 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
897 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
898 ixgbe_get_mtqc(sc->iov_mode));
900 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
915 * ixgbe_if_attach_pre - Device initialization routine, part 1
929 struct ixgbe_hw *hw;
939 sc->hw.back = sc;
940 sc->ctx = ctx;
941 sc->dev = dev;
942 scctx = sc->shared = iflib_get_softc_ctx(ctx);
943 sc->media = iflib_get_media(ctx);
944 hw = &sc->hw;
947 hw->vendor_id = pci_get_vendor(dev);
948 hw->device_id = pci_get_device(dev);
949 hw->revision_id = pci_get_revid(dev);
950 hw->subsystem_vendor_id = pci_get_subvendor(dev);
951 hw->subsystem_device_id = pci_get_subdevice(dev);
953 /* Do base PCI setup - map BAR0 */
960 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
962 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
967 if (ixgbe_init_shared_code(hw) != 0) {
973 if (hw->mac.ops.fw_recovery_mode &&
974 hw->mac.ops.fw_recovery_mode(hw)) {
984 /* 82598 does not support SR-IOV, initialize everything else */
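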
985 if (hw->mac.type >= ixgbe_mac_82599_vf) {
986 for (i = 0; i < sc->num_vfs; i++)
987 hw->mbx.ops[i].init_params(hw);
990 hw->allow_unsupported_sfp = allow_unsupported_sfp;
992 if (hw->mac.type != ixgbe_mac_82598EB)
993 hw->phy.smart_speed = ixgbe_smart_speed;
1001 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
1002 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
1007 ixgbe_init_swfw_semaphore(hw);
1009 /* Set an initial default flow control value */
1010 hw->fc.requested_mode = ixgbe_flow_control;
1012 hw->phy.reset_if_overtemp = true;
1013 error = ixgbe_reset_hw(hw);
1014 hw->phy.reset_if_overtemp = false;
1021 sc->sfp_probe = true;
1034 if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
1040 error = ixgbe_start_hw(hw);
1044 "This device is a pre-production adapter/LOM. Please be"
1063 iflib_set_mac(ctx, hw->mac.addr);
1064 switch (sc->hw.mac.type) {
1068 scctx->isc_rss_table_size = 512;
1069 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
1072 scctx->isc_rss_table_size = 128;
1073 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1079 scctx->isc_txqsizes[0] =
1080 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1082 scctx->isc_rxqsizes[0] =
1083 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
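Worked sizing example for the two roundup2() calls above, assuming 16-byte advanced descriptors and a 128-byte DBA_ALIGN (illustrative values; the TX size also folds in the small extra term truncated at line 1080 before rounding):

    /*
     * isc_ntxd[0] = 2048: 2048 * 16 = 32768 bytes, already 128-aligned
     * isc_nrxd[0] = 1024: 1024 * 16 = 16384 bytes, already 128-aligned
     * roundup2() only pads when the size is not a multiple of the
     * alignment.
     */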
1087 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1089 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
1090 scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1092 scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
1093 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1096 scctx->isc_msix_bar = pci_msix_table_bar(dev);
1098 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1099 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1100 scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1102 scctx->isc_txrx = &ixgbe_txrx;
1104 scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1109 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
1111 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
1118 * ixgbe_if_attach_post - Device initialization routine, part 2
1131 struct ixgbe_hw *hw;
1136 hw = &sc->hw;
1138 if (sc->intr_type == IFLIB_INTR_LEGACY &&
1139 (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1146 sc->mta = malloc(sizeof(*sc->mta) * MAX_NUM_MULTICAST_ADDRESSES,
1148 if (sc->mta == NULL) {
1155 /* hw.ix defaults init */
1159 ixgbe_enable_tx_laser(hw);
1162 ixgbe_set_phy_power(hw, true);
1191 sc->dmac = 0;
1193 sc->advertise = ixgbe_get_default_advertise(sc);
1195 if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
1202 if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1203 sc->recovery_mode = 0;
1206 callout_init(&sc->fw_mode_timer, true);
1209 callout_reset(&sc->fw_mode_timer, hz, ixgbe_fw_mode_timer, sc);
1223 * Sets each port's hw->wol_enabled value depending
1229 struct ixgbe_hw *hw = &sc->hw;
1233 sc->wol_support = hw->wol_enabled = 0;
1234 ixgbe_get_device_caps(hw, &dev_caps);
1237 hw->bus.func == 0))
1238 sc->wol_support = hw->wol_enabled = 1;
1241 sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1261 sc->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
1263 sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);
1268 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
1284 return (sc->ipackets);
1286 return (sc->opackets);
1288 return (sc->ibytes);
1290 return (sc->obytes);
1292 return (sc->imcasts);
1294 return (sc->omcasts);
1298 return (sc->iqdrops);
1302 return (sc->ierrors);
1315 struct ixgbe_hw *hw = &sc->hw;
1318 if (hw->phy.ops.read_i2c_byte == NULL)
1320 for (i = 0; i < req->len; i++)
1321 hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1322 req->dev_addr, &req->data[i]);
1326 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be
1352 struct ixgbe_hw *hw = &sc->hw;
1356 layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
1360 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1362 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1364 ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1366 ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1368 if (hw->mac.type == ixgbe_mac_X550) {
1369 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
1370 ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
1375 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1377 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1381 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1382 if (hw->phy.multispeed_fiber)
1383 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
1387 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1388 if (hw->phy.multispeed_fiber)
1389 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
1392 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1394 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1398 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1400 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1402 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1404 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1409 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1414 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1419 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1424 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1430 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1431 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1433 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1436 ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1443 ixgbe_is_sfp(struct ixgbe_hw *hw)
1445 switch (hw->mac.type) {
1447 if (hw->phy.type == ixgbe_phy_nl)
1451 switch (hw->mac.ops.get_media_type(hw)) {
1460 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1475 struct ixgbe_hw *hw = &sc->hw;
1479 sfp = ixgbe_is_sfp(hw);
1482 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
1485 if (hw->mac.ops.check_link)
1486 err = ixgbe_check_link(hw, &sc->link_speed,
1487 &sc->link_up, false);
1490 autoneg = hw->phy.autoneg_advertised;
1491 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1492 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1497 if (hw->mac.type == ixgbe_mac_X550 &&
1498 hw->phy.autoneg_advertised == 0) {
1509 * If hw->phy.autoneg_advertised does not
1517 * Otherwise (i.e. if hw->phy.autoneg_advertised
1526 if (hw->mac.ops.setup_link)
1527 err = hw->mac.ops.setup_link(hw, autoneg,
1528 sc->link_up);
1533 * ixgbe_update_stats_counters - Update board statistics counters.
1538 struct ixgbe_hw *hw = &sc->hw;
1539 struct ixgbe_hw_stats *stats = &sc->stats.pf;
1544 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1545 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1546 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1547 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1548 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1551 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1552 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1553 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1555 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1556 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1557 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1560 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1561 stats->gprc -= missed_rx;
1563 if (hw->mac.type != ixgbe_mac_82598EB) {
1564 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1565 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1566 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1567 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1568 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1569 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1570 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1571 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1572 stats->lxoffrxc += lxoffrxc;
1574 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1575 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1576 stats->lxoffrxc += lxoffrxc;
1578 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1579 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1580 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1588 sc->shared->isc_pause_frames = 1;
1594 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1595 stats->bprc += bprc;
1596 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1597 if (hw->mac.type == ixgbe_mac_82598EB)
1598 stats->mprc -= bprc;
1600 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1601 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1602 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1603 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1604 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1605 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1607 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1608 stats->lxontxc += lxon;
1609 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1610 stats->lxofftxc += lxoff;
1613 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1614 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1615 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1616 stats->gptc -= total;
1617 stats->mptc -= total;
1618 stats->ptc64 -= total;
1619 stats->gotc -= total * ETHER_MIN_LEN;
1621 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1622 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1623 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1624 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1625 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1626 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1627 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1628 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1629 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1630 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1631 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1632 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1633 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1634 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1635 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1636 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1637 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1638 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1640 if (hw->mac.type != ixgbe_mac_82598EB) {
1641 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1642 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1643 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1644 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1645 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1649 IXGBE_SET_IPACKETS(sc, stats->gprc);
1650 IXGBE_SET_OPACKETS(sc, stats->gptc);
1651 IXGBE_SET_IBYTES(sc, stats->gorc);
1652 IXGBE_SET_OBYTES(sc, stats->gotc);
1653 IXGBE_SET_IMCASTS(sc, stats->mprc);
1654 IXGBE_SET_OMCASTS(sc, stats->mptc);
1660 * - CRC error count,
1661 * - illegal byte error count,
1662 * - missed packets count,
1663 * - length error count,
1664 * - undersized packets count,
1665 * - fragmented packets count,
1666 * - oversized packets count,
1667 * - jabber count.
1669 IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
1670 stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc +
1671 stats->roc + stats->rjc);
1682 device_t dev = iflib_get_dev(sc->ctx);
1688 struct ixgbe_hw_stats *stats = &sc->stats.pf;
1698 CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets");
1700 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
1702 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
1704 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
1706 struct tx_ring *txr = &tx_que->txr;
1721 CTLFLAG_RD, &txr->tso_tx, "TSO");
1723 CTLFLAG_RD, &txr->total_packets,
1727 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
1729 struct rx_ring *rxr = &rx_que->rxr;
1737 &sc->rx_queues[i], 0,
1741 CTLFLAG_RD, &(sc->rx_queues[i].irqs),
1752 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1754 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1756 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1758 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1767 CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1769 CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1771 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1773 CTLFLAG_RD, &stats->errbc, "Byte Errors");
1775 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1777 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1779 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1781 CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1783 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1785 /* Flow Control stats */
1787 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1789 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1791 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1793 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1797 CTLFLAG_RD, &stats->tor, "Total Octets Received");
1799 CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1801 CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1803 CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1805 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1807 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1809 CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1811 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1813 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1815 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1817 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1819 CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1821 CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1823 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1825 CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1827 CTLFLAG_RD, &stats->rjc, "Received Jabber");
1829 CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1831 CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1833 CTLFLAG_RD, &stats->xec, "Checksum Errors");
1837 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1839 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1841 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1843 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1845 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1847 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1849 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1851 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1853 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1855 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1857 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1859 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1863 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1870 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1878 if (atomic_load_acq_int(&txr->sc->recovery_mode))
1881 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
1883 if (error || !req->newptr)
1890 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1897 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1904 if (atomic_load_acq_int(&txr->sc->recovery_mode))
1907 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
1909 if (error || !req->newptr)
1916 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1923 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1930 if (atomic_load_acq_int(&rxr->sc->recovery_mode))
1933 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
1935 if (error || !req->newptr)
1942 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1949 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1956 if (atomic_load_acq_int(&rxr->sc->recovery_mode))
1959 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
1961 if (error || !req->newptr)
1971 * HW Filter table since we can get the vlan id. This
1983 sc->shadow_vfta[index] |= (1 << bit);
1984 ++sc->num_vlans;
2001 sc->shadow_vfta[index] &= ~(1 << bit);
2002 --sc->num_vlans;
2003 /* Re-init to load the changes */
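The shadow-VFTA updates above (lines 1983 and 2001) index a 4096-bit table kept as 128 32-bit words; a minimal sketch of the index/bit math the listing omits:

    static void
    vfta_sketch(struct ixgbe_softc *sc, u16 vtag, bool on)
    {
        u16 index = vtag >> 5;      /* which 32-bit word */
        u16 bit = vtag & 0x1f;      /* which bit within it */

        if (on)
            sc->shadow_vfta[index] |= (1 << bit);
        else
            sc->shadow_vfta[index] &= ~(1 << bit);
    }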
2015 struct ixgbe_hw *hw = &sc->hw;
2018 u32 ctrl;
2027 if (sc->num_vlans == 0 ||
2029 /* Clear the vlan hw flag */
2030 for (i = 0; i < sc->num_rx_queues; i++) {
2031 rxr = &sc->rx_queues[i].rxr;
2033 if (hw->mac.type != ixgbe_mac_82598EB) {
2034 ctrl = IXGBE_READ_REG(hw,
2035 IXGBE_RXDCTL(rxr->me));
2036 ctrl &= ~IXGBE_RXDCTL_VME;
2037 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
2038 ctrl);
2040 rxr->vtag_strip = false;
2042 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2044 ctrl |= IXGBE_VLNCTRL_CFIEN;
2045 ctrl &= ~IXGBE_VLNCTRL_VFE;
2046 if (hw->mac.type == ixgbe_mac_82598EB)
2047 ctrl &= ~IXGBE_VLNCTRL_VME;
2048 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2054 for (i = 0; i < sc->num_rx_queues; i++) {
2055 rxr = &sc->rx_queues[i].rxr;
2057 if (hw->mac.type != ixgbe_mac_82598EB) {
2058 ctrl = IXGBE_READ_REG(hw,
2059 IXGBE_RXDCTL(rxr->me));
2060 ctrl |= IXGBE_RXDCTL_VME;
2061 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
2062 ctrl);
2064 rxr->vtag_strip = true;
2075 if (sc->shadow_vfta[i] != 0)
2076 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2077 sc->shadow_vfta[i]);
2079 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2082 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2083 ctrl |= IXGBE_VLNCTRL_VFE;
2085 if (hw->mac.type == ixgbe_mac_82598EB)
2086 ctrl |= IXGBE_VLNCTRL_VME;
2087 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2099 device_t dev = iflib_get_dev(sc->ctx);
2100 struct ixgbe_hw *hw = &sc->hw;
2106 switch (hw->device_id) {
2114 ixgbe_get_bus_info(hw);
2117 * Some devices don't use PCI-E, but there is no need
2120 switch (hw->mac.type) {
2147 * Hmm...can't get PCI-Express capabilities.
2151 ixgbe_get_bus_info(hw);
2156 ixgbe_set_pci_config_data_generic(hw, link);
2160 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2161 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2162 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2164 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
2165 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
2166 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
2170 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2171 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2172 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2174 "PCI-Express bandwidth available for this card"
2180 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2181 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2182 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2184 "PCI-Express bandwidth available for this card"
2201 * Setup MSI-X Interrupt resources and handlers
2207 struct ix_rx_queue *rx_que = sc->rx_queues;
2214 for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
2218 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2219 IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me,
2226 sc->num_rx_queues = i + 1;
2230 rx_que->msix = vector;
2232 for (int i = 0; i < sc->num_tx_queues; i++) {
2234 tx_que = &sc->tx_queues[i];
2235 tx_que->msix = i % sc->num_rx_queues;
2237 &sc->rx_queues[tx_que->msix].que_irq,
2238 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2241 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
2249 sc->vector = vector;
2253 iflib_irq_free(ctx, &sc->irq);
2254 rx_que = sc->rx_queues;
2255 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
2256 iflib_irq_free(ctx, &rx_que->que_irq);
2265 struct rx_ring *rxr = &que->rxr;
2266 /* FIXME struct tx_ring *txr = ... ->txr; */
2270 * - Write out last calculated setting
2271 * - Calculate based on average size over
2274 if (que->eitr_setting) {
2275 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix),
2276 que->eitr_setting);
2279 que->eitr_setting = 0;
2281 if (rxr->bytes == 0) {
2282 /* FIXME && txr->bytes == 0 */
2286 if ((rxr->bytes) && (rxr->packets))
2287 newitr = rxr->bytes / rxr->packets;
2289 * if ((txr->bytes) && (txr->packets))
2290 * newitr = txr->bytes/txr->packets;
2291 * if ((rxr->bytes) && (rxr->packets))
2292 * newitr = max(newitr, (rxr->bytes / rxr->packets));
2306 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2313 que->eitr_setting = newitr;
2316 /* FIXME txr->bytes = 0; */
2317 /* FIXME txr->packets = 0; */
2318 rxr->bytes = 0;
2319 rxr->packets = 0;
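Between the average-size calculation (line 2287) and the EITR write-back, the routine damps newitr into an interrupt interval roughly as in this sketch; the constants are illustrative:

    newitr += 24;               /* allow for frame + CRC overhead */
    newitr = min(newitr, 3000); /* clamp the interval */
    if (newitr > 300 && newitr < 1200)
        newitr /= 3;            /* be gentler in the mid range */
    else
        newitr /= 2;
    if (sc->hw.mac.type == ixgbe_mac_82598EB)
        newitr |= newitr << 16;          /* 82598 packs two intervals */
    else
        newitr |= IXGBE_EITR_CNT_WDIS;   /* newer MACs: counter write-disable */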
2325 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2331 struct ixgbe_softc *sc = que->sc;
2332 if_t ifp = iflib_get_ifp(que->sc->ctx);
2338 ixgbe_disable_queue(sc, que->msix);
2339 ++que->irqs;
2342 if (sc->enable_aim) {
2350 * ixgbe_media_status - Media Ioctl callback
2359 struct ixgbe_hw *hw = &sc->hw;
2364 ifmr->ifm_status = IFM_AVALID;
2365 ifmr->ifm_active = IFM_ETHER;
2367 if (!sc->link_active)
2370 ifmr->ifm_status |= IFM_ACTIVE;
2371 layer = sc->phy_layer;
2377 switch (sc->link_speed) {
2379 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2382 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2385 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2388 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2391 if (hw->mac.type == ixgbe_mac_X550)
2392 switch (sc->link_speed) {
2394 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2397 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2402 switch (sc->link_speed) {
2404 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2407 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2411 switch (sc->link_speed) {
2413 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2416 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2420 switch (sc->link_speed) {
2422 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2425 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2430 switch (sc->link_speed) {
2432 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2435 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2439 switch (sc->link_speed) {
2441 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2450 switch (sc->link_speed) {
2452 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2455 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2458 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2464 switch (sc->link_speed) {
2466 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2469 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2472 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2477 switch (sc->link_speed) {
2479 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2482 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2485 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2491 switch (sc->link_speed) {
2493 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2496 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2499 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2505 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2506 ifmr->ifm_active |= IFM_UNKNOWN;
2508 /* Display current flow control setting used on link */
2509 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2510 hw->fc.current_mode == ixgbe_fc_full)
2511 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2512 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2513 hw->fc.current_mode == ixgbe_fc_full)
2514 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2518 * ixgbe_media_change - Media Ioctl callback
2528 struct ixgbe_hw *hw = &sc->hw;
2533 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2536 if (hw->phy.media_type == ixgbe_media_type_backplane)
2544 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2595 hw->mac.autotry_restart = true;
2596 hw->mac.ops.setup_link(hw, speed, true);
2597 sc->advertise =
2624 rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2634 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2638 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2642 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2648 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2654 struct ixgbe_hw *hw = &sc->hw;
2658 ++sc->link_irq;
2661 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2664 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2668 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2672 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2673 sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
2676 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2677 if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
2680 if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
2683 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2684 sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2687 device_printf(iflib_get_dev(sc->ctx),
2689 hw->mac.flags |=
2691 ixgbe_reset_hw(hw);
2692 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2697 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2698 switch (sc->hw.mac.type) {
2702 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2704 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2706 retval = hw->phy.ops.check_overtemp(hw);
2709 device_printf(iflib_get_dev(sc->ctx),
2712 device_printf(iflib_get_dev(sc->ctx),
2718 retval = hw->phy.ops.check_overtemp(hw);
2721 device_printf(iflib_get_dev(sc->ctx),
2724 device_printf(iflib_get_dev(sc->ctx),
2726 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2733 if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
2735 sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
2738 if (ixgbe_is_sfp(hw)) {
2739 /* Pluggable optics-related interrupt */
2740 if (hw->mac.type >= ixgbe_mac_X540)
2743 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2746 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2747 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
2750 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2751 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2752 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2753 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2754 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
2759 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2761 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2762 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2766 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2768 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2769 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
2772 return (sc->task_requests != 0) ?
2782 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2786 if (atomic_load_acq_int(&que->sc->recovery_mode))
2789 reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
2796 if (error || !req->newptr)
2806 IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg);
2819 struct ixgbe_hw *hw = &sc->hw;
2837 sc->enable_aim = ixgbe_enable_aim;
2839 &sc->enable_aim, 0, "Interrupt Moderation");
2875 if (hw->mac.type >= ixgbe_mac_X550)
2881 /* for WoL-capable devices */
2882 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2894 /* for X552/X557-AT devices */
2895 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2916 if (sc->feat_cap & IXGBE_FEATURE_EEE) {
2934 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2937 if (!(sc->pci_mem)) {
2944 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
2945 sc->osdep.mem_bus_space_handle =
2946 rman_get_bushandle(sc->pci_mem);
2947 /* Set hw values for shared code */
2948 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
2954 * ixgbe_detach - Device removal routine
2972 device_printf(dev, "SR-IOV in use; detach first.\n");
2979 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
2981 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
2983 callout_drain(&sc->fw_mode_timer);
2986 free(sc->mta, M_IXGBE);
2992 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3000 struct ixgbe_hw *hw = &sc->hw;
3004 if (!hw->wol_enabled)
3005 ixgbe_set_phy_power(hw, false);
3007 /* Limit power management flow to X550EM baseT */
3008 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3009 hw->phy.ops.enter_lplu) {
3011 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3012 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3019 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3025 IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);
3028 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3031 /* X550EM baseT adapters need a special LPLU flow */
3032 hw->phy.reset_disable = true;
3034 error = hw->phy.ops.enter_lplu(hw);
3038 hw->phy.reset_disable = false;
3048 * ixgbe_shutdown - Shutdown entry point
3090 struct ixgbe_hw *hw = &sc->hw;
3096 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3099 IXGBE_READ_REG(hw, IXGBE_WUS));
3100 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3101 /* And clear WUFC until next low-power transition */
3102 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3105 * Required after D3->D0 transition;
3106 * will re-advertise all previous advertised speeds
3115 * ixgbe_if_mtu_set - Ioctl mtu entry point
3130 sc->max_frame_size = mtu + IXGBE_MTU_HDR;
3143 struct ixgbe_hw *hw = &sc->hw;
3145 * IXGBE_HLREG0 (modified on init_locked and hw reset)
3154 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3155 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3161 /* hw requirements ... */
3178 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
3179 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
3183 * ixgbe_if_init - Init entry point
3187 * used by the driver as a hw/sw initialization routine to
3198 struct ixgbe_hw *hw = &sc->hw;
3213 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
3216 bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3217 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
3218 hw->addr_ctrl.rar_used_count = 1;
3220 ixgbe_init_hw(hw);
3230 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
3237 * from MSI-X interrupts
3239 sc->task_requests = 0;
3241 /* Enable SDP & MSI-X interrupts based on adapter */
3247 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3249 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
3250 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3254 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
3256 struct tx_ring *txr = &tx_que->txr;
3258 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3270 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
3273 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
3275 struct rx_ring *rxr = &rx_que->rxr;
3277 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3278 if (hw->mac.type == ixgbe_mac_82598EB) {
3288 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
3290 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
3300 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3301 if (hw->mac.type == ixgbe_mac_82598EB)
3304 ixgbe_enable_rx_dma(hw, rxctrl);
3306 /* Set up MSI/MSI-X routing */
3309 /* Set up auto-mask */
3310 if (hw->mac.type == ixgbe_mac_82598EB)
3311 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3313 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3314 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3319 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3326 * need to be kick-started
3328 if (hw->phy.type == ixgbe_phy_none) {
3329 err = hw->phy.ops.identify(hw);
3338 IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR);
3341 ixgbe_set_phy_power(hw, true);
3346 /* Hardware Packet Buffer & Flow Control setup */
3350 ixgbe_start_hw(hw);
3362 if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
3363 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3365 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3373 * Setup the correct IVAR register for a particular MSI-X interrupt
3375 * - entry is the register array entry
3376 * - vector is the MSI-X vector for this queue
3377 * - type is RX/TX/MISC
3382 struct ixgbe_hw *hw = &sc->hw;
3387 switch (hw->mac.type) {
3389 if (type == -1)
3394 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3397 IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3404 if (type == -1) { /* MISC IVAR */
3406 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3409 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3412 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3415 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
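A sketch of the field math elided just above for 82599-class MACs: each 32-bit IVAR register holds four 8-bit entries (two queues x RX/TX), and vector is assumed to carry IXGBE_IVAR_ALLOC_VAL already:

    u32 index = (16 * (entry & 1)) + (8 * type);  /* bit offset of the field */
    ivar &= ~(0xffUL << index);                   /* clear the old mapping */
    ivar |= ((u32)vector << index);               /* install the new vector */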
3428 struct ix_rx_queue *rx_que = sc->rx_queues;
3429 struct ix_tx_queue *tx_que = sc->tx_queues;
3439 sc->dmac = 0;
3443 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
3444 struct rx_ring *rxr = &rx_que->rxr;
3447 ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);
3450 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
3452 for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
3453 struct tx_ring *txr = &tx_que->txr;
3456 ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
3459 ixgbe_set_ivar(sc, 1, sc->vector, -1);
3468 struct ixgbe_hw *hw = &sc->hw;
3471 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3473 if (sc->intr_type == IFLIB_INTR_MSIX) {
3474 /* Enable Enhanced MSI-X mode */
3482 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3486 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3490 switch (hw->mac.type) {
3502 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3509 * Requires sc->max_frame_size to be set.
3514 struct ixgbe_hw *hw = &sc->hw;
3517 frame = sc->max_frame_size;
3520 switch (hw->mac.type) {
3532 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3533 hw->fc.high_water[0] = rxpb - size;
3536 switch (hw->mac.type) {
3547 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3549 hw->fc.pause_time = IXGBE_FC_PAUSE;
3550 hw->fc.send_xon = true;
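Illustrative watermark arithmetic for the code above: RXPBSIZE(0) is in bytes, so the >> 10 at line 3532 converts it to KiB, and high_water is the packet buffer minus a delay allowance. For example (figures invented for illustration):

    /* rxpb = 0x80000 >> 10 = 512 KiB; size = IXGBE_BT2KB(tmp) = 34 KiB */
    /* hw->fc.high_water[0] = 512 - 34 = 478 (KiB units) */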
3554 * ixgbe_set_multi - Multicast Update
3562 struct ixgbe_mc_addr *mta = sc->mta;
3567 mta[idx].vmdq = sc->pool;
3584 mta = sc->mta;
3592 ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
3596 fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
3607 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
3618 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3623 *vmdq = mta->vmdq;
3627 return (mta->addr);
3631 * ixgbe_local_timer - Timer routine
3645 if (sc->sfp_probe)
3649 ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
3657 * ixgbe_fw_mode_timer - FW mode timer routine
3663 struct ixgbe_hw *hw = &sc->hw;
3665 if (ixgbe_fw_recovery_mode(hw)) {
3666 if (atomic_cmpset_acq_int(&sc->recovery_mode, 0, 1)) {
3668 device_printf(sc->dev,
3674 if (hw->adapter_stopped == FALSE)
3675 ixgbe_if_stop(sc->ctx);
3678 atomic_cmpset_acq_int(&sc->recovery_mode, 1, 0);
3681 callout_reset(&sc->fw_mode_timer, hz,
3694 struct ixgbe_hw *hw = &sc->hw;
3698 if ((hw->phy.type == ixgbe_phy_nl) &&
3699 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3700 s32 ret = hw->phy.ops.identify_sfp(hw);
3703 ret = hw->phy.ops.reset(hw);
3704 sc->sfp_probe = false;
3722 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3729 struct ixgbe_hw *hw = &sc->hw;
3733 if (sc->hw.need_crosstalk_fix) {
3734 switch (hw->mac.type) {
3736 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3741 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3752 err = hw->phy.ops.identify_sfp(hw);
3759 if (hw->mac.type == ixgbe_mac_82598EB)
3760 err = hw->phy.ops.reset(hw);
3762 err = hw->mac.ops.setup_sfp(hw);
3766 "Setup failure - unsupported SFP+ module type.\n");
3769 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
3773 sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3778 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3785 struct ixgbe_hw *hw = &sc->hw;
3789 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3790 sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
3792 autoneg = hw->phy.autoneg_advertised;
3793 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3794 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3795 if (hw->mac.ops.setup_link)
3796 hw->mac.ops.setup_link(hw, autoneg, true);
3799 ifmedia_removeall(sc->media);
3800 ixgbe_add_media_types(sc->ctx);
3801 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
3805 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3812 struct ixgbe_hw *hw = &sc->hw;
3815 error = hw->phy.ops.handle_lasi(hw);
3817 device_printf(sc->dev,
3821 device_printf(sc->dev,
3826 * ixgbe_if_stop - Stop the hardware
3835 struct ixgbe_hw *hw = &sc->hw;
3839 ixgbe_reset_hw(hw);
3840 hw->adapter_stopped = false;
3841 ixgbe_stop_adapter(hw);
3842 if (hw->mac.type == ixgbe_mac_82599EB)
3843 ixgbe_stop_mac_link_on_d3_82599(hw);
3844 /* Turn off the laser - noop with no optics */
3845 ixgbe_disable_tx_laser(hw);
3848 sc->link_up = false;
3852 ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
3858 * ixgbe_update_link_status - Update OS on link state
3870 if (sc->link_up) {
3871 if (sc->link_active == false) {
3874 ((sc->link_speed == 128) ? 10 : 1),
3876 sc->link_active = true;
3877 /* Update any Flow Control changes */
3878 ixgbe_fc_enable(&sc->hw);
3882 ixgbe_link_speed_to_baudrate(sc->link_speed));
3884 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3888 if (sc->link_active == true) {
3892 sc->link_active = false;
3893 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3899 if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
3901 if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
3903 if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
3905 if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
3907 if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
3909 sc->task_requests = 0;
3915 * ixgbe_config_dmac - Configure DMA Coalescing
3920 struct ixgbe_hw *hw = &sc->hw;
3921 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3923 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3926 if (dcfg->watchdog_timer ^ sc->dmac ||
3927 dcfg->link_speed ^ sc->link_speed) {
3928 dcfg->watchdog_timer = sc->dmac;
3929 dcfg->fcoe_en = false;
3930 dcfg->link_speed = sc->link_speed;
3931 dcfg->num_tcs = 1;
3934 dcfg->watchdog_timer, dcfg->link_speed);
3936 hw->mac.ops.dmac_config(hw);
3947 struct ixgbe_hw *hw = &sc->hw;
3948 struct ix_rx_queue *que = sc->rx_queues;
3953 switch (sc->hw.mac.type) {
3964 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3977 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3978 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3979 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3980 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3981 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3982 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3991 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3993 /* Enable SR-IOV */
3994 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3996 /* Enable Flow Director */
3997 if (sc->feat_en & IXGBE_FEATURE_FDIR)
4000 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4002 /* With MSI-X we use auto clear */
4003 if (sc->intr_type == IFLIB_INTR_MSIX) {
4008 if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
4010 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4015 * allow for handling the extended (beyond 32) MSI-X
4018 for (int i = 0; i < sc->num_rx_queues; i++, que++)
4019 ixgbe_enable_queue(sc, que->msix);
4021 IXGBE_WRITE_FLUSH(hw);
4033 if (sc->intr_type == IFLIB_INTR_MSIX)
4034 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
4035 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
4036 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
4038 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
4039 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
4040 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
4042 IXGBE_WRITE_FLUSH(&sc->hw);
4052 struct ixgbe_hw *hw =
4053 &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
4055 /* Re-enable other interrupts */
4056 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
4066 struct ix_rx_queue *que = &sc->rx_queues[rxqid];
4068 ixgbe_enable_queue(sc, que->msix);
4079 struct ixgbe_hw *hw = &sc->hw;
4083 if (hw->mac.type == ixgbe_mac_82598EB) {
4085 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4089 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4092 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
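The truncated lines of ixgbe_enable_queue above split a 64-bit per-vector bit across the two EIMS_EX halves; a minimal sketch of that mask math:

    u64 queue = 1ULL << vector;
    u32 lo = (u32)(queue & 0xffffffff);  /* vectors 0..31  -> EIMS_EX(0) */
    u32 hi = (u32)(queue >> 32);         /* vectors 32..63 -> EIMS_EX(1) */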
4102 struct ixgbe_hw *hw = &sc->hw;
4106 if (hw->mac.type == ixgbe_mac_82598EB) {
4108 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
4112 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
4115 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
4120 * ixgbe_intr - Legacy Interrupt Service Routine
4126 struct ix_rx_queue *que = sc->rx_queues;
4127 struct ixgbe_hw *hw = &sc->hw;
4128 if_ctx_t ctx = sc->ctx;
4131 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4133 ++que->irqs;
4140 if ((sc->feat_en & IXGBE_FEATURE_FAN_FAIL) &&
4142 device_printf(sc->dev,
4144 IXGBE_WRITE_REG(hw, IXGBE_EIMS,
4145 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4150 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
4154 if (ixgbe_is_sfp(hw)) {
4155 /* Pluggable optics-related interrupt */
4156 if (hw->mac.type >= ixgbe_mac_X540)
4159 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4162 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
4163 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
4166 if ((hw->mac.type == ixgbe_mac_82599EB) &&
4167 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
4168 IXGBE_WRITE_REG(hw, IXGBE_EICR,
4169 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4170 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
4175 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
4177 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
4189 struct ix_rx_queue *que = sc->rx_queues;
4192 /* Release all MSI-X queue resources */
4193 if (sc->intr_type == IFLIB_INTR_MSIX)
4194 iflib_irq_free(ctx, &sc->irq);
4197 for (int i = 0; i < sc->num_rx_queues; i++, que++) {
4198 iflib_irq_free(ctx, &que->que_irq);
4202 if (sc->pci_mem != NULL)
4204 rman_get_rid(sc->pci_mem), sc->pci_mem);
4210 * SYSCTL wrapper around setting Flow Control
4219 fc = sc->hw.fc.current_mode;
4222 if ((error) || (req->newptr == NULL))
4226 if (fc == sc->hw.fc.current_mode)
4233 * ixgbe_set_flowcntl - Set flow control
4235 * Flow control values:
4236 * 0 - off
4237 * 1 - rx pause
4238 * 2 - tx pause
4239 * 3 - full
4248 sc->hw.fc.requested_mode = fc;
4249 if (sc->num_rx_queues > 1)
4253 sc->hw.fc.requested_mode = ixgbe_fc_none;
4254 if (sc->num_rx_queues > 1)
4262 sc->hw.fc.disable_fc_autoneg = true;
4263 ixgbe_fc_enable(&sc->hw);
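The sysctl value maps one-to-one onto the shared-code flow-control enum, which is why line 4248 can assign fc straight through; a sketch of that correspondence (member order as commonly defined in the shared code):

    enum ixgbe_fc_mode {
        ixgbe_fc_none = 0,   /* 0 - off */
        ixgbe_fc_rx_pause,   /* 1 - rx pause */
        ixgbe_fc_tx_pause,   /* 2 - tx pause */
        ixgbe_fc_full,       /* 3 - full */
        /* ... */
    };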
4274 * enable this when Multiqueue is enabled AND Flow Control
4280 struct ixgbe_hw *hw = &sc->hw;
4284 for (int i = 0; i < sc->num_rx_queues; i++) {
4285 rxr = &sc->rx_queues[i].rxr;
4286 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4288 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4292 for (int i = 0; i < sc->num_vfs; i++) {
4293 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4306 struct ixgbe_hw *hw = &sc->hw;
4310 for (int i = 0; i < sc->num_rx_queues; i++) {
4311 rxr = &sc->rx_queues[i].rxr;
4312 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4314 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4318 for (int i = 0; i < sc->num_vfs; i++) {
4319 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4336 if (atomic_load_acq_int(&sc->recovery_mode))
4339 advertise = sc->advertise;
4342 if ((error) || (req->newptr == NULL))
4349 * ixgbe_set_advertise - Control advertised link speed
4352 * 0x1 - advertise 100 Mb
4353 * 0x2 - advertise 1G
4354 * 0x4 - advertise 10G
4355 * 0x8 - advertise 10 Mb (yes, Mb)
4356 * 0x10 - advertise 2.5G (disabled by default)
4357 * 0x20 - advertise 5G (disabled by default)
4363 device_t dev = iflib_get_dev(sc->ctx);
4364 struct ixgbe_hw *hw;
4371 if (sc->advertise == advertise) /* no change */
4374 hw = &sc->hw;
4377 if (hw->phy.media_type == ixgbe_media_type_backplane)
4380 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4381 (hw->phy.multispeed_fiber))) {
4395 if (hw->mac.ops.get_link_capabilities) {
4396 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4462 hw->mac.autotry_restart = true;
4463 hw->mac.ops.setup_link(hw, speed, true);
4464 sc->advertise = advertise;
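Between the capability check and setup_link(), the handler translates the advertise bitmask into link-speed flags; a sketch of that omitted translation (e.g. advertise = 0x6 requests 1G + 10G):

    ixgbe_link_speed speed = 0;
    if (advertise & 0x1)  speed |= IXGBE_LINK_SPEED_100_FULL;
    if (advertise & 0x2)  speed |= IXGBE_LINK_SPEED_1GB_FULL;
    if (advertise & 0x4)  speed |= IXGBE_LINK_SPEED_10GB_FULL;
    if (advertise & 0x8)  speed |= IXGBE_LINK_SPEED_10_FULL;
    if (advertise & 0x10) speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
    if (advertise & 0x20) speed |= IXGBE_LINK_SPEED_5GB_FULL;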
4470 * ixgbe_get_default_advertise - Get default advertised speed settings
4474 * 0x1 - advertise 100 Mb
4475 * 0x2 - advertise 1G
4476 * 0x4 - advertise 10G
4477 * 0x8 - advertise 10 Mb (yes, Mb)
4478 * 0x10 - advertise 2.5G (disabled by default)
4479 * 0x20 - advertise 5G (disabled by default)
4484 struct ixgbe_hw *hw = &sc->hw;
4492 * multi-speed fiber
4494 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4495 !(hw->phy.multispeed_fiber))
4498 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4502 if (hw->mac.type == ixgbe_mac_X550) {
4524 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4527 * 0/1 - off / on (use default value of 1000)
4538 if_t ifp = iflib_get_ifp(sc->ctx);
4542 newval = sc->dmac;
4544 if ((error) || (req->newptr == NULL))
4550 sc->dmac = 0;
4554 sc->dmac = 1000;
4564 /* Legal values - allow */
4565 sc->dmac = newval;
4572 /* Re-initialize hardware if it's already running */
4585 * 0 - set device to D0
4586 * 3 - set device to D3
4587 * (none) - get current device power state
4593 device_t dev = sc->dev;
4599 if ((error) || (req->newptr == NULL))
4625 * 0 - disabled
4626 * 1 - enabled
4632 struct ixgbe_hw *hw = &sc->hw;
4636 new_wol_enabled = hw->wol_enabled;
4638 if ((error) || (req->newptr == NULL))
4641 if (new_wol_enabled == hw->wol_enabled)
4644 if (new_wol_enabled > 0 && !sc->wol_support)
4647 hw->wol_enabled = new_wol_enabled;
4653 * ixgbe_sysctl_wufc - Wake Up Filter Control
4658 * 0x1 - Link Status Change
4659 * 0x2 - Magic Packet
4660 * 0x4 - Direct Exact
4661 * 0x8 - Directed Multicast
4662 * 0x10 - Broadcast
4663 * 0x20 - ARP/IPv4 Request Packet
4664 * 0x40 - Direct IPv4 Packet
4665 * 0x80 - Direct IPv6 Packet
4676 new_wufc = sc->wufc;
4679 if ((error) || (req->newptr == NULL))
4681 if (new_wufc == sc->wufc)
4688 new_wufc |= (0xffffff & sc->wufc);
4689 sc->wufc = new_wufc;
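Usage example for the filter bits listed above; the sysctl path is hypothetical (composed from this driver's wol node), and 0x2 selects Magic Packet wake only:

    /* sysctl dev.ix.0.wol.wufc=0x2   -- hypothetical node path */
    /* 0x2 maps to the hardware's Magic Packet filter enable bit */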
4702 struct ixgbe_hw *hw = &sc->hw;
4703 device_t dev = sc->dev;
4708 if (atomic_load_acq_int(&sc->recovery_mode))
4719 switch (sc->hw.mac.type) {
4734 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4737 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4738 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4755 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4757 * For X552/X557-AT devices using an external PHY
4763 struct ixgbe_hw *hw = &sc->hw;
4766 if (atomic_load_acq_int(&sc->recovery_mode))
4769 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4770 device_printf(iflib_get_dev(sc->ctx),
4775 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4777 device_printf(iflib_get_dev(sc->ctx),
4799 struct ixgbe_hw *hw = &sc->hw;
4802 if (atomic_load_acq_int(&sc->recovery_mode))
4805 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4806 device_printf(iflib_get_dev(sc->ctx),
4811 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4813 device_printf(iflib_get_dev(sc->ctx),
4829 * 0 - disable EEE
4830 * 1 - enable EEE
4831 * (none) - get current device EEE state
4837 device_t dev = sc->dev;
4838 if_t ifp = iflib_get_ifp(sc->ctx);
4842 if (atomic_load_acq_int(&sc->recovery_mode))
4845 curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
4848 if ((error) || (req->newptr == NULL))
4856 if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
4863 retval = ixgbe_setup_eee(&sc->hw, new_eee);
4869 /* Restart auto-neg */
4876 sc->feat_en |= IXGBE_FEATURE_EEE;
4878 sc->feat_en &= ~IXGBE_FEATURE_EEE;
4890 sc = oidp->oid_arg1;
4891 switch (oidp->oid_arg2) {
4908 val = IXGBE_READ_REG(&sc->hw, reg);
4911 if (error != 0 || req->newptr == NULL)
4916 IXGBE_WRITE_REG(&sc->hw, reg, val);
4926 sc->feat_cap = IXGBE_FEATURE_NETMAP |
4933 switch (sc->hw.mac.type) {
4935 if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
4936 sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4939 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4940 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4941 if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4942 (sc->hw.bus.func == 0))
4943 sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4946 sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
4947 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4948 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4949 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4952 sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
4953 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4954 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4955 if (sc->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4956 sc->feat_cap |= IXGBE_FEATURE_EEE;
4959 sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
4960 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4961 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4962 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4963 if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4964 (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4965 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4966 sc->feat_cap |= IXGBE_FEATURE_EEE;
4970 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4971 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4972 if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4973 (sc->hw.bus.func == 0))
4974 sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4975 if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4976 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4984 if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4985 sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4987 if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
4988 sc->feat_en |= IXGBE_FEATURE_NETMAP;
4990 if (sc->feat_cap & IXGBE_FEATURE_EEE)
4991 sc->feat_en |= IXGBE_FEATURE_EEE;
4993 if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4994 sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4996 if (sc->feat_cap & IXGBE_FEATURE_RECOVERY_MODE)
4997 sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
5000 /* Flow Director */
5002 if (sc->feat_cap & IXGBE_FEATURE_FDIR)
5003 sc->feat_en |= IXGBE_FEATURE_FDIR;
5005 device_printf(sc->dev,
5006 "Device does not support Flow Director."
5010 * Message Signal Interrupts - Extended (MSI-X)
5011 * Normal MSI is only enabled if MSI-X calls fail.
5014 sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
5015 /* Receive-Side Scaling (RSS) */
5016 if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
5017 sc->feat_en |= IXGBE_FEATURE_RSS;
5020 /* No MSI-X */
5021 if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
5022 sc->feat_cap &= ~IXGBE_FEATURE_RSS;
5023 sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5024 sc->feat_en &= ~IXGBE_FEATURE_RSS;
5025 sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
5037 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
5041 device_printf(sc->dev,
5049 ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
5054 ixgbe_get_nvm_version(hw, &nvm_ver); /* NVM version */
5055 ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
5056 ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack ID, a build identifier in Intel's SCM */
5057 ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */
5097 sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
5116 struct ixgbe_hw *hw = &sc->hw;
5117 device_t dev = sc->dev;
5127 ixgbe_sbuf_fw_version(hw, buf);
5145 struct ixgbe_hw *hw = &sc->hw;
5146 device_t dev = sc->dev;
5156 ixgbe_sbuf_fw_version(hw, buf);