Lines matching "tcam-based" in sys/dev/cxgb/cxgb_main.c (FreeBSD cxgb(4) T3 driver)

2 SPDX-License-Identifier: BSD-2-Clause
4 Copyright (c) 2007-2009, Chelsio Inc.
153 {PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
154 {PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
176 nitems(cxgb_identifiers) - 1);
228 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
240 "MSI-X, MSI, INTx selector");
243 * The driver uses an auto-queue algorithm by default.
244 * To disable it and force a single queue-set per port, use multiq = 0
248 "use min(ncpus/ports, 8) queue-sets per port");
259 int cxgb_use_16k_clusters = -1;
263 static int nfilters = -1;
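
The four tunables above select the interrupt scheme, the queue-set count, the
receive cluster size, and the TCAM filter-region size. A minimal
/boot/loader.conf sketch, assuming the names live under the hw.cxgb sysctl
tree implied by the descriptions above:

    hw.cxgb.msi_allowed="1"        # 0: INTx only, 1: allow MSI, 2: allow MSI-X
    hw.cxgb.multiq="0"             # force a single queue-set per port
    hw.cxgb.use_16k_clusters="-1"  # -1: let the driver choose per chip revision
    hw.cxgb.nfilters="-1"          # -1: auto-size the TCAM filter region
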
304 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
315 switch(adapter->params.rev) {
335 for (id = cxgb_identifiers; id->desc != NULL; id++) {
336 if ((id->vendor == pci_get_vendor(dev)) &&
337 (id->device == pci_get_device(dev))) {
354 ai = t3_get_adapter_info(id->index);
370 nports = ai->nports0 + ai->nports1;
376 device_set_descf(dev, "%s, %d %s", ai->desc, nports, ports);
392 device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
395 device_printf(sc->dev, "installing firmware on card\n");
396 status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
399 device_printf(sc->dev, "failed to install firmware: %d\n",
403 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
417 * 1. Determine if the device supports MSI or MSI-X.
422 * 5. Allocate the BAR for doing MSI-X.
423 * 6. Setup the line interrupt iff MSI-X is not supported.
426 * 9. Check if the firmware and SRAM are up-to-date. They will be
427 * auto-updated later (before FULL_INIT_DONE), if required.
451 sc->dev = dev;
452 sc->msi_count = 0;
455 snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
457 ADAPTER_LOCK_INIT(sc, sc->lockbuf);
459 snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
461 snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
463 snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
466 MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
467 MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
468 MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
479 sc->link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4;
480 if (sc->link_width < 8 &&
481 (ai->caps & SUPPORTED_10000baseT_Full)) {
482 device_printf(sc->dev,
484 sc->link_width);
496 sc->regs_rid = PCIR_BAR(0);
497 if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
498 &sc->regs_rid, RF_ACTIVE)) == NULL) {
504 sc->bt = rman_get_bustag(sc->regs_res);
505 sc->bh = rman_get_bushandle(sc->regs_res);
506 sc->mmio_len = rman_get_size(sc->regs_res);
509 sc->port[i].adapter = sc;
517 sc->udbs_rid = PCIR_BAR(2);
518 sc->udbs_res = NULL;
520 ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
521 &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
527 /* Allocate the BAR for doing MSI-X. If it succeeds, try to allocate
532 sc->msix_regs_rid = 0x20;
534 (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
535 &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
538 port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
539 msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;
542 (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
543 sc->msi_count != msi_needed) {
544 device_printf(dev, "alloc msix failed - "
546 "will try MSI\n", sc->msi_count,
548 sc->msi_count = 0;
552 sc->msix_regs_rid, sc->msix_regs_res);
553 sc->msix_regs_res = NULL;
555 sc->flags |= USING_MSIX;
556 sc->cxgb_intr = cxgb_async_intr;
558 "using MSI-X interrupts (%u vectors)\n",
559 sc->msi_count);
563 if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
564 sc->msi_count = 1;
565 if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
566 device_printf(dev, "alloc msi failed - "
568 sc->msi_count = 0;
572 sc->flags |= USING_MSI;
573 sc->cxgb_intr = t3_intr_msi;
577 if (sc->msi_count == 0) {
579 sc->cxgb_intr = t3b_intr;
583 sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
584 taskqueue_thread_enqueue, &sc->tq);
585 if (sc->tq == NULL) {
590 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
592 TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
596 callout_init(&sc->cxgb_tick_ch, 1);
604 sc->flags &= ~FW_UPTODATE;
606 sc->flags |= FW_UPTODATE;
613 device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
615 sc->flags &= ~TPS_UPTODATE;
617 sc->flags |= TPS_UPTODATE;
624 for (i = 0; i < (sc)->params.nports; i++) {
627 if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
632 pi = &sc->port[i];
633 pi->adapter = sc;
634 pi->nqsets = port_qsets;
635 pi->first_qset = i*port_qsets;
636 pi->port_id = i;
637 pi->tx_chan = i >= ai->nports0;
638 pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
639 sc->rxpkt_map[pi->txpkt_intf] = i;
640 sc->port[i].tx_chan = i >= ai->nports0;
641 sc->portdev[i] = child;
655 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
660 ai->desc, is_offload(sc) ? "R" : "",
661 sc->params.vpd.ec, sc->params.vpd.sn);
663 snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
664 sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
665 sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);
667 device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
668 callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
673 sc->cpl_handler[i] = cpl_not_handled;
726 sc->flags |= CXGB_SHUTDOWN;
732 bus_detach_children(sc->dev);
733 for (i = 0; i < (sc)->params.nports; i++) {
734 if (sc->portdev[i] &&
735 device_delete_child(sc->dev, sc->portdev[i]) != 0)
736 device_printf(sc->dev, "failed to delete child port\n");
737 nqsets += sc->port[i].nqsets;
745 KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
746 __func__, sc->open_device_map));
747 for (i = 0; i < sc->params.nports; i++) {
748 KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
755 callout_drain(&sc->cxgb_tick_ch);
756 callout_drain(&sc->sge_timer_ch);
762 if (sc->flags & FULL_INIT_DONE) {
764 sc->flags &= ~FULL_INIT_DONE;
771 if (sc->flags & (USING_MSI | USING_MSIX)) {
772 device_printf(sc->dev, "releasing msi message(s)\n");
773 pci_release_msi(sc->dev);
775 device_printf(sc->dev, "no msi message to release\n");
778 if (sc->msix_regs_res != NULL) {
779 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
780 sc->msix_regs_res);
786 if (sc->tq != NULL) {
787 taskqueue_free(sc->tq);
788 sc->tq = NULL;
791 free(sc->filters, M_DEVBUF);
794 if (sc->udbs_res != NULL)
795 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
796 sc->udbs_res);
798 if (sc->regs_res != NULL)
799 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
800 sc->regs_res);
802 MTX_DESTROY(&sc->mdio_lock);
803 MTX_DESTROY(&sc->sge.reg_lock);
804 MTX_DESTROY(&sc->elmer_lock);
812 * setup_sge_qsets - configure SGE Tx/Rx/response queues
816 * We support multiple queue sets per port if we have MSI-X, otherwise
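
A worked example of the split computed at attach time, assuming SGE_QSETS is 8
(the driver's queue-set pool): a 2-port card on an 8-CPU machine gets

    port_qsets = min(SGE_QSETS / nports, mp_ncpus) = min(8 / 2, 8) = 4
    msi_needed = nports * port_qsets + 1 = 2 * 4 + 1 = 9   /* +1 error vector */

so port 0 owns qsets 0..3 and port 1 owns qsets 4..7 (first_qset = i *
port_qsets), with one extra MSI-X vector carrying the slow/error interrupt.
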
826 device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
830 if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
831 irq_idx = -1;
833 for (i = 0; i < (sc)->params.nports; i++) {
834 struct port_info *pi = &sc->port[i];
836 for (j = 0; j < pi->nqsets; j++, qset_idx++) {
837 err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
838 (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
839 &sc->params.sge.qset[qset_idx], ntxq, pi);
842 device_printf(sc->dev,
849 sc->nqsets = qset_idx;
860 if (sc->msix_intr_tag[i] == NULL) {
863 KASSERT(sc->msix_irq_res[i] == NULL &&
864 sc->msix_irq_rid[i] == 0,
865 ("%s: half-done interrupt (%d).", __func__, i));
870 bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
871 sc->msix_intr_tag[i]);
872 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
873 sc->msix_irq_res[i]);
875 sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
876 sc->msix_irq_rid[i] = 0;
879 if (sc->intr_tag) {
880 KASSERT(sc->irq_res != NULL,
881 ("%s: half-done interrupt.", __func__));
883 bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
884 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
885 sc->irq_res);
887 sc->irq_res = sc->intr_tag = NULL;
888 sc->irq_rid = 0;
897 int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);
899 sc->irq_rid = intr_flag ? 1 : 0;
900 sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
902 if (sc->irq_res == NULL) {
903 device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
904 intr_flag, sc->irq_rid);
906 sc->irq_rid = 0;
908 err = bus_setup_intr(sc->dev, sc->irq_res,
910 sc->cxgb_intr, sc, &sc->intr_tag);
913 device_printf(sc->dev,
915 intr_flag, sc->irq_rid, err);
916 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
917 sc->irq_res);
918 sc->irq_res = sc->intr_tag = NULL;
919 sc->irq_rid = 0;
927 bus_describe_intr(sc->dev, sc->irq_res, sc->intr_tag, "err");
928 for (i = 0; i < sc->msi_count - 1; i++) {
930 res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
933 device_printf(sc->dev, "Cannot allocate interrupt "
939 err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
940 NULL, t3_intr_msix, &sc->sge.qs[i], &tag);
942 device_printf(sc->dev, "Cannot set up interrupt "
944 bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
948 sc->msix_irq_rid[i] = rid;
949 sc->msix_irq_res[i] = res;
950 sc->msix_intr_tag[i] = tag;
951 bus_describe_intr(sc->dev, res, tag, "qs%d", i);
968 desc = p->phy.desc;
969 device_set_descf(dev, "Port %d %s", p->port_id, desc);
978 pi->port_cdev = make_dev(&cxgb_cdevsw, if_getdunit(pi->ifp),
979 UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp));
981 if (pi->port_cdev == NULL)
984 pi->port_cdev->si_drv1 = (void *)pi;
1003 sc = p->adapter;
1004 snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
1005 device_get_unit(device_get_parent(dev)), p->port_id);
1006 PORT_LOCK_INIT(p, p->lockbuf);
1008 callout_init(&p->link_check_ch, 1);
1009 TASK_INIT(&p->link_check_task, 0, check_link_status, p);
1012 ifp = p->ifp = if_alloc(IFT_ETHER);
1035 * Disable TSO on 4-port - it isn't supported by the firmware.
1037 if (sc->params.nports > 2) {
1044 ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
1048 ether_ifattach(ifp, p->hw_addr);
1054 if (sc->params.nports <= 2)
1082 sc = p->adapter;
1087 wakeup(&sc->flags);
1089 mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0);
1093 if (p->port_cdev != NULL)
1094 destroy_dev(p->port_cdev);
1097 ether_ifdetach(p->ifp);
1099 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1100 struct sge_qset *qs = &sc->sge.qs[i];
1101 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1103 callout_drain(&txq->txq_watchdog);
1104 callout_drain(&txq->txq_timer);
1108 if_free(p->ifp);
1109 p->ifp = NULL;
1113 wakeup_one(&sc->flags);
1123 if (sc->flags & FULL_INIT_DONE) {
1131 device_printf(sc->dev, "encountered fatal error, operation suspended\n");
1133 device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
1146 dev = sc->dev;
1148 cfg = &dinfo->cfg;
1154 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1183 dev = sc->dev;
1196 dev = sc->dev;
1204 * t3_os_link_changed - handle link status changes
1210 * @fc: the new flow-control setting
1212 * This is the OS-dependent handler for link status changes. The OS
1214 * then calls this handler for any OS-specific processing.
1220 struct port_info *pi = &adapter->port[port_id];
1221 if_t ifp = pi->ifp;
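
The body of the handler is not shown above; a minimal sketch of the
OS-specific step it must perform, assuming the stock ifnet KPI and the
parameter names from the comment (the real routine also has to manage the MAC
around the transition):

    if (link_status) {
        if_setbaudrate(ifp, IF_Mbps(speed));
        if_link_state_change(ifp, LINK_STATE_UP);
    } else
        if_link_state_change(ifp, LINK_STATE_DOWN);
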
1241 * t3_os_phymod_changed - handle PHY module changes
1245 * This is the OS-dependent handler for PHY module changes. It is
1246 * invoked when a PHY module is removed or inserted for any OS-specific
1252 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown"
1254 struct port_info *pi = &adap->port[port_id];
1255 int mod = pi->phy.modtype;
1257 if (mod != pi->media.ifm_cur->ifm_data)
1261 if_printf(pi->ifp, "PHY module unplugged\n");
1265 if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
1280 bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
1284 * Programs the XGMAC based on the settings in the ifnet. These settings
1290 if_t ifp = p->ifp;
1292 struct cmac *mac = &p->mac;
1297 bcopy(if_getlladdr(ifp), p->hw_addr, ETHER_ADDR_LEN);
1306 t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
1307 t3_mac_set_address(mac, 0, p->hw_addr);
1319 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
1320 if (!--attempts)
1333 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
1342 m->m_len = m->m_pkthdr.len = sizeof(*req);
1344 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1346 req->iff = i;
1355 m->m_len = m->m_pkthdr.len = sizeof(*req);
1357 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1359 req->params = htonl(V_L2T_W_IDX(i));
1368 m->m_len = m->m_pkthdr.len = sizeof(*req);
1370 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1372 req->l2t_idx = htonl(V_L2T_W_IDX(i));
1378 m->m_len = m->m_pkthdr.len = sizeof(*greq);
1380 greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1382 greq->mask = htobe64(1);
1391 * setup_rss - configure Receive Side Steering (per-queue connection demux)
1417 nq[pi->tx_chan] += pi->nqsets;
1426 adap->rrss_map[i] = 0xff;
1428 if (adap->rrss_map[rspq_map[i]] == 0xff)
1429 adap->rrss_map[rspq_map[i]] = i;
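
The fill of rspq_map itself is elided by the listing; a plausible
reconstruction, hedged, splitting the RSS_TABLE_SIZE-entry table between the
two channels counted in nq[] above:

    for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
        rspq_map[i] = nq[0] ? i % nq[0] : 0;
        rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? (i % nq[1]) + nq[0] : 0;
    }

Every RSS hash bucket then lands on a live response queue, and rrss_map
(above) records the reverse lookup used later when a TCAM filter steers
traffic to a particular qset.
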
1447 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1448 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1449 req->sched = sched;
1450 req->idx = qidx;
1451 req->min = lo;
1452 req->max = hi;
1453 req->binding = port;
1454 m->m_len = m->m_pkthdr.len = sizeof(*req);
1464 for (i = 0; i < (sc)->params.nports; ++i) {
1467 for (j = 0; j < pi->nqsets; ++j) {
1468 send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
1469 -1, pi->tx_chan);
1497 device_printf(adap->dev,
1503 len = tpeeprom->datasize - 4;
1505 ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
1510 device_printf(adap->dev,
1516 ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
1520 device_printf(adap->dev,
1524 device_printf(adap->dev,
1547 device_printf(adap->dev, "could not load TP SRAM\n");
1550 device_printf(adap->dev, "updating TP SRAM\n");
1552 ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
1556 ret = t3_set_proto_sram(adap, tpsram->data);
1558 device_printf(adap->dev, "loading protocol SRAM failed\n");
1567 * cxgb_up - enable the adapter
1578 unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS;
1580 KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
1581 __func__, sc->open_device_map));
1583 if ((sc->flags & FULL_INIT_DONE) == 0) {
1587 if ((sc->flags & FW_UPTODATE) == 0)
1591 if ((sc->flags & TPS_UPTODATE) == 0)
1596 sc->params.mc5.nservers = 0;
1599 sc->params.mc5.nfilters = mxf;
1601 sc->params.mc5.nfilters = min(nfilters, mxf);
1609 t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1619 sc->flags |= FULL_INIT_DONE;
1626 if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
1628 sc->flags |= TP_PARITY_INIT;
1630 if (sc->flags & TP_PARITY_INIT) {
1635 if (!(sc->flags & QUEUES_BOUND)) {
1638 sc->flags |= QUEUES_BOUND;
1665 struct adapter *sc = p->adapter;
1675 struct adapter *sc = p->adapter;
1676 if_t ifp = p->ifp;
1677 struct cmac *mac = &p->mac;
1684 if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) {
1696 * The code that runs during one-time adapter initialization can sleep
1699 may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1;
1707 if (sc->open_device_map == 0 && ((rc = cxgb_up(sc)) != 0))
1711 if (isset(&sc->open_device_map, p->port_id) &&
1716 t3_port_intr_enable(sc, p->port_id);
1717 if (!mac->multiport)
1720 t3_link_start(&p->phy, mac, &p->link_config);
1725 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1726 struct sge_qset *qs = &sc->sge.qs[i];
1727 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1729 callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs,
1730 txq->txq_watchdog.c_cpu);
1734 setbit(&sc->open_device_map, p->port_id);
1735 callout_reset(&p->link_check_ch,
1736 p->phy.caps & SUPPORTED_LINK_IRQ ? hz * 3 : hz / 4,
1746 wakeup_one(&sc->flags);
1754 struct adapter *sc = p->adapter;
1760 if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) {
1778 wakeup_one(&sc->flags);
1790 struct adapter *sc = pi->adapter;
1791 if_t ifp = pi->ifp;
1809 clrbit(&sc->open_device_map, pi->port_id);
1810 t3_port_intr_disable(sc, pi->port_id);
1811 taskqueue_drain(sc->tq, &sc->slow_intr_task);
1812 taskqueue_drain(sc->tq, &sc->tick_task);
1814 callout_drain(&pi->link_check_ch);
1815 taskqueue_drain(sc->tq, &pi->link_check_task);
1821 t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);
1824 t3_set_reg_field(sc, A_XGM_RXFIFO_CFG + pi->mac.offset,
1830 t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
1834 t3_mac_disable(&pi->mac, MAC_DIRECTION_RX);
1836 pi->phy.ops->power_down(&pi->phy, 1);
1840 pi->link_config.link_ok = 0;
1841 t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);
1843 if (sc->open_device_map == 0)
1844 cxgb_down(pi->adapter);
1856 struct adapter *adp = p->adapter;
1859 for (i = 0; i < p->nqsets; i++) {
1860 q = &adp->sge.qs[p->first_qset + i];
1861 q->lro.enabled = (enabled != 0);
1870 struct adapter *sc = p->adapter;
1885 mtu = ifr->ifr_mtu;
1904 flags = p->if_flags;
1918 p->if_flags = if_getflags(ifp);
1947 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1957 "tso4 disabled due to -txcsum.\n");
1969 "tso6 disabled due to -txcsum6.\n");
2044 error = ifmedia_ioctl(ifp, ifr, &p->media, command);
2060 * Translates phy->modtype to the correct Ethernet media subtype.
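
A sketch of what such a translation plausibly looks like, assuming the
standard ifmedia 10G subtypes; the function name and case list are assumptions
matching the mod_str[] table above, not the verbatim driver routine:

    static int
    cxgb_ifm_type(int mod)                /* name assumed from context */
    {
        switch (mod) {
        case phy_modtype_none:        return (IFM_NONE);
        case phy_modtype_sr:          return (IFM_10G_SR);
        case phy_modtype_lr:          return (IFM_10G_LR);
        case phy_modtype_lrm:         return (IFM_10G_LRM);
        case phy_modtype_twinax:      return (IFM_10G_TWINAX);
        case phy_modtype_twinax_long: return (IFM_10G_TWINAX_LONG);
        default:                      return (IFM_UNKNOWN);
        }
    }
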
2092 struct cphy *phy = &p->phy;
2093 struct ifmedia *media = &p->media;
2094 int mod = phy->modtype;
2100 if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
2103 if (phy->caps & SUPPORTED_10000baseT_Full)
2106 if (phy->caps & SUPPORTED_1000baseT_Full)
2109 if (phy->caps & SUPPORTED_100baseT_Full)
2112 if (phy->caps & SUPPORTED_10baseT_Full)
2118 } else if (phy->caps & SUPPORTED_TP) {
2121 KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
2122 ("%s: unexpected cap 0x%x", __func__, phy->caps));
2127 } else if (phy->caps & SUPPORTED_FIBRE &&
2128 phy->caps & SUPPORTED_10000baseT_Full) {
2138 } else if (phy->caps & SUPPORTED_FIBRE &&
2139 phy->caps & SUPPORTED_1000baseT_Full) {
2142 /* XXX: Lie and claim to be SX, could actually be any 1G-X */
2148 phy->caps));
2158 struct ifmedia_entry *cur = p->media.ifm_cur;
2159 int speed = p->link_config.speed;
2161 if (cur->ifm_data != p->phy.modtype) {
2163 cur = p->media.ifm_cur;
2166 ifmr->ifm_status = IFM_AVALID;
2167 if (!p->link_config.link_ok)
2170 ifmr->ifm_status |= IFM_ACTIVE;
2176 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
2178 KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
2179 ("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));
2181 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
2183 ifmr->ifm_active |= IFM_10G_T;
2185 ifmr->ifm_active |= IFM_1000_T;
2187 ifmr->ifm_active |= IFM_100_TX;
2189 ifmr->ifm_active |= IFM_10_T;
2199 struct adapter *sc = pi->adapter;
2200 struct cmac *mac = &pi->mac;
2201 struct mac_stats *mstats = &mac->stats;
2207 return (mstats->rx_frames);
2210 return (mstats->rx_jabber + mstats->rx_data_errs +
2211 mstats->rx_sequence_errs + mstats->rx_runt +
2212 mstats->rx_too_long + mstats->rx_mac_internal_errs +
2213 mstats->rx_short + mstats->rx_fcs_errs);
2216 return (mstats->tx_frames);
2219 return (mstats->tx_excess_collisions + mstats->tx_underrun +
2220 mstats->tx_len_errs + mstats->tx_mac_internal_errs +
2221 mstats->tx_excess_deferral + mstats->tx_fcs_errs);
2224 return (mstats->tx_total_collisions);
2227 return (mstats->rx_octets);
2230 return (mstats->tx_octets);
2233 return (mstats->rx_mcast_frames);
2236 return (mstats->tx_mcast_frames);
2239 return (mstats->rx_cong_drops);
2246 if (sc->flags & FULL_INIT_DONE) {
2247 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
2248 drops += sc->sge.qs[i].txq[TXQ_ETH].txq_mr->br_drops;
2267 taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
2274 struct adapter *sc = pi->adapter;
2276 if (!isset(&sc->open_device_map, pi->port_id))
2279 taskqueue_enqueue(sc->tq, &pi->link_check_task);
2286 struct adapter *sc = pi->adapter;
2288 if (!isset(&sc->open_device_map, pi->port_id))
2291 t3_link_changed(sc, pi->port_id);
2293 if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ) ||
2294 pi->link_config.link_ok == 0)
2295 callout_reset(&pi->link_check_ch, hz, link_check_callout, pi);
2306 callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi);
2314 if (sc->flags & CXGB_SHUTDOWN)
2318 struct port_info *p = &sc->port[i];
2321 if_t ifp = p->ifp;
2324 if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
2325 !p->link_config.link_ok)
2330 __func__, if_getdrvflags(ifp), sc->open_device_map));
2333 status = t3b2_mac_watchdog_task(&p->mac);
2335 p->mac.stats.num_toggled++;
2337 struct cmac *mac = &p->mac;
2340 t3_link_start(&p->phy, mac, &p->link_config);
2342 t3_port_intr_enable(sc, p->port_id);
2343 p->mac.stats.num_resets++;
2354 if (sc->flags & CXGB_SHUTDOWN)
2357 taskqueue_enqueue(sc->tq, &sc->tick_task);
2358 callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
2369 if (timevalcmp(&tv, &pi->last_refreshed, <))
2373 t3_mac_update_stats(&pi->mac);
2375 getmicrotime(&pi->last_refreshed);
2382 const struct adapter_params *p = &sc->params;
2386 if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
2389 if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
2394 struct sge_qset *qs = &sc->sge.qs[0];
2420 for (i = 0; i < sc->params.nports; i++) {
2421 struct port_info *pi = &sc->port[i];
2422 struct cmac *mac = &pi->mac;
2424 if (!isset(&sc->open_device_map, pi->port_id))
2429 if (mac->multiport)
2433 cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
2436 mac->stats.rx_fifo_ovfl++;
2439 t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
2467 struct adapter *adapter = pi->adapter;
2478 aligned_offset + aligned_len - 4,
2479 (u32 *)&buf[aligned_len - 4]);
2490 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2527 struct port_info *pi = dev->si_drv1;
2528 adapter_t *sc = pi->adapter;
2547 struct cphy *phy = &pi->phy;
2550 if (!phy->mdio_read)
2553 mmd = mid->phy_id >> 8;
2559 error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
2560 mid->reg_num, &val);
2562 error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
2563 mid->reg_num & 0x1f, &val);
2565 mid->val_out = val;
2569 struct cphy *phy = &pi->phy;
2572 if (!phy->mdio_write)
2575 mmd = mid->phy_id >> 8;
2581 error = phy->mdio_write(sc, mid->phy_id & 0x1f,
2582 mmd, mid->reg_num, mid->val_in);
2584 error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
2585 mid->reg_num & 0x1f,
2586 mid->val_in);
2591 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2593 t3_write_reg(sc, edata->addr, edata->val);
2598 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2600 edata->val = t3_read_reg(sc, edata->addr);
2605 mtx_lock_spin(&sc->sge.reg_lock);
2606 switch (ecntxt->cntxt_type) {
2608 error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
2609 ecntxt->data);
2612 error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
2613 ecntxt->data);
2616 error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
2617 ecntxt->data);
2620 error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
2621 ecntxt->data);
2627 mtx_unlock_spin(&sc->sge.reg_lock);
2633 if (edesc->queue_num >= SGE_QSETS * 6)
2635 ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
2636 edesc->queue_num % 6, edesc->idx, edesc->data);
2639 edesc->size = ret;
2645 int q1 = pi->first_qset;
2646 int nqsets = pi->nqsets;
2649 if (t->qset_idx >= nqsets)
2652 i = q1 + t->qset_idx;
2653 q = &sc->params.sge.qset[i];
2654 t->rspq_size = q->rspq_size;
2655 t->txq_size[0] = q->txq_size[0];
2656 t->txq_size[1] = q->txq_size[1];
2657 t->txq_size[2] = q->txq_size[2];
2658 t->fl_size[0] = q->fl_size;
2659 t->fl_size[1] = q->jumbo_size;
2660 t->polling = q->polling;
2661 t->lro = q->lro;
2662 t->intr_lat = q->coalesce_usecs;
2663 t->cong_thres = q->cong_thres;
2664 t->qnum = i;
2666 if ((sc->flags & FULL_INIT_DONE) == 0)
2667 t->vector = 0;
2668 else if (sc->flags & USING_MSIX)
2669 t->vector = rman_get_start(sc->msix_irq_res[i]);
2671 t->vector = rman_get_start(sc->irq_res);
2677 edata->val = pi->nqsets;
2695 if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
2700 fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2704 error = copyin(t->buf, fw_data, t->len);
2707 error = -t3_load_fw(sc, fw_data, t->len);
2710 snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
2716 sc->flags |= FW_UPTODATE;
2726 boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2730 error = copyin(t->buf, boot_data, t->len);
2732 error = -t3_load_boot(sc, boot_data, t->len);
2739 struct tp_params *p = &sc->params.tp;
2744 m->tx_pg_sz = p->tx_pg_size;
2745 m->tx_num_pg = p->tx_num_pgs;
2746 m->rx_pg_sz = p->rx_pg_size;
2747 m->rx_num_pg = p->rx_num_pgs;
2748 m->pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2754 struct tp_params *p = &sc->params.tp;
2758 if (sc->flags & FULL_INIT_DONE)
2761 if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
2762 !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
2764 if (!(m->rx_pg_sz & 0x14000))
2766 if (!(m->tx_pg_sz & 0x1554000))
2768 if (m->tx_num_pg == -1)
2769 m->tx_num_pg = p->tx_num_pgs;
2770 if (m->rx_num_pg == -1)
2771 m->rx_num_pg = p->rx_num_pgs;
2772 if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
2774 if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
2775 m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
2778 p->rx_pg_size = m->rx_pg_sz;
2779 p->tx_pg_size = m->tx_pg_sz;
2780 p->rx_num_pgs = m->rx_num_pg;
2781 p->tx_num_pgs = m->tx_num_pg;
2792 if (m->nmtus != NMTUS)
2794 if (m->mtus[0] < 81) /* accommodate SACK */
2801 if (m->mtus[i] < m->mtus[i - 1])
2804 memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
2813 memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
2814 m->nmtus = NMTUS;
2827 uint32_t len = t->len, addr = t->addr;
2831 if (!(sc->flags & FULL_INIT_DONE))
2835 if (t->mem_id == MEM_CM)
2836 mem = &sc->cm;
2837 else if (t->mem_id == MEM_PMRX)
2838 mem = &sc->pmrx;
2839 else if (t->mem_id == MEM_PMTX)
2840 mem = &sc->pmtx;
2849 t->version = 3 | (sc->params.rev << 10);
2855 useraddr = (uint8_t *)t->buf;
2861 return (-error);
2866 len -= chunk;
2875 if (!(sc->flags & FULL_INIT_DONE))
2877 return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
2884 tp = (const struct trace_params *)&t->sip;
2885 if (t->config_tx)
2886 t3_config_trace_filter(sc, tp, 0, t->invert_match,
2887 t->trace_tx);
2888 if (t->config_rx)
2889 t3_config_trace_filter(sc, tp, 1, t->invert_match,
2890 t->trace_rx);
2895 if (sc->open_device_map == 0)
2897 send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
2898 p->binding);
2908 if (regs->len > reglen)
2909 regs->len = reglen;
2910 else if (regs->len < reglen)
2915 error = copyout(buf, regs->data, reglen);
2925 if ((sc->flags & FULL_INIT_DONE) == 0)
2927 if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
2928 !in_range(t->channel, 0, 1) ||
2929 !in_range(t->kbps, 0, 10000000) ||
2930 !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
2931 !in_range(t->flow_ipg, 0,
2935 if (t->kbps >= 0) {
2936 error = t3_config_sched(sc, t->kbps, t->sched);
2938 return (-error);
2940 if (t->class_ipg >= 0)
2941 t3_set_sched_ipg(sc, t->sched, t->class_ipg);
2942 if (t->flow_ipg >= 0) {
2943 t->flow_ipg *= 1000; /* us -> ns */
2944 t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
2946 if (t->mode >= 0) {
2947 int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
2950 bit, t->mode ? bit : 0);
2952 if (t->channel >= 0)
2954 1 << t->sched, t->channel << t->sched);
2962 if (e->offset & 3 || e->offset >= EEPROMSIZE ||
2963 e->len > EEPROMSIZE || e->offset + e->len > EEPROMSIZE) {
2971 e->magic = EEPROM_MAGIC;
2972 for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
2973 error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);
2976 error = copyout(buf + e->offset, e->data, e->len);
2982 if (!(sc->flags & FULL_INIT_DONE))
2986 t3_mac_update_stats(&pi->mac);
2987 memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
2997 if (la->bufsize < LA_BUFSIZE)
3001 error = -t3_get_up_la(sc, &la->stopped, &la->idx,
3002 &la->bufsize, buf);
3004 error = copyout(buf, la->data, la->bufsize);
3017 if (ioqs->bufsize < IOQS_BUFSIZE)
3021 error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);
3026 ioqs->ioq_rx_enable = *v++;
3027 ioqs->ioq_tx_enable = *v++;
3028 ioqs->ioq_rx_status = *v++;
3029 ioqs->ioq_tx_status = *v++;
3031 error = copyout(v, ioqs->data, ioqs->bufsize);
3040 unsigned int nfilters = sc->params.mc5.nfilters;
3043 return (EOPNOTSUPP); /* No TCAM */
3044 if (!(sc->flags & FULL_INIT_DONE))
3047 return (EBUSY); /* TOE will use TCAM */
3050 if (f->filter_id >= nfilters ||
3051 (f->val.dip && f->mask.dip != 0xffffffff) ||
3052 (f->val.sport && f->mask.sport != 0xffff) ||
3053 (f->val.dport && f->mask.dport != 0xffff) ||
3054 (f->val.vlan && f->mask.vlan != 0xfff) ||
3055 (f->val.vlan_prio &&
3056 f->mask.vlan_prio != FILTER_NO_VLAN_PRI) ||
3057 (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) ||
3058 f->qset >= SGE_QSETS ||
3059 sc->rrss_map[f->qset] >= RSS_TABLE_SIZE)
3063 KASSERT(sc->filters, ("filter table NULL\n"));
3065 p = &sc->filters[f->filter_id];
3066 if (p->locked)
3070 p->sip = f->val.sip;
3071 p->sip_mask = f->mask.sip;
3072 p->dip = f->val.dip;
3073 p->sport = f->val.sport;
3074 p->dport = f->val.dport;
3075 p->vlan = f->mask.vlan ? f->val.vlan : 0xfff;
3076 p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) :
3078 p->mac_hit = f->mac_hit;
3079 p->mac_vld = f->mac_addr_idx != 0xffff;
3080 p->mac_idx = f->mac_addr_idx;
3081 p->pkt_type = f->proto;
3082 p->report_filter_id = f->want_filter_id;
3083 p->pass = f->pass;
3084 p->rss = f->rss;
3085 p->qset = f->qset;
3087 error = set_filter(sc, f->filter_id, p);
3089 p->valid = 1;
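
A hedged userland sketch of exercising this path through the port's character
device (created with make_dev() earlier). The ioctl name CHELSIO_SET_FILTER,
the header, and the struct ch_filter layout are assumptions inferred from the
fields validated above; it asks the TCAM to steer traffic for destination
port 80 into queue set 1:

    #include <sys/ioctl.h>
    #include <err.h>
    #include <fcntl.h>
    #include <string.h>
    #include "cxgb_ioctl.h"            /* assumed driver ioctl header */

    int
    main(void)
    {
        struct ch_filter f;
        int fd = open("/dev/cxgb0", O_RDWR);

        if (fd < 0)
            err(1, "open");
        memset(&f, 0, sizeof(f));
        f.filter_id = 0;
        f.val.dport = 80;              /* match destination port 80 ... */
        f.mask.dport = 0xffff;         /* ... exactly, per the check above */
        f.mac_addr_idx = 0xffff;       /* wildcard: no MAC-index match */
        f.pass = 1;                    /* pass rather than drop */
        f.qset = 1;                    /* deliver to queue set 1 */
        if (ioctl(fd, CHELSIO_SET_FILTER, &f) != 0)
            err(1, "CHELSIO_SET_FILTER");
        return (0);
    }
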
3095 unsigned int nfilters = sc->params.mc5.nfilters;
3099 if (!(sc->flags & FULL_INIT_DONE))
3101 if (nfilters == 0 || sc->filters == NULL)
3103 if (f->filter_id >= nfilters)
3106 p = &sc->filters[f->filter_id];
3107 if (p->locked)
3109 if (!p->valid)
3113 p->sip = p->sip_mask = 0xffffffff;
3114 p->vlan = 0xfff;
3115 p->vlan_prio = FILTER_NO_VLAN_PRI;
3116 p->pkt_type = 1;
3117 error = set_filter(sc, f->filter_id, p);
3123 unsigned int i, nfilters = sc->params.mc5.nfilters;
3127 if (!(sc->flags & FULL_INIT_DONE))
3129 if (nfilters == 0 || sc->filters == NULL)
3132 i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1;
3134 p = &sc->filters[i];
3135 if (!p->valid)
3140 f->filter_id = i;
3141 f->val.sip = p->sip;
3142 f->mask.sip = p->sip_mask;
3143 f->val.dip = p->dip;
3144 f->mask.dip = p->dip ? 0xffffffff : 0;
3145 f->val.sport = p->sport;
3146 f->mask.sport = p->sport ? 0xffff : 0;
3147 f->val.dport = p->dport;
3148 f->mask.dport = p->dport ? 0xffff : 0;
3149 f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan;
3150 f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff;
3151 f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
3152 0 : p->vlan_prio;
3153 f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
3155 f->mac_hit = p->mac_hit;
3156 f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff;
3157 f->proto = p->pkt_type;
3158 f->want_filter_id = p->report_filter_id;
3159 f->pass = p->pass;
3160 f->rss = p->rss;
3161 f->qset = p->qset;
3167 f->filter_id = 0xffffffff;
3205 regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
3208 * We skip the MAC statistics registers because they are clear-on-read.
3209 * Also reading multi-register stats would need to synchronize with the
3228 unsigned int nfilters = sc->params.mc5.nfilters;
3234 sc->filters = p;
3236 p = &sc->filters[nfilters - 1];
3237 p->vlan = 0xfff;
3238 p->vlan_prio = FILTER_NO_VLAN_PRI;
3239 p->pass = p->rss = p->valid = p->locked = 1;
3248 unsigned int nfilters = sc->params.mc5.nfilters;
3250 if (!sc->filters)
3256 if (sc->filters[i].locked)
3257 rc = set_filter(sc, i, &sc->filters[i]);
3276 id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes -
3277 sc->params.mc5.nfilters;
3280 m->m_len = m->m_pkthdr.len = len;
3284 wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC);
3288 txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
3289 txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8));
3291 oreq->local_port = htons(f->dport);
3292 oreq->peer_port = htons(f->sport);
3293 oreq->local_ip = htonl(f->dip);
3294 oreq->peer_ip = htonl(f->sip);
3295 oreq->peer_netmask = htonl(f->sip_mask);
3296 oreq->opt0h = 0;
3297 oreq->opt0l = htonl(F_NO_OFFLOAD);
3298 oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) |
3300 V_VLAN_PRI(f->vlan_prio >> 1) |
3301 V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) |
3302 V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) |
3303 V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4)));
3307 (f->report_filter_id << 15) | (1 << 23) |
3308 ((u64)f->pass << 35) | ((u64)!f->rss << 36));
3312 if (f->pass && !f->rss) {
3315 m->m_len = m->m_pkthdr.len = len;
3318 sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
3320 (u64)sc->rrss_map[f->qset] << 19);
3331 req->reply = V_NO_REPLY(1);
3332 req->cpu_idx = 0;
3333 req->word = htons(word);
3334 req->mask = htobe64(mask);
3335 req->val = htobe64(val);
3344 txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
3345 txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8));
3358 * in - the only guarantee is that sc->sc_lock is a valid lock.
3370 struct adapter *sc = pi->adapter;
3378 if (!(sc->flags & FULL_INIT_DONE)) {
3384 if (isset(&sc->offload_map, pi->port_id))
3387 if (!(sc->flags & TOM_INIT_DONE)) {
3396 KASSERT(sc->tom_softc != NULL,
3398 KASSERT(sc->flags & TOM_INIT_DONE,
3402 setbit(&sc->offload_map, pi->port_id);
3409 if (!isset(&sc->offload_map, MAX_NPORTS) &&
3411 setbit(&sc->offload_map, MAX_NPORTS);
3413 if (!isset(&sc->offload_map, pi->port_id))
3416 KASSERT(sc->flags & TOM_INIT_DONE,
3418 clrbit(&sc->offload_map, pi->port_id);
3435 if (u->uld_id == ui->uld_id) {
3442 ui->refcount = 0;
3458 if (ui->refcount > 0) {
3482 if (ui->uld_id == id) {
3483 rc = ui->activate(sc);
3485 ui->refcount++;
3504 if (ui->uld_id == id) {
3505 rc = ui->deactivate(sc);
3507 ui->refcount--;
3534 loc = (uintptr_t *) &sc->cpl_handler[opcode];
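
The store into that slot is lock-free; a plausible completion of the
registration following the assignment shown, hedged (atomic_store_rel_ptr() is
the stock FreeBSD atomic KPI; the handler parameter name h and the fallback to
the cpl_not_handled stub, mirroring the initialization at file line 673 above,
are assumptions):

        new = h != NULL ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
        atomic_store_rel_ptr(loc, new);   /* publish the handler atomically */
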
3589 adap = pi->adapter;
3591 *nrxr = adap->nqsets;
3592 *ncl = adap->sge.qs[0].fl[1].size;
3593 *clsize = adap->sge.qs[0].fl[1].buf_size;
3606 for (i = 0; i < pi->adapter->nqsets; i++) {
3607 qs = &pi->adapter->sge.qs[i];
3610 qs->fl[0].zone = zone_pack;
3611 qs->fl[1].zone = zone_clust;
3612 qs->lro.enabled = 0;
3627 qs = &pi->adapter->sge.qs[pi->first_qset];
3642 adap = pi->adapter;
3643 for (i = 0; i < adap->nqsets; i++)
3644 (void)cxgb_debugnet_poll_rx(adap, &adap->sge.qs[i]);
3645 (void)cxgb_debugnet_poll_tx(&adap->sge.qs[pi->first_qset]);