Lines Matching +full:led +full:- +full:7 +full:seg

1 /*-
2 * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
82 #include <dev/led/led.h>
107 * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
110 * - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
112 * - small packet forwarding which is just returning a single mbuf to
119 * - private structures
120 * - iflib private utility functions
121 * - ifnet functions
122 * - vlan registry and other exported functions
123 * - iflib public core functions
224 return (ctx->ifc_softc);
231 return (ctx->ifc_dev);
238 return (ctx->ifc_ifp);
245 return (ctx->ifc_mediap);
252 bcopy(mac, ctx->ifc_mac.octet, ETHER_ADDR_LEN);
259 return (&ctx->ifc_softc_ctx);
266 return (ctx->ifc_sctx);
273 return (ctx->ifc_sysctl_extra_msix_vectors);
276 #define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
278 #define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr) + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)))
280 #define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
281 #define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)
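
The IP_ALIGNED() test above checks that an mbuf's data pointer sits two bytes into a 4-byte word. A minimal user-space sketch (hypothetical addresses, not iflib code) of why that offset leaves the IPv4 header naturally aligned behind the 14-byte Ethernet header:

/* Illustrative only: why (m_data & 0x3) == 0x2 aligns the IP header. */
#include <stdint.h>
#include <stdio.h>

#define ETHER_HDR_LEN   14

int
main(void)
{
    uintptr_t data = 0x1002;            /* hypothetical m_data, (data & 0x3) == 0x2 */
    uintptr_t ip_hdr = data + ETHER_HDR_LEN;

    printf("m_data %% 4 = %lu, ip header %% 4 = %lu\n",
        (unsigned long)(data & 0x3), (unsigned long)(ip_hdr & 0x3));
    /* prints: m_data % 4 = 2, ip header % 4 = 0 */
    return (0);
}
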
309 /* this should really scale with ring size - this is a fairly arbitrary value */
426 used = pidx - cidx;
428 used = size - cidx + pidx;
439 #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))
442 ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
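
The get_inuse() lines and the IDXDIFF() macro above share the same wrap-around occupancy arithmetic. A minimal user-space sketch with made-up values (the real get_inuse() also consults a generation bit to tell a full ring from an empty one, which this omits):

#include <stdio.h>

static unsigned
ring_used(unsigned size, unsigned cidx, unsigned pidx)
{
    /* producer ahead of consumer: plain difference; otherwise wrap */
    return (pidx >= cidx ? pidx - cidx : size - cidx + pidx);
}

int
main(void)
{
    /* 1024-entry ring: consumer at 1000, producer wrapped back to 40 */
    printf("in use = %u\n", ring_used(1024, 1000, 40));          /* 64 */
    printf("avail  = %u\n", 1024 - ring_used(1024, 1000, 40));   /* 960 */
    return (0);
}
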
506 pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
507 pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
509 pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
510 pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
522 ri_pad->rxd_val[i] = 0;
523 ri_pad->rxd_val[i + 1] = 0;
524 ri_pad->rxd_val[i + 2] = 0;
525 ri_pad->rxd_val[i + 3] = 0;
528 ri_pad->rxd_val[RXD_INFO_SIZE - 1] = 0;
536 #define IF_BAD_DMA ((bus_addr_t)-1)
538 #define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))
540 #define CTX_LOCK_INIT(_sc) sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
541 #define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
542 #define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
543 #define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)
545 #define STATE_LOCK_INIT(_sc, _name) mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
546 #define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
547 #define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
548 #define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)
550 #define CALLOUT_LOCK(txq) mtx_lock(&txq->ift_mtx)
551 #define CALLOUT_UNLOCK(txq) mtx_unlock(&txq->ift_mtx)
553 /* Our boot-time initialization hook */
745 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
746 if_shared_ctx_t sctx = ctx->ifc_sctx;
747 uint16_t first_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
749 return (scctx->isc_nrxd[first_rxq]);
755 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
756 if_shared_ctx_t sctx = ctx->ifc_sctx;
757 uint16_t first_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
759 return (scctx->isc_ntxd[first_txq]);
773 * device-specific sysctl variables:
805 if_t ifp = na->ifp;
817 * ifp->if_transmit. This is done once the device has been stopped
841 if_t ifp = na->ifp;
843 iflib_rxq_t rxq = &ctx->ifc_rxqs[0];
844 iflib_fl_t fl = &rxq->ifr_fl[0];
846 info->num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
847 info->num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
848 info->num_tx_descs = iflib_num_tx_descs(ctx);
849 info->num_rx_descs = iflib_num_rx_descs(ctx);
850 info->rx_buf_maxsize = fl->ifl_buf_size;
852 info->num_tx_rings, info->num_rx_rings, info->num_tx_descs,
853 info->num_rx_descs, info->rx_buf_maxsize);
861 struct netmap_adapter *na = kring->na;
862 u_int const lim = kring->nkr_num_slots - 1;
863 struct netmap_ring *ring = kring->ring;
866 if_ctx_t ctx = rxq->ifr_ctx;
867 iflib_fl_t fl = &rxq->ifr_fl[0];
879 * such a way to keep fl->ifl_pidx and kring->nr_hwcur in sync
880 * (except for kring->nkr_hwofs). These may be less than
881 * kring->nkr_num_slots if netmap_reset() was called while
887 * (fl->ifl_pidx - 1) % N (included), to avoid the NIC tail/prod
892 n = kring->nkr_num_slots - nm_kr_rxspace(kring);
894 n = kring->rhead - kring->nr_hwcur;
898 n += kring->nkr_num_slots;
902 map = fl->ifl_sds.ifsd_map;
903 nic_i = fl->ifl_pidx;
911 MPASS(nm_i == kring->nr_hwtail);
913 MPASS(nm_i == kring->nr_hwcur);
921 for (i = 0; n > 0 && i < IFLIB_MAX_RX_REFRESH; n--, i++) {
922 struct netmap_slot *slot = &ring->slot[nm_i];
931 fl->ifl_bus_addrs[i] = paddr +
933 fl->ifl_rxd_idxs[i] = nic_i;
936 netmap_load_map(na, fl->ifl_buf_tag,
938 } else if (slot->flags & NS_BUF_CHANGED) {
940 netmap_reload_map(na, fl->ifl_buf_tag,
943 bus_dmamap_sync(fl->ifl_buf_tag, map[nic_i],
945 slot->flags &= ~NS_BUF_CHANGED;
953 ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
955 fl->ifl_pidx = nic_i;
960 MPASS(nm_i == kring->rhead);
961 kring->nr_hwcur = nm_i;
963 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
965 ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id,
978 * Userspace wants to send packets up to the one before kring->rhead,
979 * kernel knows kring->nr_hwcur is the first unsent packet.
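
A tiny sketch of the TX window those two comments describe: slots from nr_hwcur up to (but not including) rhead are ready to transmit, with wrap-around handled by adding the ring size. Values are hypothetical:

#include <stdio.h>

int
main(void)
{
    int num_slots = 512;
    int nr_hwcur = 500;     /* first unsent slot */
    int rhead = 10;         /* one past the last slot userspace filled */
    int n = rhead - nr_hwcur;

    if (n < 0)
        n += num_slots;
    printf("%d slots to transmit\n", n);    /* 22 */
    return (0);
}
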
991 struct netmap_adapter *na = kring->na;
992 if_t ifp = na->ifp;
993 struct netmap_ring *ring = kring->ring;
996 u_int const lim = kring->nkr_num_slots - 1;
997 u_int const head = kring->rhead;
1005 u_int report_frequency = kring->nkr_num_slots >> 1;
1006 /* device-specific */
1008 iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
1010 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
1035 nm_i = kring->nr_hwcur;
1038 int nic_i_start = -1, flags = 0;
1040 pi.ipi_segs = txq->ift_segs;
1041 pi.ipi_qsidx = kring->ring_id;
1044 __builtin_prefetch(&ring->slot[nm_i]);
1045 __builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
1046 __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);
1049 struct netmap_slot *slot = &ring->slot[nm_i];
1051 u_int len = slot->len;
1055 flags |= (slot->flags & NS_REPORT ||
1073 if (!(slot->flags & NS_MOREFRAG)) {
1081 ctx->isc_txd_encap(ctx->ifc_softc, &pi);
1088 /* Reinit per-packet info for the next one. */
1090 nic_i_start = -1;
1094 __builtin_prefetch(&ring->slot[nm_i + 1]);
1095 __builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
1096 __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);
1100 if (slot->flags & NS_BUF_CHANGED) {
1102 netmap_reload_map(na, txq->ift_buf_tag,
1103 txq->ift_sds.ifsd_map[nic_i], addr);
1106 bus_dmamap_sync(txq->ift_buf_tag,
1107 txq->ift_sds.ifsd_map[nic_i],
1110 slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED | NS_MOREFRAG);
1114 kring->nr_hwcur = nm_i;
1117 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
1121 ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
1129 * trigger a per-tx-queue timer to try again later.
1131 if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
1134 nic_i = txq->ift_cidx_processed;
1135 kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
1139 if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
1140 if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
1141 callout_reset_sbt_on(&txq->ift_netmap_timer,
1144 txq->ift_netmap_timer.c_cpu, 0);
1159 * On call, kring->rhead is the first packet that userspace wants
1160 * to keep, and kring->rcur is the wakeup point.
1161 * The kernel has previously reported packets up to kring->rtail.
1169 struct netmap_adapter *na = kring->na;
1170 struct netmap_ring *ring = kring->ring;
1171 if_t ifp = na->ifp;
1175 u_int const lim = kring->nkr_num_slots - 1;
1176 int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
1180 if_shared_ctx_t sctx = ctx->ifc_sctx;
1181 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1182 iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
1183 iflib_fl_t fl = &rxq->ifr_fl[0];
1192 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
1204 * nic_i = fl->ifl_cidx;
1205 * nm_i = kring->nr_hwtail (previous)
1207 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
1209 * fl->ifl_cidx is set to 0 on a ring reinit
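
A worked example of the index relation quoted above, nm_i == (nic_i + nkr_hwofs) % ring_size, with made-up, non-negative values:

#include <stdio.h>

int
main(void)
{
    int ring_size = 512, nkr_hwofs = 5;
    int nic_i = 509;        /* hardware consumer index */
    int nm_i = (nic_i + nkr_hwofs) % ring_size;

    printf("nic_i %d -> nm_i %d\n", nic_i, nm_i);   /* 509 -> 2 */
    return (0);
}
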
1212 uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);
1213 bool have_rxcq = sctx->isc_flags & IFLIB_HAS_RXCQ;
1222 cidxp = &rxq->ifr_cq_cidx;
1224 cidxp = &fl->ifl_cidx;
1225 avail = ctx->isc_rxd_available(ctx->ifc_softc,
1226 rxq->ifr_id, *cidxp, USHRT_MAX);
1228 nic_i = fl->ifl_cidx;
1230 MPASS(nm_i == kring->nr_hwtail);
1231 for (n = 0; avail > 0 && nm_i != hwtail_lim; n++, avail--) {
1233 ri.iri_frags = rxq->ifr_frags;
1234 ri.iri_qsidx = kring->ring_id;
1235 ri.iri_ifp = ctx->ifc_ifp;
1238 error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
1241 ring->slot[nm_i].len = 0;
1242 ring->slot[nm_i].flags = 0;
1244 ring->slot[nm_i].len = ri.iri_frags[i].irf_len;
1245 if (i == (ri.iri_nfrags - 1)) {
1246 ring->slot[nm_i].len -= crclen;
1247 ring->slot[nm_i].flags = 0;
1253 ring->slot[nm_i].flags = NS_MOREFRAG;
1256 bus_dmamap_sync(fl->ifl_buf_tag,
1257 fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
1259 fl->ifl_cidx = nic_i = nm_next(nic_i, lim);
1264 while (*cidxp >= scctx->isc_nrxd[0])
1265 *cidxp -= scctx->isc_nrxd[0];
1275 kring->nr_hwtail = nm_i;
1277 kring->nr_kflags &= ~NKR_PENDINTR;
1281 * (kring->nr_hwcur to head excluded),
1285 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
1298 if_ctx_t ctx = if_getsoftc(na->ifp);
1316 na.ifp = ctx->ifc_ifp;
1318 MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
1319 MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);
1328 na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
1329 na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
1336 struct netmap_adapter *na = NA(ctx->ifc_ifp);
1339 slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
1342 for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
1350 int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
1351 netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i],
1360 struct netmap_adapter *na = NA(ctx->ifc_ifp);
1364 slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
1367 kring = na->rx_rings[rxq->ifr_id];
1376 if_ctx_t ctx = txq->ift_ctx;
1382 netmap_tx_irq(ctx->ifc_ifp, txq->ift_id);
1430 fl = &rxq->ifr_fl[flid];
1431 iru->iru_paddrs = fl->ifl_bus_addrs;
1432 iru->iru_idxs = fl->ifl_rxd_idxs;
1433 iru->iru_qsidx = rxq->ifr_id;
1434 iru->iru_buf_size = fl->ifl_buf_size;
1435 iru->iru_flidx = fl->ifl_id;
1448 BUS_SPACE_MAXADDR : (1ULL << (width)) - 1ULL)
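
The macro fragment above converts a DMA width in bits into the highest bus address the device can reach. A minimal sketch of that conversion (the real macro also falls back to BUS_SPACE_MAXADDR for widths that need no masking; that condition is elided from this listing):

#include <stdio.h>
#include <stdint.h>

static uint64_t
width_to_lowaddr(unsigned width)    /* assumes 0 < width < 64 */
{
    return ((1ULL << width) - 1ULL);
}

int
main(void)
{
    printf("32-bit DMA: 0x%llx\n", (unsigned long long)width_to_lowaddr(32));
    printf("48-bit DMA: 0x%llx\n", (unsigned long long)width_to_lowaddr(48));
    return (0);
}
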
1454 device_t dev = ctx->ifc_dev;
1457 lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(ctx->ifc_softc_ctx.isc_dma_width);
1470 &dma->idi_tag);
1478 err = bus_dmamem_alloc(dma->idi_tag, (void **)&dma->idi_vaddr,
1479 BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
1487 dma->idi_paddr = IF_BAD_DMA;
1488 err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
1489 size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
1490 if (err || dma->idi_paddr == IF_BAD_DMA) {
1497 dma->idi_size = size;
1501 bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
1503 bus_dma_tag_destroy(dma->idi_tag);
1505 dma->idi_tag = NULL;
1513 if_shared_ctx_t sctx = ctx->ifc_sctx;
1515 KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));
1517 return (iflib_dma_alloc_align(ctx, size, sctx->isc_q_align, dma, mapflags));
1539 if (dma->idi_tag == NULL)
1541 if (dma->idi_paddr != IF_BAD_DMA) {
1542 bus_dmamap_sync(dma->idi_tag, dma->idi_map,
1544 bus_dmamap_unload(dma->idi_tag, dma->idi_map);
1545 dma->idi_paddr = IF_BAD_DMA;
1547 if (dma->idi_vaddr != NULL) {
1548 bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
1549 dma->idi_vaddr = NULL;
1551 bus_dma_tag_destroy(dma->idi_tag);
1552 dma->idi_tag = NULL;
1569 struct grouptask *gtask = info->ifi_task;
1573 if (info->ifi_filter != NULL) {
1574 result = info->ifi_filter(info->ifi_filter_arg);
1587 struct grouptask *gtask = info->ifi_task;
1589 iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
1597 if (info->ifi_filter != NULL) {
1598 result = info->ifi_filter(info->ifi_filter_arg);
1603 ctx = rxq->ifr_ctx;
1604 sc = ctx->ifc_softc;
1606 intr_legacy = !!(ctx->ifc_flags & IFC_LEGACY);
1607 MPASS(rxq->ifr_ntxqirq);
1608 for (i = 0; i < rxq->ifr_ntxqirq; i++) {
1609 txqid = rxq->ifr_txqid[i];
1610 txq = &ctx->ifc_txqs[txqid];
1611 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
1613 if (!ctx->isc_txd_credits_update(sc, txqid, false)) {
1620 GROUPTASK_ENQUEUE(&txq->ift_task);
1622 if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
1623 cidx = rxq->ifr_cq_cidx;
1625 cidx = rxq->ifr_fl[0].ifl_cidx;
1632 IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
1644 if_ctx_t ctx = info->ifi_ctx;
1648 if (info->ifi_filter != NULL) {
1649 result = info->ifi_filter(info->ifi_filter_arg);
1654 taskqueue_enqueue(ctx->ifc_tq, &ctx->ifc_admin_task);
1665 device_t dev = ctx->ifc_dev;
1669 if (ctx->ifc_flags & IFC_LEGACY)
1679 irq->ii_res = res;
1680 KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
1691 irq->ii_tag = tag;
1698 * mbuf map. TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in a
1707 if_ctx_t ctx = txq->ift_ctx;
1708 if_shared_ctx_t sctx = ctx->ifc_sctx;
1709 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1710 device_t dev = ctx->ifc_dev;
1716 nsegments = scctx->isc_tx_nsegments;
1717 ntsosegments = scctx->isc_tx_tso_segments_max;
1718 tsomaxsize = scctx->isc_tx_tso_size_max;
1719 if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
1721 MPASS(scctx->isc_ntxd[0] > 0);
1722 MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
1724 if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
1726 MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
1729 lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(scctx->isc_dma_width);
1739 sctx->isc_tx_maxsize, /* maxsize */
1741 sctx->isc_tx_maxsegsize, /* maxsegsize */
1745 &txq->ift_buf_tag))) {
1748 (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
1751 tso = (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) != 0;
1759 sctx->isc_tso_maxsegsize,/* maxsegsize */
1763 &txq->ift_tso_buf_tag))) {
1770 if (!(txq->ift_sds.ifsd_m =
1772 scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1781 if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc(
1782 sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
1789 if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc(
1790 sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
1797 for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
1798 err = bus_dmamap_create(txq->ift_buf_tag, 0,
1799 &txq->ift_sds.ifsd_map[i]);
1806 err = bus_dmamap_create(txq->ift_tso_buf_tag, 0,
1807 &txq->ift_sds.ifsd_tso_map[i]);
1825 if (txq->ift_sds.ifsd_map != NULL) {
1826 map = txq->ift_sds.ifsd_map[i];
1827 bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE);
1828 bus_dmamap_unload(txq->ift_buf_tag, map);
1829 bus_dmamap_destroy(txq->ift_buf_tag, map);
1830 txq->ift_sds.ifsd_map[i] = NULL;
1833 if (txq->ift_sds.ifsd_tso_map != NULL) {
1834 map = txq->ift_sds.ifsd_tso_map[i];
1835 bus_dmamap_sync(txq->ift_tso_buf_tag, map,
1837 bus_dmamap_unload(txq->ift_tso_buf_tag, map);
1838 bus_dmamap_destroy(txq->ift_tso_buf_tag, map);
1839 txq->ift_sds.ifsd_tso_map[i] = NULL;
1846 if_ctx_t ctx = txq->ift_ctx;
1848 for (int i = 0; i < txq->ift_size; i++)
1851 if (txq->ift_br != NULL) {
1852 ifmp_ring_free(txq->ift_br);
1853 txq->ift_br = NULL;
1856 mtx_destroy(&txq->ift_mtx);
1858 if (txq->ift_sds.ifsd_map != NULL) {
1859 free(txq->ift_sds.ifsd_map, M_IFLIB);
1860 txq->ift_sds.ifsd_map = NULL;
1862 if (txq->ift_sds.ifsd_tso_map != NULL) {
1863 free(txq->ift_sds.ifsd_tso_map, M_IFLIB);
1864 txq->ift_sds.ifsd_tso_map = NULL;
1866 if (txq->ift_sds.ifsd_m != NULL) {
1867 free(txq->ift_sds.ifsd_m, M_IFLIB);
1868 txq->ift_sds.ifsd_m = NULL;
1870 if (txq->ift_buf_tag != NULL) {
1871 bus_dma_tag_destroy(txq->ift_buf_tag);
1872 txq->ift_buf_tag = NULL;
1874 if (txq->ift_tso_buf_tag != NULL) {
1875 bus_dma_tag_destroy(txq->ift_tso_buf_tag);
1876 txq->ift_tso_buf_tag = NULL;
1878 if (txq->ift_ifdi != NULL) {
1879 free(txq->ift_ifdi, M_IFLIB);
1888 mp = &txq->ift_sds.ifsd_m[i];
1892 if (txq->ift_sds.ifsd_map != NULL) {
1893 bus_dmamap_sync(txq->ift_buf_tag,
1894 txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE);
1895 bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]);
1897 if (txq->ift_sds.ifsd_tso_map != NULL) {
1898 bus_dmamap_sync(txq->ift_tso_buf_tag,
1899 txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE);
1900 bus_dmamap_unload(txq->ift_tso_buf_tag,
1901 txq->ift_sds.ifsd_tso_map[i]);
1911 if_ctx_t ctx = txq->ift_ctx;
1912 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1913 if_shared_ctx_t sctx = ctx->ifc_sctx;
1918 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
1920 txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;
1923 txq->ift_cidx_processed = 0;
1924 txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
1925 txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
1927 for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
1928 bzero((void *)di->idi_vaddr, di->idi_size);
1930 IFDI_TXQ_SETUP(ctx, txq->ift_id);
1931 for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
1932 bus_dmamap_sync(di->idi_tag, di->idi_map,
1951 if_ctx_t ctx = rxq->ifr_ctx;
1952 if_shared_ctx_t sctx = ctx->ifc_sctx;
1953 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1954 device_t dev = ctx->ifc_dev;
1959 MPASS(scctx->isc_nrxd[0] > 0);
1960 MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
1962 lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(scctx->isc_dma_width);
1964 fl = rxq->ifr_fl;
1965 for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
1966 fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
1973 sctx->isc_rx_maxsize, /* maxsize */
1974 sctx->isc_rx_nsegments, /* nsegments */
1975 sctx->isc_rx_maxsegsize, /* maxsegsize */
1979 &fl->ifl_buf_tag);
1987 if (!(fl->ifl_sds.ifsd_m =
1989 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1997 if (!(fl->ifl_sds.ifsd_cl =
1999 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
2007 if (!(fl->ifl_sds.ifsd_ba =
2009 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
2019 if (!(fl->ifl_sds.ifsd_map =
2020 (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
2026 for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
2027 err = bus_dmamap_create(fl->ifl_buf_tag, 0,
2028 &fl->ifl_sds.ifsd_map[i]);
2048 bus_dma_segment_t seg;
2057 cb_arg->error = error;
2058 cb_arg->seg = segs[0];
2059 cb_arg->nseg = nseg;
2063 * iflib_fl_refill - refill an rxq free-buffer list
2068 * (Re)populate an rxq free-buffer list with up to @count new packet buffers.
2085 MPASS(count <= fl->ifl_size - fl->ifl_credits - 1);
2087 sd_m = fl->ifl_sds.ifsd_m;
2088 sd_map = fl->ifl_sds.ifsd_map;
2089 sd_cl = fl->ifl_sds.ifsd_cl;
2090 sd_ba = fl->ifl_sds.ifsd_ba;
2091 pidx = fl->ifl_pidx;
2093 frag_idx = fl->ifl_fragidx;
2094 credits = fl->ifl_credits;
2099 MPASS(credits + n <= fl->ifl_size);
2101 if (pidx < fl->ifl_cidx)
2102 MPASS(pidx + n <= fl->ifl_cidx);
2103 if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
2104 MPASS(fl->ifl_gen == 0);
2105 if (pidx > fl->ifl_cidx)
2106 MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);
2111 iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
2112 while (n-- > 0) {
2120 bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size,
2123 bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
2126 cl = uma_zalloc(fl->ifl_zone, M_NOWAIT);
2132 err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx],
2133 cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg,
2136 uma_zfree(fl->ifl_zone, cl);
2140 sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr;
2143 fl->ifl_cl_enqueued++;
2148 bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx],
2157 bit_set(fl->ifl_rx_bitmap, frag_idx);
2159 fl->ifl_m_enqueued++;
2163 fl->ifl_rxd_idxs[i] = frag_idx;
2164 fl->ifl_bus_addrs[i] = bus_addr;
2167 MPASS(credits <= fl->ifl_size);
2168 if (++idx == fl->ifl_size) {
2170 fl->ifl_gen = 1;
2177 ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2178 fl->ifl_pidx = idx;
2179 fl->ifl_credits = credits;
2185 if (n < count - 1) {
2189 ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2190 fl->ifl_pidx = idx;
2191 fl->ifl_credits = credits;
2194 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2196 ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id,
2197 fl->ifl_id, fl->ifl_pidx);
2198 if (__predict_true(bit_test(fl->ifl_rx_bitmap, frag_idx))) {
2199 fl->ifl_fragidx = frag_idx + 1;
2200 if (fl->ifl_fragidx == fl->ifl_size)
2201 fl->ifl_fragidx = 0;
2203 fl->ifl_fragidx = frag_idx;
2207 return (n == -1 ? 0 : IFLIB_RXEOF_EMPTY);
2219 * driver to the NIC (RDT - 1 is thus the last valid one).
2224 int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
2226 int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1;
2229 MPASS(fl->ifl_credits <= fl->ifl_size);
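
A short arithmetic sketch of the "hold one slot back" rule above: a free list of ifl_size buffers is never refilled past ifl_size - 1 outstanding credits, so the producer index can never run into the index the NIC is still consuming. Values are hypothetical:

#include <stdio.h>

int
main(void)
{
    int ifl_size = 1024, ifl_credits = 600;
    int reclaimable = ifl_size - ifl_credits - 1;

    printf("can refill %d of %d slots (credits %d, one held back)\n",
        reclaimable, ifl_size, ifl_credits);    /* 423 of 1024 */
    return (0);
}
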
2243 in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH);
2251 iflib_dma_info_t idi = fl->ifl_ifdi;
2255 for (i = 0; i < fl->ifl_size; i++) {
2256 struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
2257 caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];
2260 sd_map = fl->ifl_sds.ifsd_map[i];
2261 bus_dmamap_sync(fl->ifl_buf_tag, sd_map,
2263 bus_dmamap_unload(fl->ifl_buf_tag, sd_map);
2264 uma_zfree(fl->ifl_zone, *sd_cl);
2275 fl->ifl_m_dequeued++;
2276 fl->ifl_cl_dequeued++;
2280 for (i = 0; i < fl->ifl_size; i++) {
2281 MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
2282 MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
2288 fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0;
2289 bzero(idi->idi_vaddr, idi->idi_size);
2300 iflib_rxq_t rxq = fl->ifl_rxq;
2301 if_ctx_t ctx = rxq->ifr_ctx;
2302 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2305 bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
2311 MPASS(fl->ifl_credits == 0);
2312 qidx = rxq->ifr_fl_offset + fl->ifl_id;
2313 if (scctx->isc_rxd_buf_size[qidx] != 0)
2314 fl->ifl_buf_size = scctx->isc_rxd_buf_size[qidx];
2316 fl->ifl_buf_size = ctx->ifc_rx_mbuf_sz;
2318 * ifl_buf_size may be a driver-supplied value, so pull it up
2321 fl->ifl_buf_size = iflib_get_mbuf_size_for(fl->ifl_buf_size);
2322 if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
2323 ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
2324 fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
2325 fl->ifl_zone = m_getzone(fl->ifl_buf_size);
2328 * Avoid pre-allocating zillions of clusters to an idle card
2333 MPASS(fl->ifl_size > 0);
2334 (void)iflib_fl_refill(ctx, fl, min(128, fl->ifl_size - 1));
2335 if (min(128, fl->ifl_size - 1) != fl->ifl_credits)
2341 MPASS(fl->ifl_ifdi != NULL);
2342 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2358 if (rxq->ifr_fl != NULL) {
2359 for (i = 0; i < rxq->ifr_nfl; i++) {
2360 fl = &rxq->ifr_fl[i];
2361 if (fl->ifl_buf_tag != NULL) {
2362 if (fl->ifl_sds.ifsd_map != NULL) {
2363 for (j = 0; j < fl->ifl_size; j++) {
2365 fl->ifl_buf_tag,
2366 fl->ifl_sds.ifsd_map[j],
2369 fl->ifl_buf_tag,
2370 fl->ifl_sds.ifsd_map[j]);
2372 fl->ifl_buf_tag,
2373 fl->ifl_sds.ifsd_map[j]);
2376 bus_dma_tag_destroy(fl->ifl_buf_tag);
2377 fl->ifl_buf_tag = NULL;
2379 free(fl->ifl_sds.ifsd_m, M_IFLIB);
2380 free(fl->ifl_sds.ifsd_cl, M_IFLIB);
2381 free(fl->ifl_sds.ifsd_ba, M_IFLIB);
2382 free(fl->ifl_sds.ifsd_map, M_IFLIB);
2383 free(fl->ifl_rx_bitmap, M_IFLIB);
2384 fl->ifl_sds.ifsd_m = NULL;
2385 fl->ifl_sds.ifsd_cl = NULL;
2386 fl->ifl_sds.ifsd_ba = NULL;
2387 fl->ifl_sds.ifsd_map = NULL;
2388 fl->ifl_rx_bitmap = NULL;
2390 free(rxq->ifr_fl, M_IFLIB);
2391 rxq->ifr_fl = NULL;
2392 free(rxq->ifr_ifdi, M_IFLIB);
2393 rxq->ifr_ifdi = NULL;
2394 rxq->ifr_cq_cidx = 0;
2405 if_ctx_t ctx = txq->ift_ctx;
2406 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2409 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
2417 if (this_tick - txq->ift_last_timer_tick >= iflib_timer_default) {
2418 txq->ift_last_timer_tick = this_tick;
2419 IFDI_TIMER(ctx, txq->ift_id);
2420 if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
2421 ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
2422 (sctx->isc_pause_frames == 0)))
2425 if (txq->ift_qstatus != IFLIB_QUEUE_IDLE &&
2426 ifmp_ring_is_stalled(txq->ift_br)) {
2427 KASSERT(ctx->ifc_link_state == LINK_STATE_UP,
2429 txq->ift_qstatus = IFLIB_QUEUE_HUNG;
2431 txq->ift_cleaned_prev = txq->ift_cleaned;
2434 if (txq->ift_db_pending)
2435 GROUPTASK_ENQUEUE(&txq->ift_task);
2437 sctx->isc_pause_frames = 0;
2438 if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
2439 callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer,
2440 txq, txq->ift_timer.c_cpu);
2444 device_printf(ctx->ifc_dev,
2445 "Watchdog timeout (TX: %d desc avail: %d pidx: %d) -- resetting\n",
2446 txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
2448 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2449 ctx->ifc_flags |= (IFC_DO_WATCHDOG | IFC_DO_RESET);
2467 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2473 ctx->ifc_rx_mbuf_sz =
2474 iflib_get_mbuf_size_for(sctx->isc_max_frame_size);
2481 return (ctx->ifc_rx_mbuf_sz);
2487 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2488 if_t ifp = ctx->ifc_ifp;
2503 tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
2504 tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
2516 for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
2518 callout_stop(&txq->ift_timer);
2520 callout_stop(&txq->ift_netmap_timer);
2538 for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
2543 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
2545 device_printf(ctx->ifc_dev,
2546 "setting up free list %d failed - "
2553 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2555 txq = ctx->ifc_txqs;
2556 for (i = 0; i < scctx->isc_ntxqsets; i++, txq++)
2557 callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer, txq,
2558 txq->ift_timer.c_cpu);
2560 /* Re-enable txsync/rxsync. */
2591 iflib_txq_t txq = ctx->ifc_txqs;
2592 iflib_rxq_t rxq = ctx->ifc_rxqs;
2593 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2594 if_shared_ctx_t sctx = ctx->ifc_sctx;
2600 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2612 netmap_disable_all_rings(ctx->ifc_ifp);
2616 for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
2620 callout_stop(&txq->ift_timer);
2622 callout_stop(&txq->ift_netmap_timer);
2629 for (j = 0; j < txq->ift_size; j++) {
2632 txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
2633 txq->ift_in_use = txq->ift_gen = txq->ift_no_desc_avail = 0;
2634 if (sctx->isc_flags & IFLIB_PRESERVE_TX_INDICES)
2635 txq->ift_cidx = txq->ift_pidx;
2637 txq->ift_cidx = txq->ift_pidx = 0;
2639 txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
2640 txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
2641 txq->ift_pullups = 0;
2642 ifmp_ring_reset_stats(txq->ift_br);
2643 for (j = 0, di = txq->ift_ifdi; j < sctx->isc_ntxqs; j++, di++)
2644 bzero((void *)di->idi_vaddr, di->idi_size);
2646 for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
2647 if (rxq->ifr_task.gt_taskqueue != NULL)
2648 gtaskqueue_drain(rxq->ifr_task.gt_taskqueue,
2649 &rxq->ifr_task.gt_task);
2651 rxq->ifr_cq_cidx = 0;
2652 for (j = 0, di = rxq->ifr_ifdi; j < sctx->isc_nrxqs; j++, di++)
2653 bzero((void *)di->idi_vaddr, di->idi_size);
2655 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
2667 nrxd = fl->ifl_size;
2668 size = fl->ifl_rxd_size;
2669 start = fl->ifl_ifdi->idi_vaddr;
2683 int nrxd = fl->ifl_size;
2686 nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd - 1);
2687 prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
2688 prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
2691 prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd - 1)]);
2692 prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd - 1)]);
2693 prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd - 1)]);
2694 prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd - 1)]);
2695 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd - 1)]);
2696 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd - 1)]);
2697 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd - 1)]);
2698 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd - 1)]);
2712 flid = irf->irf_flid;
2713 cidx = irf->irf_idx;
2714 fl = &rxq->ifr_fl[flid];
2715 sd->ifsd_fl = fl;
2716 sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
2717 fl->ifl_credits--;
2719 fl->ifl_m_dequeued++;
2721 if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
2723 next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size - 1);
2724 prefetch(&fl->ifl_sds.ifsd_map[next]);
2725 map = fl->ifl_sds.ifsd_map[cidx];
2727 bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD);
2729 if (rxq->pfil != NULL && PFIL_HOOKED_IN(rxq->pfil) && pf_rv != NULL &&
2730 irf->irf_len != 0) {
2731 payload = *sd->ifsd_cl;
2732 payload += ri->iri_pad;
2733 len = ri->iri_len - ri->iri_pad;
2734 *pf_rv = pfil_mem_in(rxq->pfil, payload, len, ri->iri_ifp, &m);
2756 m = fl->ifl_sds.ifsd_m[cidx];
2757 fl->ifl_sds.ifsd_m[cidx] = NULL;
2763 m = fl->ifl_sds.ifsd_m[cidx];
2764 fl->ifl_sds.ifsd_m[cidx] = NULL;
2769 if (unload && irf->irf_len != 0)
2770 bus_dmamap_unload(fl->ifl_buf_tag, map);
2771 fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size - 1);
2772 if (__predict_false(fl->ifl_cidx == 0))
2773 fl->ifl_gen = 0;
2774 bit_clear(fl->ifl_rx_bitmap, cidx);
2792 m = rxd_frag_to_sd(rxq, &ri->iri_frags[i], !consumed, sd,
2795 MPASS(*sd->ifsd_cl != NULL);
2798 * Exclude zero-length frags & frags from
2801 if (ri->iri_frags[i].irf_len == 0 || consumed ||
2817 padlen = ri->iri_pad;
2820 mt->m_next = m;
2825 cl = *sd->ifsd_cl;
2826 *sd->ifsd_cl = NULL;
2830 m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
2834 m->m_data += padlen;
2835 ri->iri_len -= padlen;
2836 m->m_len = ri->iri_frags[i].irf_len;
2837 } while (++i < ri->iri_nfrags);
2853 if (ri->iri_nfrags == 1 &&
2854 ri->iri_frags[0].irf_len != 0 &&
2855 ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) {
2856 m = rxd_frag_to_sd(rxq, &ri->iri_frags[0], false, &sd,
2863 if (!IP_ALIGNED(m) && ri->iri_pad == 0)
2864 m->m_data += 2;
2866 memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
2867 m->m_len = ri->iri_frags[0].irf_len;
2868 m->m_data += ri->iri_pad;
2869 ri->iri_len -= ri->iri_pad;
2878 m->m_pkthdr.len = ri->iri_len;
2879 m->m_pkthdr.rcvif = ri->iri_ifp;
2880 m->m_flags |= ri->iri_flags;
2881 m->m_pkthdr.ether_vtag = ri->iri_vtag;
2882 m->m_pkthdr.flowid = ri->iri_flowid;
2884 m->m_pkthdr.numa_domain = if_getnumadomain(ri->iri_ifp);
2886 M_HASHTYPE_SET(m, ri->iri_rsstype);
2887 m->m_pkthdr.csum_flags = ri->iri_csum_flags;
2888 m->m_pkthdr.csum_data = ri->iri_csum_data;
2896 CURVNET_SET(if_getvnet(lc->ifp));
2917 switch (eh->ether_type) {
2942 GROUPTASK_ENQUEUE(&rxq->ifr_task);
2949 if_ctx_t ctx = rxq->ifr_ctx;
2950 if_shared_ctx_t sctx = ctx->ifc_sctx;
2951 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2970 ifp = ctx->ifc_ifp;
2974 if (sctx->isc_flags & IFLIB_HAS_RXCQ)
2975 cidxp = &rxq->ifr_cq_cidx;
2977 cidxp = &rxq->ifr_fl[0].ifl_cidx;
2979 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
2996 ri.iri_qsidx = rxq->ifr_id;
2999 ri.iri_frags = rxq->ifr_frags;
3000 err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
3006 if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
3009 /* XXX NB: shurd - check if this is still safe */
3010 while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0])
3011 rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
3021 avail--;
3022 budget_left--;
3029 /* imm_pkt: -- cxgb */
3033 mt->m_nextpkt = m;
3039 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
3044 iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding);
3048 mh = mh->m_nextpkt;
3049 m->m_nextpkt = NULL;
3064 if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC | CSUM_L4_VALID)) ==
3066 if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0)
3080 mt->m_nextpkt = m;
3095 tcp_lro_flush_all(&rxq->ifr_lc);
3102 ctx->ifc_flags |= IFC_DO_RESET;
3108 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq) - 1)
3113 qidx_t minthresh = txq->ift_size / 8;
3127 qidx_t minthresh = txq->ift_size / 8;
3128 if (txq->ift_in_use > 4 * minthresh)
3130 if (txq->ift_in_use > 2 * minthresh)
3132 if (txq->ift_in_use > minthresh)
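
The occupancy tiers above make doorbell and report-status writes scale with how busy the TX ring is: the fuller the ring, the more updates get deferred. A hedged sketch of that tiering (the per-tier values returned by the real functions are elided from this listing, so the ones below are hypothetical):

#include <stdio.h>

static int
max_db_deferred(int ift_size, int ift_in_use)
{
    int notify = (ift_size / 64) - 1;   /* hypothetical TXD_NOTIFY_COUNT */
    int minthresh = ift_size / 8;

    if (ift_in_use > 4 * minthresh)
        return (notify);                /* very busy: defer the most */
    if (ift_in_use > 2 * minthresh)
        return (notify / 2);
    if (ift_in_use > minthresh)
        return (notify / 4);
    return (0);                         /* nearly idle: ring the doorbell now */
}

int
main(void)
{
    printf("size 1024, in use 600 -> defer up to %d\n", max_db_deferred(1024, 600));
    printf("size 1024, in use  64 -> defer up to %d\n", max_db_deferred(1024, 64));
    return (0);
}
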
3137 #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
3138 #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG)
3146 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
3147 #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
3148 #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
3149 #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
3152 #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
3153 #define MAX_TX_DESC(ctx) MAX((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \
3154 (ctx)->ifc_softc_ctx.isc_tx_nsegments)
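
A worked example of the QIDX() mapping above, which steers a packet to a TX queue from its RSS flow ID. All values are made up:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    uint32_t flowid = 0xdeadbeef;   /* hypothetical RSS hash from the NIC */
    uint32_t rss_table_mask = 127;  /* rss table size 128, minus one */
    int ntxqsets = 8, first_qset = 0;
    int qidx = (int)((flowid & rss_table_mask) % ntxqsets) + first_qset;

    printf("flowid 0x%x -> txq %d\n", (unsigned)flowid, qidx);  /* txq 7 */
    return (0);
}
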
3159 if_ctx_t ctx = txq->ift_ctx;
3162 max = TXQ_MAX_DB_DEFERRED(txq, txq->ift_in_use);
3165 if (ring || (txq->ift_db_pending >= max) || (TXQ_AVAIL(txq) <= MAX_TX_DESC(ctx) + 2)) {
3172 dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
3173 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
3175 ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
3180 txq->ift_db_pending = txq->ift_npending = 0;
3191 pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
3193 pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
3195 pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
3199 #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
3200 #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO))
3201 #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
3202 #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO))
3220 if (__predict_false(m->m_len < sizeof(*eh))) {
3226 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3227 pi->ipi_etype = ntohs(eh->evl_proto);
3228 pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3230 pi->ipi_etype = ntohs(eh->evl_encap_proto);
3231 pi->ipi_ehdrlen = ETHER_HDR_LEN;
3263 /* Fills out pi->ipi_etype */
3269 switch (pi->ipi_etype) {
3277 miniplen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip));
3278 if (__predict_false(m->m_len < miniplen)) {
3283 if (m->m_len == pi->ipi_ehdrlen) {
3284 n = m->m_next;
3287 if (n->m_len >= sizeof(*ip)) {
3288 ip = (struct ip *)n->m_data;
3293 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3299 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3302 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3306 pi->ipi_ip_hlen = ip->ip_hl << 2;
3307 pi->ipi_ipproto = ip->ip_p;
3308 pi->ipi_ip_tos = ip->ip_tos;
3309 pi->ipi_flags |= IPI_TX_IPV4;
3319 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
3321 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
3324 ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
3327 pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
3328 pi->ipi_ipproto = ip6->ip6_nxt;
3329 pi->ipi_ip_tos = IPV6_TRAFFIC_CLASS(ip6);
3330 pi->ipi_flags |= IPI_TX_IPV6;
3336 pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
3337 pi->ipi_ip_hlen = 0;
3349 if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
3354 if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
3365 /* Fills out pi->ipi_etype */
3366 err = iflib_parse_ether_header(pi, mp, &txq->ift_pullups);
3371 switch (pi->ipi_etype) {
3380 minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
3381 if (__predict_false(m->m_len < minthlen)) {
3386 if (m->m_len == pi->ipi_ehdrlen) {
3387 n = m->m_next;
3389 if (n->m_len >= sizeof(*ip)) {
3390 ip = (struct ip *)n->m_data;
3391 if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th))
3392 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
3394 txq->ift_pullups++;
3397 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3400 txq->ift_pullups++;
3403 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3404 if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
3405 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
3408 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3409 if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
3410 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
3412 pi->ipi_ip_hlen = ip->ip_hl << 2;
3413 pi->ipi_ipproto = ip->ip_p;
3414 pi->ipi_ip_tos = ip->ip_tos;
3415 pi->ipi_flags |= IPI_TX_IPV4;
3419 if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) {
3421 txq->ift_pullups++;
3422 if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL))
3424 th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen);
3426 pi->ipi_tcp_hflags = tcp_get_flags(th);
3427 pi->ipi_tcp_hlen = th->th_off << 2;
3428 pi->ipi_tcp_seq = th->th_seq;
3431 if (__predict_false(ip->ip_p != IPPROTO_TCP))
3436 pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP);
3437 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3438 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3439 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
3440 if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
3441 ip->ip_sum = 0;
3442 ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
3446 if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
3447 ip->ip_sum = 0;
3455 struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
3457 pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
3459 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
3460 txq->ift_pullups++;
3461 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
3464 th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
3466 /* XXX-BZ this will go badly in case of ext hdrs. */
3467 pi->ipi_ipproto = ip6->ip6_nxt;
3468 pi->ipi_ip_tos = IPV6_TRAFFIC_CLASS(ip6);
3469 pi->ipi_flags |= IPI_TX_IPV6;
3473 if (pi->ipi_ipproto == IPPROTO_TCP) {
3474 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
3475 txq->ift_pullups++;
3476 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
3479 pi->ipi_tcp_hflags = tcp_get_flags(th);
3480 pi->ipi_tcp_hlen = th->th_off << 2;
3481 pi->ipi_tcp_seq = th->th_seq;
3484 if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
3489 pi->ipi_csum_flags |= CSUM_IP6_TCP;
3490 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
3491 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
3498 pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
3499 pi->ipi_ip_hlen = 0;
3518 ifsd_m = txq->ift_sds.ifsd_m;
3519 ntxd = txq->ift_size;
3520 pidx = txq->ift_pidx & (ntxd - 1);
3521 ifsd_m = txq->ift_sds.ifsd_m;
3524 bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]);
3525 if (txq->ift_sds.ifsd_tso_map != NULL)
3526 bus_dmamap_unload(txq->ift_tso_buf_tag,
3527 txq->ift_sds.ifsd_tso_map[pidx]);
3529 txq->ift_dequeued++;
3541 ntxd = txq->ift_size;
3542 size = txq->ift_txd_size[qid];
3543 start = txq->ift_ifdi[qid].idi_vaddr;
3581 for (n = min_frame_size - (*m_head)->m_pkthdr.len;
3582 n > 0; n -= sizeof(pad))
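
The loop header above grows runt frames to the minimum Ethernet frame size by appending zeroes in sizeof(pad)-sized chunks. A standalone sketch of the byte accounting (the chunk size and the mbuf plumbing are elided from the listing, so both are stand-ins here):

#include <stdio.h>

int
main(void)
{
    char pad[16] = { 0 };           /* stand-in zero chunk */
    int min_frame_size = 60, pkt_len = 42, n, appended = 0;

    for (n = min_frame_size - pkt_len; n > 0; n -= (int)sizeof(pad))
        appended += (n < (int)sizeof(pad)) ? n : (int)sizeof(pad);
    printf("42-byte frame padded with %d zero bytes\n", appended);  /* 18 */
    return (0);
}
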
3612 ctx = txq->ift_ctx;
3613 sctx = ctx->ifc_sctx;
3614 scctx = &ctx->ifc_softc_ctx;
3615 segs = txq->ift_segs;
3616 ntxd = txq->ift_size;
3623 cidx = txq->ift_cidx;
3624 pidx = txq->ift_pidx;
3625 if (ctx->ifc_flags & IFC_PREFETCH) {
3626 next = (cidx + CACHE_PTR_INCREMENT) & (ntxd - 1);
3627 if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) {
3633 prefetch(&txq->ift_sds.ifsd_m[next]);
3634 prefetch(&txq->ift_sds.ifsd_map[next]);
3635 next = (cidx + CACHE_LINE_SIZE) & (ntxd - 1);
3637 map = txq->ift_sds.ifsd_map[pidx];
3638 ifsd_m = txq->ift_sds.ifsd_m;
3640 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3641 buf_tag = txq->ift_tso_buf_tag;
3642 max_segs = scctx->isc_tx_tso_segments_max;
3643 map = txq->ift_sds.ifsd_tso_map[pidx];
3647 buf_tag = txq->ift_buf_tag;
3648 max_segs = scctx->isc_tx_nsegments;
3649 map = txq->ift_sds.ifsd_map[pidx];
3651 if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
3652 __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
3653 err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
3662 pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG | M_BCAST | M_MCAST));
3664 pi.ipi_qsidx = txq->ift_id;
3665 pi.ipi_len = m_head->m_pkthdr.len;
3666 pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
3667 pi.ipi_vtag = M_HAS_VLANTAG(m_head) ? m_head->m_pkthdr.ether_vtag : 0;
3693 txq->ift_mbuf_defrag++;
3708 txq->ift_no_tx_dma_setup++;
3711 txq->ift_no_tx_dma_setup++;
3717 txq->ift_map_failed++;
3725 * descriptors - this does not hold true on all drivers, e.g.
3729 txq->ift_no_desc_avail++;
3733 if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
3734 GROUPTASK_ENQUEUE(&txq->ift_task);
3743 txq->ift_rs_pending += nsegs + 1;
3744 if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
3745 iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
3747 txq->ift_rs_pending = 0;
3753 MPASS(pidx >= 0 && pidx < txq->ift_size);
3757 if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
3760 MPASS(pi.ipi_new_pidx < txq->ift_size);
3762 ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
3764 ndesc += txq->ift_size;
3765 txq->ift_gen = 1;
3774 txq->ift_in_use += ndesc;
3775 txq->ift_db_pending += ndesc;
3781 txq->ift_pidx = pi.ipi_new_pidx;
3782 txq->ift_npending += pi.ipi_ndescs;
3786 txq->ift_txd_encap_efbig++;
3795 * err can't possibly be non-zero here, so we don't need to test it
3801 txq->ift_mbuf_defrag_failed++;
3802 txq->ift_map_failed++;
3817 cidx = txq->ift_cidx;
3818 gen = txq->ift_gen;
3819 qsize = txq->ift_size;
3820 mask = qsize - 1;
3821 ifsd_m = txq->ift_sds.ifsd_m;
3822 do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);
3824 while (n-- > 0) {
3831 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
3832 bus_dmamap_sync(txq->ift_tso_buf_tag,
3833 txq->ift_sds.ifsd_tso_map[cidx],
3835 bus_dmamap_unload(txq->ift_tso_buf_tag,
3836 txq->ift_sds.ifsd_tso_map[cidx]);
3838 bus_dmamap_sync(txq->ift_buf_tag,
3839 txq->ift_sds.ifsd_map[cidx],
3841 bus_dmamap_unload(txq->ift_buf_tag,
3842 txq->ift_sds.ifsd_map[cidx]);
3845 MPASS(m->m_nextpkt == NULL);
3849 txq->ift_dequeued++;
3858 txq->ift_cidx = cidx;
3859 txq->ift_gen = gen;
3866 if_ctx_t ctx = txq->ift_ctx;
3869 MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
3872 * Need a rate-limiting check so that this isn't called every time
3877 if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
3881 txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
3888 txq->ift_cleaned += reclaim;
3889 txq->ift_in_use -= reclaim;
3900 size = r->size;
3901 next = (cidx + CACHE_PTR_INCREMENT) & (size - 1);
3902 items = __DEVOLATILE(struct mbuf **, &r->items[0]);
3904 prefetch(items[(cidx + offset) & (size - 1)]);
3907 prefetch2cachelines(items[(cidx + offset + 1) & (size - 1)]);
3908 prefetch2cachelines(items[(cidx + offset + 2) & (size - 1)]);
3909 prefetch2cachelines(items[(cidx + offset + 3) & (size - 1)]);
3911 return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size - 1)]));
3918 ifmp_ring_check_drainage(txq->ift_br, budget);
3924 iflib_txq_t txq = r->cookie;
3925 if_ctx_t ctx = txq->ift_ctx;
3929 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
3931 return (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id,
3938 iflib_txq_t txq = r->cookie;
3939 if_ctx_t ctx = txq->ift_ctx;
3940 if_t ifp = ctx->ifc_ifp;
3952 rang = iflib_txd_db_check(txq, reclaimed && txq->ift_db_pending);
3953 avail = IDXDIFF(pidx, cidx, r->size);
3955 if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
3961 if (__predict_true(r->items[(cidx + i) & (r->size - 1)] != (void *)txq))
3962 m_freem(r->items[(cidx + i) & (r->size - 1)]);
3963 r->items[(cidx + i) & (r->size - 1)] = NULL;
3968 if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
3969 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3971 callout_stop(&txq->ift_timer);
3981 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3987 avail, ctx->ifc_flags, TXQ_AVAIL(txq));
3989 do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
3992 int rem = do_prefetch ? count - i : 0;
4010 /* no room - bail out */
4014 /* we can't send this packet - skip it */
4020 bytes_sent += m->m_pkthdr.len;
4021 mcast_sent += !!(m->m_flags & M_MCAST);
4029 /* deliberate use of bitwise or to avoid gratuitous short-circuit */
4056 txq = r->cookie;
4058 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
4060 callout_stop(&txq->ift_timer);
4063 avail = IDXDIFF(pidx, cidx, r->size);
4065 mp = _ring_peek_one(r, cidx, i, avail - i);
4080 r = txq->ift_br;
4081 r->drain = iflib_txq_drain_free;
4082 r->can_drain = iflib_txq_drain_always;
4084 ifmp_ring_check_drainage(r, r->size);
4086 r->drain = iflib_txq_drain;
4087 r->can_drain = iflib_txq_can_drain;
4094 if_ctx_t ctx = txq->ift_ctx;
4095 if_t ifp = ctx->ifc_ifp;
4096 int abdicate = ctx->ifc_sysctl_tx_abdicate;
4099 txq->ift_cpu_exec_count[curcpu]++;
4105 netmap_tx_irq(ifp, txq->ift_id))
4112 if (txq->ift_db_pending)
4113 ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate);
4115 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
4120 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
4124 if (ctx->ifc_flags & IFC_LEGACY)
4127 IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
4134 if_ctx_t ctx = rxq->ifr_ctx;
4143 rxq->ifr_cpu_exec_count[curcpu]++;
4146 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
4149 nmirq = netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work);
4155 budget = ctx->ifc_sysctl_rx_budget;
4163 if (ctx->ifc_flags & IFC_LEGACY)
4166 IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
4169 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
4173 GROUPTASK_ENQUEUE(&rxq->ifr_task);
4175 callout_reset_curcpu(&rxq->ifr_watchdog, 1, &_task_fn_rx_watchdog, rxq);
4182 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
4188 running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
4189 oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
4190 do_reset = (ctx->ifc_flags & IFC_DO_RESET);
4191 do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
4192 in_detach = (ctx->ifc_flags & IFC_IN_DETACH);
4193 ctx->ifc_flags &= ~(IFC_DO_RESET | IFC_DO_WATCHDOG);
4196 if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
4202 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
4204 callout_stop(&txq->ift_timer);
4207 if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_ADMINCQ)
4210 ctx->ifc_watchdog_events++;
4214 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
4215 callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer, txq,
4216 txq->ift_timer.c_cpu);
4225 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
4234 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) &&
4235 !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
4251 ctx = info->iidi_ctx;
4252 info->iidi_req = req;
4253 info->iidi_oidp = oidp;
4297 MPASS(m->m_nextpkt == NULL);
4298 /* ALTQ-enabled interfaces always use queue 0. */
4300 /* Use driver-supplied queue selection method if it exists */
4301 if (ctx->isc_txq_select_v2) {
4309 ctx->ifc_txqs[0].ift_pullups += early_pullups;
4314 qidx = ctx->isc_txq_select_v2(ctx->ifc_softc, m, &pi);
4315 ctx->ifc_txqs[qidx].ift_pullups += early_pullups;
4318 else if (ctx->isc_txq_select)
4319 qidx = ctx->isc_txq_select(ctx->ifc_softc, m);
4325 txq = &ctx->ifc_txqs[qidx];
4328 if (txq->ift_closed) {
4330 next = m->m_nextpkt;
4331 m->m_nextpkt = NULL;
4345 next = next->m_nextpkt;
4358 next = next->m_nextpkt;
4359 mp[i]->m_nextpkt = NULL;
4363 abdicate = ctx->ifc_sysctl_tx_abdicate;
4365 err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate);
4368 GROUPTASK_ENQUEUE(&txq->ift_task);
4371 GROUPTASK_ENQUEUE(&txq->ift_task);
4374 txq->ift_closed = TRUE;
4376 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
4390 * ALTQ-specific code required in iflib. It is assumed that the overhead of
4409 struct ifaltq *ifq = &ifp->if_snd; /* XXX - DRVAPI */
4427 IFQ_ENQUEUE(&ifp->if_snd, m, err); /* XXX - DRVAPI */
4441 iflib_txq_t txq = ctx->ifc_txqs;
4445 ctx->ifc_flags |= IFC_QFLUSH;
4448 while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
4451 ctx->ifc_flags &= ~IFC_QFLUSH;
4480 if (ifa->ifa_addr->sa_family == AF_INET)
4484 if (ifa->ifa_addr->sa_family == AF_INET6)
4504 if (ifr->ifr_mtu == if_getmtu(ifp)) {
4512 if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
4514 if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
4515 ctx->ifc_flags |= IFC_MULTISEG;
4517 ctx->ifc_flags &= ~IFC_MULTISEG;
4519 err = if_setmtu(ifp, ifr->ifr_mtu);
4531 if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
4542 ctx->ifc_if_flags = if_getflags(ifp);
4562 err = ifmedia_ioctl(ifp, ifr, ctx->ifc_mediap, command);
4590 mask = ifr->ifr_reqcap ^ oldmask;
4591 mask &= ctx->ifc_softc_ctx.isc_capabilities | IFCAP_MEXTPG;
4604 setmask |= ctx->ifc_softc_ctx.isc_capabilities &
4618 ctx->ifc_softc_ctx.isc_capenable ^= setmask;
4679 /* Re-init to load the changes, if required */
4701 /* Re-init to load the changes, if required */
4731 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
4739 if (sctx->isc_parse_devinfo != NULL)
4740 sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
4742 ent = sctx->isc_vendor_info;
4743 while (ent->pvi_vendor_id != 0) {
4744 if (pci_vendor_id != ent->pvi_vendor_id) {
4748 if ((pci_device_id == ent->pvi_device_id) &&
4749 ((pci_subvendor_id == ent->pvi_subvendor_id) ||
4750 (ent->pvi_subvendor_id == 0)) &&
4751 ((pci_subdevice_id == ent->pvi_subdevice_id) ||
4752 (ent->pvi_subdevice_id == 0)) &&
4753 ((pci_rev_id == ent->pvi_rev_id) ||
4754 (ent->pvi_rev_id == 0))) {
4755 device_set_desc_copy(dev, ent->pvi_name);
4757 * ever stops re-probing on best match because the sctx
4783 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4784 if_shared_ctx_t sctx = ctx->ifc_sctx;
4785 device_t dev = ctx->ifc_dev;
4788 if (ctx->ifc_sysctl_ntxqs != 0)
4789 scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
4790 if (ctx->ifc_sysctl_nrxqs != 0)
4791 scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
4793 for (i = 0; i < sctx->isc_ntxqs; i++) {
4794 if (ctx->ifc_sysctl_ntxds[i] != 0)
4795 scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
4797 scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
4800 for (i = 0; i < sctx->isc_nrxqs; i++) {
4801 if (ctx->ifc_sysctl_nrxds[i] != 0)
4802 scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
4804 scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4807 for (i = 0; i < sctx->isc_nrxqs; i++) {
4808 if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
4809 device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
4810 i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
4811 scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
4813 if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
4814 device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
4815 i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
4816 scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
4818 if (!powerof2(scctx->isc_nrxd[i])) {
4819 device_printf(dev, "nrxd%d: %d is not a power of 2 - using default value of %d\n",
4820 i, scctx->isc_nrxd[i], sctx->isc_nrxd_default[i]);
4821 scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4825 for (i = 0; i < sctx->isc_ntxqs; i++) {
4826 if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
4827 device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
4828 i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
4829 scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
4831 if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
4832 device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
4833 i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
4834 scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
4836 if (!powerof2(scctx->isc_ntxd[i])) {
4837 device_printf(dev, "ntxd%d: %d is not a power of 2 - using default value of %d\n",
4838 i, scctx->isc_ntxd[i], sctx->isc_ntxd_default[i]);
4839 scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
4855 pa.pa_headname = if_name(ctx->ifc_ifp);
4858 for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
4859 rxq->pfil = pfil;
4870 rxq = ctx->ifc_rxqs;
4871 pfil = rxq->pfil;
4873 rxq->pfil = NULL;
4880 * Advance forward by n members of the cpuset ctx->ifc_cpus starting from
4890 MPASS(CPU_ISSET(cpuid, &ctx->ifc_cpus));
4893 MPASS(!CPU_EMPTY(&ctx->ifc_cpus));
4895 first_valid = CPU_FFS(&ctx->ifc_cpus) - 1;
4896 last_valid = CPU_FLS(&ctx->ifc_cpus) - 1;
4897 n = n % CPU_COUNT(&ctx->ifc_cpus);
4903 } while (!CPU_ISSET(cpuid, &ctx->ifc_cpus));
4904 n--;
4918 if (grp->cg_children == 0)
4919 return (-1);
4921 MPASS(grp->cg_child);
4922 for (i = 0; i < grp->cg_children; i++) {
4923 if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
4927 return (-1);
4932 * Find an L2 neighbor of the given CPU or return -1 if none found. This
4944 return (-1);
4950 while ((i = find_child_with_core(cpu, grp)) != -1) {
4956 if (grp->cg_child[i].cg_count <= 1)
4957 return (-1);
4958 grp = &grp->cg_child[i];
4962 if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
4963 return (-1);
4970 if (CPU_ISSET(i, &grp->cg_mask) && i != cpu)
4975 return (-1);
4983 return (-1);
4989 * ---------------------
4996 * ---------- --------- ------ ------------------------------------------------
4997 * - - X RX and TX queues mapped to consecutive physical
5000 * - X X RX and TX queues mapped to consecutive cores
5003 * X - X RX and TX queues mapped to consecutive physical
5009 * - n/a - RX and TX queues mapped to consecutive cores of
5012 * X n/a - RX and TX queues mapped to consecutive cores of
5019 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
5022 if (ctx->ifc_sysctl_separate_txrx) {
5032 if (ctx->ifc_sysctl_use_logical_cores &&
5033 ctx->ifc_cpus_are_physical_cores &&
5034 is_tx && qid < scctx->isc_nrxqsets) {
5040 if (l2_neighbor != -1) {
5045 * consecutive-after-RX assignment scheme.
5055 core_index = scctx->isc_nrxqsets + qid;
5068 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
5072 unsigned int base_cpuid = ctx->ifc_sysctl_core_offset;
5077 first_valid = CPU_FFS(&ctx->ifc_cpus) - 1;
5078 last_valid = CPU_FLS(&ctx->ifc_cpus) - 1;
5082 * Align the user-chosen base CPU ID to the next valid CPU
5087 * zero-based reference frame, and so we shift the given
5095 /* shift from zero-based to first_valid-based */
5098 base_cpuid = (base_cpuid - first_valid) %
5099 (last_valid - first_valid + 1);
5101 if (!CPU_ISSET(base_cpuid, &ctx->ifc_cpus)) {
5109 while (!CPU_ISSET(base_cpuid, &ctx->ifc_cpus))
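
A condensed user-space sketch of the wrap-and-advance logic described above: the user-chosen core offset is folded into the span of valid CPU IDs and then advanced to the next CPU actually present in the set. The cpuset_t is replaced by a plain bitmask and all values are hypothetical:

#include <stdio.h>

int
main(void)
{
    unsigned long cpus = 0x3fcUL;   /* CPUs 2..9 are usable */
    int first_valid = 2, last_valid = 9;
    int base_cpuid = 11;            /* user-requested offset, past the end */

    base_cpuid = first_valid +
        (base_cpuid - first_valid) % (last_valid - first_valid + 1);
    while (!(cpus & (1UL << base_cpuid)))
        base_cpuid = (base_cpuid + 1) % (last_valid + 1);

    printf("offset 11 wraps to CPU %d\n", base_cpuid);  /* CPU 3 */
    return (0);
}
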
5126 * neighbors of CPUs that RX queues have been mapped to - in this
5133 for (i = 0; i < scctx->isc_ntxqsets; i++)
5136 for (i = 0; i < scctx->isc_nrxqsets; i++)
5139 CPU_AND(&assigned_cpus, &assigned_cpus, &ctx->ifc_cpus);
5144 if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) {
5145 base_cpuid = op->next_cpuid;
5146 op->next_cpuid = cpuid_advance(ctx, op->next_cpuid,
5148 MPASS(op->refcount < UINT_MAX);
5149 op->refcount++;
5158 device_printf(ctx->ifc_dev,
5161 op->next_cpuid = cpuid_advance(ctx, base_cpuid,
5163 op->refcount = 1;
5164 CPU_COPY(&ctx->ifc_cpus, &op->set);
5180 if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) {
5181 MPASS(op->refcount > 0);
5182 op->refcount--;
5183 if (op->refcount == 0) {
5208 sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK | M_ZERO);
5210 ctx->ifc_flags |= IFC_SC_ALLOCATED;
5213 ctx->ifc_sctx = sctx;
5214 ctx->ifc_dev = dev;
5215 ctx->ifc_softc = sc;
5223 scctx = &ctx->ifc_softc_ctx;
5224 ifp = ctx->ifc_ifp;
5234 ctx->ifc_txrx = *scctx->isc_txrx;
5236 MPASS(scctx->isc_dma_width <= flsll(BUS_SPACE_MAXADDR));
5238 if (sctx->isc_flags & IFLIB_DRIVER_MEDIA)
5239 ctx->ifc_mediap = scctx->isc_media;
5242 if (scctx->isc_capabilities & IFCAP_TXCSUM)
5243 MPASS(scctx->isc_tx_csum_flags);
5247 scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_MEXTPG);
5249 scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_MEXTPG);
5251 if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
5252 scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
5253 if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
5254 scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
5259 /* XXX change for per-queue sizes */
5263 if (scctx->isc_tx_nsegments > num_txd / MAX_SINGLE_PACKET_FRACTION)
5264 scctx->isc_tx_nsegments = max(1, num_txd /
5266 if (scctx->isc_tx_tso_segments_max > num_txd /
5268 scctx->isc_tx_tso_segments_max = max(1,
5271 /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
5277 if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
5287 if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
5288 if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
5290 if (scctx->isc_rss_table_size == 0)
5291 scctx->isc_rss_table_size = 64;
5292 scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;
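/*
 * Worked example added for clarity (not in the original): the mask form
 * assumes a power-of-two table size (64 by default here), so an RSS hash
 * can be folded into a redirection-table bucket with a single AND:
 *
 *	bucket = rss_hash & scctx->isc_rss_table_mask;
 *
 * which yields a bucket in 0..63 for the default size (rss_hash and bucket
 * are illustrative names).
 */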
5296 ctx->ifc_tq = taskqueue_create_fast(namebuf, M_NOWAIT,
5297 taskqueue_thread_enqueue, &ctx->ifc_tq);
5298 if (ctx->ifc_tq == NULL) {
5303 err = taskqueue_start_threads(&ctx->ifc_tq, 1, PI_NET, "%s", namebuf);
5308 taskqueue_free(ctx->ifc_tq);
5312 TASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
5315 if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
5317 CPU_COPY(&all_cpus, &ctx->ifc_cpus);
5318 ctx->ifc_cpus_are_physical_cores = false;
5320 ctx->ifc_cpus_are_physical_cores = true;
5321 MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
5324 * Now set up MSI or MSI-X, should return us the number of supported
5327 if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
5328 msix = scctx->isc_vectors;
5329 } else if (scctx->isc_msix_bar != 0)
5336 scctx->isc_vectors = 1;
5337 scctx->isc_ntxqsets = 1;
5338 scctx->isc_nrxqsets = 1;
5339 scctx->isc_intr = IFLIB_INTR_LEGACY;
5354 ctx->ifc_sysctl_core_offset = get_ctx_core_offset(ctx);
5358 * When using MSI-X, ensure that ifdi_{r,t}x_queue_intr_enable
5362 kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL,
5364 if (kobj_method == &kobj_desc->deflt) {
5366 "MSI-X requires ifdi_rx_queue_intr_enable method");
5371 kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL,
5373 if (kobj_method == &kobj_desc->deflt) {
5375 "MSI-X requires ifdi_tx_queue_intr_enable method");
5381 * Assign the MSI-X vectors.
5391 } else if (scctx->isc_intr != IFLIB_INTR_MSIX) {
5393 if (scctx->isc_intr == IFLIB_INTR_MSI) {
5397 if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
5403 "Cannot use iflib with only 1 MSI-X interrupt!\n");
5409 * It prevents a double-locking panic with iflib_media_status when
5413 ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet);
5430 device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
5435 DEBUGNET_SET(ctx->ifc_ifp, iflib);
5437 if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
5440 ctx->ifc_flags |= IFC_INIT_DONE;
5447 ether_ifdetach(ctx->ifc_ifp);
5449 taskqueue_free(ctx->ifc_tq);
5462 device_set_softc(ctx->ifc_dev, NULL);
5463 if (ctx->ifc_flags & IFC_SC_ALLOCATED)
5464 free(ctx->ifc_softc, M_IFLIB);
5475 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
5486 if_t ifp = ctx->ifc_ifp;
5487 device_t dev = ctx->ifc_dev;
5496 device_printf(dev, "SR-IOV in use; detach first.\n");
5502 ctx->ifc_flags |= IFC_IN_DETACH;
5516 if (ctx->ifc_led_dev != NULL)
5517 led_destroy(ctx->ifc_led_dev);
5528 taskqueue_free(ctx->ifc_tq);
5529 ctx->ifc_tq = NULL;
5531 /* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
5538 device_set_softc(ctx->ifc_dev, NULL);
5539 if (ctx->ifc_flags & IFC_SC_ALLOCATED)
5540 free(ctx->ifc_softc, M_IFLIB);
5556 for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
5557 callout_drain(&txq->ift_timer);
5559 callout_drain(&txq->ift_netmap_timer);
5561 if (txq->ift_task.gt_uniq != NULL)
5562 taskqgroup_detach(tqg, &txq->ift_task);
5564 for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
5565 if (rxq->ifr_task.gt_uniq != NULL)
5566 taskqgroup_detach(tqg, &rxq->ifr_task);
5574 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
5575 iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
5577 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
5578 pci_release_msi(ctx->ifc_dev);
5580 if (ctx->ifc_msix_mem != NULL) {
5581 bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
5582 rman_get_rid(ctx->ifc_msix_mem), ctx->ifc_msix_mem);
5583 ctx->ifc_msix_mem = NULL;
5622 iflib_txq_t txq = ctx->ifc_txqs;
5677 * - Start a fast taskqueue thread for each core
5678 * - Start a taskqueue for control operations
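/*
 * Clarifying note, not part of the original comment: in the code these two
 * items correspond to the per-core grouptasks attached via
 * taskqgroup_attach() for RX/TX work and to the per-context ifc_tq
 * taskqueue used for admin/config work, e.g.
 *
 *	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
 *	taskqueue_enqueue(ctx->ifc_tq, &ctx->ifc_admin_task);
 *
 * as seen in the deferred-interrupt helpers further down.
 */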
5718 MPASS(sctx->isc_tx_maxsize);
5719 MPASS(sctx->isc_tx_maxsegsize);
5721 MPASS(sctx->isc_rx_maxsize);
5722 MPASS(sctx->isc_rx_nsegments);
5723 MPASS(sctx->isc_rx_maxsegsize);
5725 MPASS(sctx->isc_nrxqs >= 1 && sctx->isc_nrxqs <= 8);
5726 for (i = 0; i < sctx->isc_nrxqs; i++) {
5727 MPASS(sctx->isc_nrxd_min[i]);
5728 MPASS(powerof2(sctx->isc_nrxd_min[i]));
5729 MPASS(sctx->isc_nrxd_max[i]);
5730 MPASS(powerof2(sctx->isc_nrxd_max[i]));
5731 MPASS(sctx->isc_nrxd_default[i]);
5732 MPASS(powerof2(sctx->isc_nrxd_default[i]));
5735 MPASS(sctx->isc_ntxqs >= 1 && sctx->isc_ntxqs <= 8);
5736 for (i = 0; i < sctx->isc_ntxqs; i++) {
5737 MPASS(sctx->isc_ntxd_min[i]);
5738 MPASS(powerof2(sctx->isc_ntxd_min[i]));
5739 MPASS(sctx->isc_ntxd_max[i]);
5740 MPASS(powerof2(sctx->isc_ntxd_max[i]));
5741 MPASS(sctx->isc_ntxd_default[i]);
5742 MPASS(powerof2(sctx->isc_ntxd_default[i]));
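/*
 * Example added for clarity (not in the original): these assertions require
 * every min/max/default descriptor count a driver advertises to be a
 * non-zero power of two, so e.g. isc_ntxd_default[0] = 1024 is accepted
 * while a value such as 1000 would trip the powerof2() MPASS at attach time.
 */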
5750 MPASS(scctx->isc_txrx->ift_txd_encap);
5751 MPASS(scctx->isc_txrx->ift_txd_flush);
5752 MPASS(scctx->isc_txrx->ift_txd_credits_update);
5753 MPASS(scctx->isc_txrx->ift_rxd_available);
5754 MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
5755 MPASS(scctx->isc_txrx->ift_rxd_refill);
5756 MPASS(scctx->isc_txrx->ift_rxd_flush);
5762 if_shared_ctx_t sctx = ctx->ifc_sctx;
5763 driver_t *driver = sctx->isc_driver;
5764 device_t dev = ctx->ifc_dev;
5770 STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
5771 ifp = ctx->ifc_ifp = if_alloc_dev(IFT_ETHER, dev);
5793 ctx->ifc_vlan_attach_event =
5796 ctx->ifc_vlan_detach_event =
5800 if ((sctx->isc_flags & IFLIB_DRIVER_MEDIA) == 0) {
5801 ctx->ifc_mediap = &ctx->ifc_media;
5802 ifmedia_init(ctx->ifc_mediap, IFM_IMASK,
5812 if (ctx->ifc_vlan_attach_event != NULL) {
5813 EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
5814 ctx->ifc_vlan_attach_event = NULL;
5816 if (ctx->ifc_vlan_detach_event != NULL) {
5817 EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
5818 ctx->ifc_vlan_detach_event = NULL;
5826 if_t ifp = ctx->ifc_ifp;
5829 ifmedia_removeall(&ctx->ifc_media);
5842 /* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
5849 if_shared_ctx_t sctx = ctx->ifc_sctx;
5850 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
5851 device_t dev = ctx->ifc_dev;
5852 int nrxqsets = scctx->isc_nrxqsets;
5853 int ntxqsets = scctx->isc_ntxqsets;
5859 uint32_t *rxqsizes = scctx->isc_rxqsizes;
5860 uint32_t *txqsizes = scctx->isc_txqsizes;
5861 uint8_t nrxqs = sctx->isc_nrxqs;
5862 uint8_t ntxqs = sctx->isc_ntxqs;
5863 int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
5864 int fl_offset = (sctx->isc_flags & IFLIB_HAS_RXCQ ? 1 : 0);
5874 if (!(ctx->ifc_txqs =
5883 if (!(ctx->ifc_rxqs =
5891 txq = ctx->ifc_txqs;
5892 rxq = ctx->ifc_rxqs;
5907 txq->ift_ifdi = ifdip;
5915 txq->ift_txd_size[j] = scctx->isc_txd_size[j];
5916 bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
5918 txq->ift_ctx = ctx;
5919 txq->ift_id = i;
5920 if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
5921 txq->ift_br_offset = 1;
5923 txq->ift_br_offset = 0;
5933 snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:TX(%d):callout",
5934 device_get_nameunit(dev), txq->ift_id);
5935 mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
5936 callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
5937 txq->ift_timer.c_cpu = cpu;
5939 callout_init_mtx(&txq->ift_netmap_timer, &txq->ift_mtx, 0);
5940 txq->ift_netmap_timer.c_cpu = cpu;
5943 err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
5954 callout_init(&rxq->ifr_watchdog, 1);
5964 rxq->ifr_ifdi = ifdip;
5966 rxq->ifr_ntxqirq = 1;
5967 rxq->ifr_txqid[0] = i;
5975 bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
5977 rxq->ifr_ctx = ctx;
5978 rxq->ifr_id = i;
5979 rxq->ifr_fl_offset = fl_offset;
5980 rxq->ifr_nfl = nfree_lists;
5987 rxq->ifr_fl = fl;
5991 fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
5992 fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
6002 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
6003 fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB,
6011 iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;
6014 vaddrs[i * ntxqs + j] = di->idi_vaddr;
6015 paddrs[i * ntxqs + j] = di->idi_paddr;
6019 device_printf(ctx->ifc_dev,
6033 iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;
6036 vaddrs[i * nrxqs + j] = di->idi_vaddr;
6037 paddrs[i * nrxqs + j] = di->idi_paddr;
6041 device_printf(ctx->ifc_dev,
6057 if (ctx->ifc_rxqs != NULL)
6058 free(ctx->ifc_rxqs, M_IFLIB);
6059 ctx->ifc_rxqs = NULL;
6060 if (ctx->ifc_txqs != NULL)
6061 free(ctx->ifc_txqs, M_IFLIB);
6062 ctx->ifc_txqs = NULL;
6070 iflib_txq_t txq = ctx->ifc_txqs;
6082 iflib_txq_t txq = ctx->ifc_txqs;
6083 if_shared_ctx_t sctx = ctx->ifc_sctx;
6087 for (j = 0; j < sctx->isc_ntxqs; j++)
6088 iflib_dma_free(&txq->ift_ifdi[j]);
6091 free(ctx->ifc_txqs, M_IFLIB);
6092 ctx->ifc_txqs = NULL;
6103 iflib_rxq_t rxq = ctx->ifc_rxqs;
6109 for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
6111 err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
6113 ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]));
6115 device_printf(ctx->ifc_dev,
6120 IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
6130 rxq = ctx->ifc_rxqs;
6132 tcp_lro_free(&rxq->ifr_lc);
6146 iflib_rxq_t rxq = ctx->ifc_rxqs;
6147 if_shared_ctx_t sctx = ctx->ifc_sctx;
6150 for (i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
6151 for (j = 0; j < sctx->isc_nrxqs; j++)
6152 iflib_dma_free(&rxq->ifr_ifdi[j]);
6155 tcp_lro_free(&rxq->ifr_lc);
6158 free(ctx->ifc_rxqs, M_IFLIB);
6159 ctx->ifc_rxqs = NULL;
6172 device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err);
6177 device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
6200 dev = ctx->ifc_dev;
6201 base_cpuid = ctx->ifc_sysctl_core_offset;
6204 irq ? irq->ii_res : NULL, name);
6210 if (cpuid > ctx->ifc_cpuid_highest)
6211 ctx->ifc_cpuid_highest = cpuid;
6245 dev = ctx->ifc_dev;
6246 subdev = subctx->ifc_dev;
6250 q = &subctx->ifc_rxqs[qid];
6251 info = &subctx->ifc_rxqs[qid].ifr_filter_info;
6252 gtask = &subctx->ifc_rxqs[qid].ifr_task;
6264 info->ifi_filter = filter;
6265 info->ifi_filter_arg = filter_arg;
6266 info->ifi_task = gtask;
6267 info->ifi_ctx = q;
6279 if (tqrid != -1) {
6285 taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name);
6305 info = &ctx->ifc_filter_info;
6311 q = &ctx->ifc_txqs[qid];
6312 info = &ctx->ifc_txqs[qid].ift_filter_info;
6313 gtask = &ctx->ifc_txqs[qid].ift_task;
6318 ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
6321 q = &ctx->ifc_rxqs[qid];
6322 info = &ctx->ifc_rxqs[qid].ifr_filter_info;
6323 gtask = &ctx->ifc_rxqs[qid].ifr_task;
6330 q = &ctx->ifc_rxqs[qid];
6331 info = &ctx->ifc_rxqs[qid].ifr_filter_info;
6332 gtask = &ctx->ifc_rxqs[qid].ifr_task;
6340 tqrid = -1;
6341 info = &ctx->ifc_filter_info;
6346 device_printf(ctx->ifc_dev, "%s: unknown net intr type\n",
6351 info->ifi_filter = filter;
6352 info->ifi_filter_arg = filter_arg;
6353 info->ifi_task = gtask;
6354 info->ifi_ctx = q;
6356 dev = ctx->ifc_dev;
6365 if (tqrid != -1) {
6371 taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name);
6390 q = &ctx->ifc_txqs[qid];
6391 gtask = &ctx->ifc_txqs[qid].ift_task;
6397 q = &ctx->ifc_rxqs[qid];
6398 gtask = &ctx->ifc_rxqs[qid].ifr_task;
6404 TASK_INIT(&ctx->ifc_vflr_task, 0, _task_fn_iov, ctx);
6411 dev = ctx->ifc_dev;
6412 taskqgroup_attach(tqg, gtask, q, dev, irq ? irq->ii_res : NULL,
6421 if (irq->ii_tag)
6422 bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);
6424 if (irq->ii_res)
6425 bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ,
6426 rman_get_rid(irq->ii_res), irq->ii_res);
6432 iflib_txq_t txq = ctx->ifc_txqs;
6433 iflib_rxq_t rxq = ctx->ifc_rxqs;
6434 if_irq_t irq = &ctx->ifc_legacy_irq;
6442 info = &rxq->ifr_filter_info;
6443 gtask = &rxq->ifr_task;
6445 rx_only = (ctx->ifc_sctx->isc_flags & IFLIB_SINGLE_IRQ_RX_ONLY) != 0;
6447 ctx->ifc_flags |= IFC_LEGACY;
6448 info->ifi_filter = filter;
6449 info->ifi_filter_arg = filter_arg;
6450 info->ifi_task = gtask;
6451 info->ifi_ctx = rxq;
6453 dev = ctx->ifc_dev;
6460 res = irq->ii_res;
6463 GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
6464 taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, dev, res,
6473 ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
6474 device_get_nameunit(ctx->ifc_dev));
6481 GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
6488 GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
6495 taskqueue_enqueue(ctx->ifc_tq, &ctx->ifc_admin_task);
6502 taskqueue_enqueue(ctx->ifc_tq, &ctx->ifc_vflr_task);
6522 taskqueue_enqueue(ctx->ifc_tq, config_task);
6528 if_t ifp = ctx->ifc_ifp;
6529 iflib_txq_t txq = ctx->ifc_txqs;
6534 ctx->ifc_flags |= IFC_PREFETCH;
6538 if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
6539 for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
6540 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
6542 ctx->ifc_link_state = link_state;
6551 int credits_pre = txq->ift_cidx_processed;
6554 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
6556 if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
6559 txq->ift_processed += credits;
6560 txq->ift_cidx_processed += credits;
6562 MPASS(credits_pre + credits == txq->ift_cidx_processed);
6563 if (txq->ift_cidx_processed >= txq->ift_size)
6564 txq->ift_cidx_processed -= txq->ift_size;
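/*
 * Worked example added for clarity (not in the original): with a 1024-entry
 * ring, ift_cidx_processed == 1020 and 10 newly reported credits, the index
 * first advances to 1030 and is then wrapped back into range: 1030 - 1024 = 6.
 */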
6574 for (i = 0, fl = &rxq->ifr_fl[0]; i < rxq->ifr_nfl; i++, fl++)
6575 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
6577 return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
6586 info->iidi_ctx = ctx;
6587 info->iidi_offset = offset;
6588 info->iidi_value = value;
6589 SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
6590 SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
6599 return (&ctx->ifc_ctx_sx);
6605 device_t dev = ctx->ifc_dev;
6606 if_shared_ctx_t sctx = ctx->ifc_sctx;
6607 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
6611 iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
6612 iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;
6616 imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));
6619 if (scctx->isc_disable_msix)
6622 /* First try MSI-X */
6625 device_printf(dev, "MSI-X not supported or disabled\n");
6629 bar = ctx->ifc_softc_ctx.isc_msix_bar;
6631 * bar == -1 => "trust me I know what I'm doing"
6635 * allows shoddy garbage to use MSI-X in this framework.
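/*
 * Sketch of the driver side, added for clarity and not taken from this
 * file: an iflib driver normally fills in the shared-context field from
 * its own attach path, for example
 *
 *	scctx->isc_msix_bar = pci_msix_table_bar(dev);
 *
 * or passes -1 when it has already mapped every BAR itself and wants
 * iflib to skip the table mapping below.
 */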
6637 if (bar != -1) {
6638 ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
6640 if (ctx->ifc_msix_mem == NULL) {
6641 device_printf(dev, "Unable to map MSI-X table\n");
6646 admincnt = sctx->isc_admin_intrcnt;
6649 queuemsgs = min(msgs - admincnt, 1);
6651 queuemsgs = msgs - admincnt;
6658 queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
6662 CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
6668 if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
6673 if (rx_queues > scctx->isc_nrxqsets)
6674 rx_queues = scctx->isc_nrxqsets;
6684 if (tx_queues > scctx->isc_ntxqsets)
6685 tx_queues = scctx->isc_ntxqsets;
6687 if (ctx->ifc_sysctl_qs_eq_override == 0) {
6701 "insufficient number of MSI-X vectors "
6712 "Unable to allocate sufficient MSI-X vectors "
6715 if (bar != -1) {
6717 ctx->ifc_msix_mem);
6718 ctx->ifc_msix_mem = NULL;
6722 device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
6724 scctx->isc_vectors = vectors;
6725 scctx->isc_nrxqsets = rx_queues;
6726 scctx->isc_ntxqsets = tx_queues;
6727 scctx->isc_intr = IFLIB_INTR_MSIX;
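/*
 * Note added for clarity (not in the original): TX completions are driven
 * by per-queue grouptasks rather than dedicated vectors, so the vector
 * count requested here appears to be the RX queue count plus the admin
 * vector(s), plus any extras reserved via ifc_sysctl_extra_msix_vectors;
 * e.g. 8 RX queues and one admin interrupt lead to a request for 9 vectors.
 */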
6732 "failed to allocate %d MSI-X vectors, err: %d\n", vectors,
6734 if (bar != -1) {
6736 ctx->ifc_msix_mem);
6737 ctx->ifc_msix_mem = NULL;
6743 scctx->isc_nrxqsets = 1;
6744 scctx->isc_ntxqsets = 1;
6745 scctx->isc_vectors = vectors;
6748 scctx->isc_intr = IFLIB_INTR_MSI;
6750 scctx->isc_vectors = 1;
6752 scctx->isc_intr = IFLIB_INTR_LEGACY;
6764 uint16_t *state = ((uint16_t *)oidp->oid_arg1);
6805 ndesc = ctx->ifc_sysctl_ntxds;
6806 if (ctx->ifc_sctx)
6807 nqs = ctx->ifc_sctx->isc_ntxqs;
6810 ndesc = ctx->ifc_sysctl_nrxds;
6811 if (ctx->ifc_sctx)
6812 nqs = ctx->ifc_sctx->isc_nrxqs;
6830 if (rc || req->newptr == NULL)
6852 ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child,
6858 CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, "driver version");
6861 CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
6864 CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
6867 CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
6870 CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
6871 "disable MSI-X (default 0)");
6873 CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0, "set the RX budget");
6875 CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
6877 ctx->ifc_sysctl_core_offset = CORE_OFFSET_UNSPECIFIED;
6879 CTLFLAG_RDTUN, &ctx->ifc_sysctl_core_offset, 0,
6882 CTLFLAG_RDTUN, &ctx->ifc_sysctl_separate_txrx, 0,
6885 CTLFLAG_RDTUN, &ctx->ifc_sysctl_use_logical_cores, 0,
6888 CTLFLAG_RDTUN, &ctx->ifc_sysctl_extra_msix_vectors, 0,
6889 "attempt to reserve the given number of extra MSI-X vectors during driver load for the creation of additional interfaces later");
6891 CTLFLAG_RDTUN, &ctx->ifc_softc_ctx.isc_vectors, 0,
6892 "total # of MSI-X vectors allocated by driver");
6894 /* XXX change for per-queue sizes */
6908 if_shared_ctx_t sctx = ctx->ifc_sctx;
6909 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
6923 node = ctx->ifc_sysctl_node;
6926 if (scctx->isc_ntxqsets > 100)
6928 else if (scctx->isc_ntxqsets > 10)
6932 for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
6938 CTLFLAG_RD, &txq->ift_task.gt_cpu, 0,
6942 CTLFLAG_RD, &txq->ift_dequeued, "total mbufs freed");
6944 CTLFLAG_RD, &txq->ift_enqueued, "total mbufs enqueued");
6947 CTLFLAG_RD, &txq->ift_mbuf_defrag,
6950 CTLFLAG_RD, &txq->ift_pullups,
6954 &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
6956 "no_desc_avail", CTLFLAG_RD, &txq->ift_no_desc_avail,
6959 "tx_map_failed", CTLFLAG_RD, &txq->ift_map_failed,
6962 "txd_encap_efbig", CTLFLAG_RD, &txq->ift_txd_encap_efbig,
6965 "no_tx_dma_setup", CTLFLAG_RD, &txq->ift_no_tx_dma_setup,
6968 CTLFLAG_RD, &txq->ift_pidx, 1, "Producer Index");
6970 CTLFLAG_RD, &txq->ift_cidx, 1, "Consumer Index");
6972 "txq_cidx_processed", CTLFLAG_RD, &txq->ift_cidx_processed,
6975 CTLFLAG_RD, &txq->ift_in_use, 1, "descriptors in use");
6977 "txq_processed", CTLFLAG_RD, &txq->ift_processed,
6980 CTLFLAG_RD, &txq->ift_cleaned, "total cleaned");
6983 __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0,
6986 "r_enqueues", CTLFLAG_RD, &txq->ift_br->enqueues,
6989 "r_drops", CTLFLAG_RD, &txq->ift_br->drops,
6992 "r_starts", CTLFLAG_RD, &txq->ift_br->starts,
6995 "r_stalls", CTLFLAG_RD, &txq->ift_br->stalls,
6998 "r_restarts", CTLFLAG_RD, &txq->ift_br->restarts,
7001 "r_abdications", CTLFLAG_RD, &txq->ift_br->abdications,
7005 if (scctx->isc_nrxqsets > 100)
7007 else if (scctx->isc_nrxqsets > 10)
7011 for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
7017 CTLFLAG_RD, &rxq->ifr_task.gt_cpu, 0,
7019 if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
7021 "rxq_cq_cidx", CTLFLAG_RD, &rxq->ifr_cq_cidx, 1,
7025 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
7032 CTLFLAG_RD, &fl->ifl_pidx, 1, "Producer Index");
7034 CTLFLAG_RD, &fl->ifl_cidx, 1, "Consumer Index");
7036 CTLFLAG_RD, &fl->ifl_credits, 1,
7039 CTLFLAG_RD, &fl->ifl_buf_size, 1, "buffer size");
7042 "fl_m_enqueued", CTLFLAG_RD, &fl->ifl_m_enqueued,
7045 "fl_m_dequeued", CTLFLAG_RD, &fl->ifl_m_dequeued,
7048 "fl_cl_enqueued", CTLFLAG_RD, &fl->ifl_cl_enqueued,
7051 "fl_cl_dequeued", CTLFLAG_RD, &fl->ifl_cl_dequeued,
7064 ctx->ifc_flags |= IFC_DO_RESET;
7074 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
7075 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
7076 m->m_data += ETHER_HDR_LEN;
7084 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
7085 m->m_data += ETHER_HDR_LEN;
7086 m->m_len -= ETHER_HDR_LEN;
7087 n->m_len = ETHER_HDR_LEN;
7089 n->m_next = m;
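/*
 * Worked example added for clarity (not in the original): IP_ALIGNED()
 * wants m_data at a 2 (mod 4) address so that the IP header, which follows
 * the 14-byte Ethernet header, lands on a 4-byte boundary.  When a receive
 * buffer instead starts 4-byte aligned, advancing m_data by ETHER_HDR_LEN
 * as above moves it to old + 14, i.e. 2 (mod 4), restoring the expected
 * alignment; the Ethernet header is either copied forward within the same
 * buffer (small frames) or moved into a separate header mbuf (large
 * frames), as the two branches above show.
 */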
7104 *ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
7105 *clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
7119 scctx = &ctx->ifc_softc_ctx;
7123 for (i = 0; i < scctx->isc_nrxqsets; i++) {
7124 rxq = &ctx->ifc_rxqs[i];
7125 for (j = 0; j < rxq->ifr_nfl; j++) {
7126 fl = rxq->ifr_fl;
7127 fl->ifl_zone = m_getzone(fl->ifl_buf_size);
7149 txq = &ctx->ifc_txqs[0];
7166 scctx = &ctx->ifc_softc_ctx;
7172 txq = &ctx->ifc_txqs[0];
7176 for (i = 0; i < scctx->isc_nrxqsets; i++)
7177 (void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);