Lines Matching "tx-frame-sync-delay-bits"
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
5 * Copyright (c) 2001-2003 Thomas Moestl
42 #if 0 /* XXX: In case of emergency, re-enable this. */
151 device_printf(sc->sc_dev, "flags=0x%x\n", sc->sc_flags);
154 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
155 sc->sc_csum_features = GEM_CSUM_FEATURES;
157 if_initname(ifp, device_get_name(sc->sc_dev),
158 device_get_unit(sc->sc_dev));
166 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
168 callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
174 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
177 NULL, &sc->sc_pdmatag);
181 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
183 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
187 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
190 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
194 error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
198 NULL, NULL, &sc->sc_cdmatag);
206 if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
207 (void **)&sc->sc_control_data,
209 &sc->sc_cddmamap)) != 0) {
210 device_printf(sc->sc_dev,
215 sc->sc_cddma = 0;
216 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
217 sc->sc_control_data, sizeof(struct gem_control_data),
218 gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
219 device_printf(sc->sc_dev,
228 STAILQ_INIT(&sc->sc_txfreeq);
229 STAILQ_INIT(&sc->sc_txdirtyq);
236 txs = &sc->sc_txsoft[i];
237 txs->txs_mbuf = NULL;
238 txs->txs_ndescs = 0;
239 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
240 &txs->txs_dmamap)) != 0) {
241 device_printf(sc->sc_dev,
242 "unable to create TX DMA map %d, error = %d\n",
246 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
253 if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
254 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
255 device_printf(sc->sc_dev,
260 sc->sc_rxsoft[i].rxs_mbuf = NULL;
264 if ((sc->sc_flags & GEM_SERDES) != 0)
283 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
299 switch (sc->sc_variant) {
310 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
318 if (error != 0 && sc->sc_variant == GEM_SUN_GEM) {
331 sc->sc_flags |= GEM_SERDES;
332 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
337 device_printf(sc->sc_dev, "attaching PHYs failed\n");
340 sc->sc_mii = device_get_softc(sc->sc_miibus);
349 sc->sc_rxfifosize = 64 *
352 /* Get TX FIFO size. */
354 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
355 sc->sc_rxfifosize / 1024, v / 16);
358 ether_ifattach(ifp, sc->sc_enaddr);
365 if_sethwassistbits(ifp, sc->sc_csum_features, 0);
376 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
377 bus_dmamap_destroy(sc->sc_rdmatag,
378 sc->sc_rxsoft[i].rxs_dmamap);
381 if (sc->sc_txsoft[i].txs_dmamap != NULL)
382 bus_dmamap_destroy(sc->sc_tdmatag,
383 sc->sc_txsoft[i].txs_dmamap);
384 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
386 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
387 sc->sc_cddmamap);
389 bus_dma_tag_destroy(sc->sc_cdmatag);
391 bus_dma_tag_destroy(sc->sc_tdmatag);
393 bus_dma_tag_destroy(sc->sc_rdmatag);
395 bus_dma_tag_destroy(sc->sc_pdmatag);
404 if_t ifp = sc->sc_ifp;
411 callout_drain(&sc->sc_tick_ch);
413 callout_drain(&sc->sc_rx_ch);
416 bus_generic_detach(sc->sc_dev);
419 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
420 bus_dmamap_destroy(sc->sc_rdmatag,
421 sc->sc_rxsoft[i].rxs_dmamap);
423 if (sc->sc_txsoft[i].txs_dmamap != NULL)
424 bus_dmamap_destroy(sc->sc_tdmatag,
425 sc->sc_txsoft[i].txs_dmamap);
427 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
428 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
429 sc->sc_cddmamap);
430 bus_dma_tag_destroy(sc->sc_cdmatag);
431 bus_dma_tag_destroy(sc->sc_tdmatag);
432 bus_dma_tag_destroy(sc->sc_rdmatag);
433 bus_dma_tag_destroy(sc->sc_pdmatag);
439 if_t ifp = sc->sc_ifp;
449 if_t ifp = sc->sc_ifp;
454 * after power-on.
456 sc->sc_flags &= ~GEM_INITED;
473 pktlen = m->m_pkthdr.len;
477 if (eh->ether_type != htons(ETHERTYPE_IP))
480 if (ip->ip_v != IPVERSION)
483 hlen = ip->ip_hl << 2;
484 pktlen -= sizeof(struct ether_header);
487 if (ntohs(ip->ip_len) < hlen)
489 if (ntohs(ip->ip_len) != pktlen)
491 if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
494 switch (ip->ip_p) {
503 if (uh->uh_sum == 0)
512 len = hlen - sizeof(struct ip);
515 for (; len > 0; len -= sizeof(uint16_t), opts++) {
516 temp32 = cksum - *opts;
521 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
522 m->m_pkthdr.csum_data = cksum;
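
The gem_rxcksum() fragments above (file lines 473-522) validate the IP header and then adjust the hardware-supplied RX checksum by subtracting the IP option words one 16-bit value at a time, staying in ones'-complement arithmetic so any borrow is folded back into the low 16 bits. A standalone sketch of that adjustment step (hypothetical helper, not the driver's exact code):

    #include <stdint.h>

    /*
     * Subtract one 16-bit word from a ones'-complement checksum and
     * fold the borrow back into the low 16 bits (sketch only).
     */
    static uint16_t
    cksum_sub_word(uint16_t cksum, uint16_t word)
    {
            uint32_t temp32;

            temp32 = (uint32_t)cksum - word;                /* may borrow into the upper bits */
            temp32 = (temp32 >> 16) + (temp32 & 0xffff);    /* end-around fold */
            return (temp32 & 0xffff);
    }
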
534 sc->sc_cddma = segs[0].ds_addr;
541 if_t ifp = sc->sc_ifp;
574 mii_tick(sc->sc_mii);
579 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
588 for (i = GEM_TRIES; i--; DELAY(100)) {
601 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
611 device_printf(sc->sc_dev, "cannot reset device\n");
621 rxs = &sc->sc_rxsoft[i];
622 if (rxs->rxs_mbuf != NULL) {
623 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
625 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
626 m_freem(rxs->rxs_mbuf);
627 rxs->rxs_mbuf = NULL;
639 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
642 callout_stop(&sc->sc_tick_ch);
644 callout_stop(&sc->sc_rx_ch);
653 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
654 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
655 if (txs->txs_ndescs != 0) {
656 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
658 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
659 if (txs->txs_mbuf != NULL) {
660 m_freem(txs->txs_mbuf);
661 txs->txs_mbuf = NULL;
664 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
674 sc->sc_flags &= ~GEM_LINK;
675 sc->sc_wdog_timer = 0;
691 device_printf(sc->sc_dev, "cannot disable RX DMA\n");
694 DELAY(5000);
701 device_printf(sc->sc_dev, "cannot reset receiver\n");
710 device_printf(sc->sc_dev, "cannot reset RX MAC\n");
730 if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING);
734 if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
736 sc->sc_rxptr = 0;
739 /* NOTE: we use only 32-bit DMA addresses here. */
742 GEM_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
750 ((6 * ((sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1)) <<
753 (3 * sc->sc_rxfifosize / 256) |
754 ((sc->sc_rxfifosize / 256) << 12));
779 device_printf(sc->sc_dev, "cannot disable TX DMA\n");
782 DELAY(5000);
789 device_printf(sc->sc_dev, "cannot reset transmitter\n");
805 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
819 device_printf(sc->sc_dev, "cannot disable TX MAC\n");
835 sc->sc_txdescs[i].gd_flags = 0;
836 sc->sc_txdescs[i].gd_addr = 0;
838 sc->sc_txfree = GEM_MAXTXFREE;
839 sc->sc_txnext = 0;
840 sc->sc_txwin = 0;
847 rxs = &sc->sc_rxsoft[i];
848 if (rxs->rxs_mbuf == NULL) {
850 device_printf(sc->sc_dev,
863 sc->sc_rxptr = 0;
916 if_t ifp = sc->sc_ifp;
925 CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
932 * See also the STP2002-STQ document from Sun Microsystems.
939 CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
943 if ((sc->sc_flags & GEM_SERDES) == 0)
944 /* Re-initialize the MIF. */
951 /* step 4. TX MAC registers & counters */
957 /* NOTE: we use only 32-bit DMA addresses here. */
964 CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx",
965 GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
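
Both "NOTE: we use only 32-bit DMA addresses here." comments refer to the same step: the descriptor-ring base registers are split into high/low halves, and only the low 32 bits of GEM_CDTXADDR()/GEM_CDRXADDR() are programmed while the high halves are left at zero. A minimal sketch of that step, assuming the GEM_*_RING_PTR_HI/LO register names (they are not among the matched lines above):

            /* Load the ring bases; only 32-bit DMA addresses are used. */
            GEM_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0);
            GEM_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
            GEM_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
            GEM_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
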
1007 /* Set TX FIFO threshold and enable DMA. */
1025 ((6 * ((sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1)) <<
1033 (3 * sc->sc_rxfifosize / 256) |
1034 ((sc->sc_rxfifosize / 256) << 12));
1042 sc->sc_mac_rxcfg = v;
1058 GEM_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
1063 mii_mediachg(sc->sc_mii);
1066 sc->sc_wdog_timer = 0;
1067 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
1083 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1089 if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
1104 offset += (ip->ip_hl << 2);
1106 ((offset + m->m_pkthdr.csum_data) <<
1111 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
1121 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
1122 txs->txs_dmamap, *m_head, txsegs, &nsegs,
1144 * order to prevent wrap-around.
1146 if (nsegs > sc->sc_txfree - 1) {
1147 txs->txs_ndescs = 0;
1148 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1152 txs->txs_ndescs = nsegs;
1153 txs->txs_firstdesc = sc->sc_txnext;
1154 nexttx = txs->txs_firstdesc;
1162 sc->sc_txdescs[nexttx].gd_addr = htole64(txsegs[seg].ds_addr);
1166 sc->sc_txdescs[nexttx].gd_flags = htole64(flags | cflags);
1167 txs->txs_lastdesc = nexttx;
1172 CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d",
1175 sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
1180 CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d",
1183 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
1184 sc->sc_txwin = 0;
1185 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1188 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1191 /* Sync the DMA map. */
1192 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1197 __func__, txs->txs_firstdesc, txs->txs_lastdesc,
1198 txs->txs_ndescs);
1200 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1201 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1202 txs->txs_mbuf = *m_head;
1204 sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
1205 sc->sc_txfree -= txs->txs_ndescs;
1213 const u_char *laddr = if_getlladdr(sc->sc_ifp);
1218 if ((sc->sc_flags & GEM_INITED) == 0) {
1224 /* min frame length */
1226 /* max frame length and max burst size */
1257 sc->sc_flags |= GEM_INITED;
1300 * Update the TX kick register. This register has to point to the
1306 CTR3(KTR_GEM, "%s: %s: kicking TX %d",
1307 device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
1310 GEM_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext);
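
For context on the kick write above: GEM_TX_KICK carries the software producer index (the first TX descriptor slot not yet handed to the chip), while the chip advances a matching consumer index that gem_tint() reads back to reclaim finished descriptors. A minimal sketch of the pairing, assuming the GEM_TX_COMPLETION register name (it is not among the matched lines):

            /* Producer side (gem_start_locked): hand new descriptors to the chip. */
            sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);  /* first unused slot */
            GEM_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext);

            /* Consumer side (gem_tint): how far has the chip gotten? */
            txlast = GEM_READ_4(sc, GEM_TX_COMPLETION);
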
1323 IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
1328 device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
1329 sc->sc_txnext);
1333 for (; !if_sendq_empty(ifp) && sc->sc_txfree > 1;) {
1344 if ((sc->sc_txnext % 4) == 0) {
1358 device_get_name(sc->sc_dev), sc->sc_txnext);
1362 sc->sc_wdog_timer = 5;
1365 device_get_name(sc->sc_dev), __func__,
1366 sc->sc_wdog_timer);
1374 if_t ifp = sc->sc_ifp;
1383 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1387 * Go through our TX list and free mbufs for those
1392 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1396 for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
1400 sc->sc_txdescs[i].gd_flags));
1403 sc->sc_txdescs[i].gd_addr));
1404 if (i == txs->txs_lastdesc)
1419 CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
1420 "txs->txs_lastdesc = %d, txlast = %d",
1421 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
1423 if (txs->txs_firstdesc <= txs->txs_lastdesc) {
1424 if ((txlast >= txs->txs_firstdesc) &&
1425 (txlast <= txs->txs_lastdesc))
1428 /* Ick -- this command wraps. */
1429 if ((txlast >= txs->txs_firstdesc) ||
1430 (txlast <= txs->txs_lastdesc))
1437 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1439 sc->sc_txfree += txs->txs_ndescs;
1441 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1443 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1444 if (txs->txs_mbuf != NULL) {
1445 m_freem(txs->txs_mbuf);
1446 txs->txs_mbuf = NULL;
1449 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
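
The "Ick -- this command wraps." branch above handles a queued packet whose descriptor range runs past the end of the TX ring: a contiguous range tests the completion index with an AND of two comparisons, a wrapped range with an OR. Restated as a self-contained helper (hypothetical name, sketch only):

    /*
     * Return nonzero if ring index 'idx' falls within the (possibly
     * wrapping) descriptor range [first, last] of one queued packet.
     */
    static int
    gem_txidx_in_range(u_int first, u_int last, u_int idx)
    {

            if (first <= last)              /* contiguous range */
                    return (idx >= first && idx <= last);
            /* The range wraps past the end of the ring. */
            return (idx >= first || idx <= last);
    }
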
1465 if (sc->sc_txfree == GEM_NTXDESC - 1)
1466 sc->sc_txwin = 0;
1473 if (STAILQ_EMPTY(&sc->sc_txdirtyq))
1474 sc->sc_wdog_timer = 0;
1480 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
1499 if_t ifp = sc->sc_ifp;
1507 callout_stop(&sc->sc_rx_ch);
1510 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1519 CTR3(KTR_GEM, "%s: sc->sc_rxptr %d, complete %d",
1520 __func__, sc->sc_rxptr, rxcomp);
1523 for (; sc->sc_rxptr != rxcomp;) {
1524 m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
1525 rxstat = le64toh(sc->sc_rxdescs[sc->sc_rxptr].gd_flags);
1537 callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
1546 device_printf(sc->sc_dev, "receive error: CRC error\n");
1547 GEM_INIT_RXDESC(sc, sc->sc_rxptr);
1555 &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
1558 sc->sc_rxdescs[sc->sc_rxptr].gd_flags));
1561 sc->sc_rxdescs[sc->sc_rxptr].gd_addr));
1570 if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
1572 GEM_INIT_RXDESC(sc, sc->sc_rxptr);
1584 sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
1585 if ((sc->sc_rxptr % 4) == 0) {
1589 (sc->sc_rxptr + GEM_NRXDESC - 4) &
1600 m->m_data += ETHER_ALIGN; /* first byte offset */
1601 m->m_pkthdr.rcvif = ifp;
1602 m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);
1614 CTR3(KTR_GEM, "%s: done sc->sc_rxptr %d, complete %d", __func__,
1615 sc->sc_rxptr, GEM_READ_4(sc, GEM_RX_COMPLETION));
1622 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
1632 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1636 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
1639 if (rxs->rxs_mbuf != NULL) {
1640 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1642 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
1645 error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
1648 device_printf(sc->sc_dev,
1656 rxs->rxs_mbuf = m;
1657 rxs->rxs_paddr = segs[0].ds_addr;
1659 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1671 if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
1677 device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status);
1696 device_get_name(sc->sc_dev), __func__,
1707 device_printf(sc->sc_dev,
1713 device_printf(sc->sc_dev,
1717 device_printf(sc->sc_dev,
1720 device_printf(sc->sc_dev,
1721 "%s: transited to non-PAUSE state\n", __func__);
1724 device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
1742 device_printf(sc->sc_dev,
1743 "MAC TX fault, status %x\n", status2);
1746 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
1747 if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING);
1760 if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
1764 device_printf(sc->sc_dev,
1773 if_t ifp = sc->sc_ifp;
1790 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
1793 if ((sc->sc_flags & GEM_LINK) != 0)
1794 device_printf(sc->sc_dev, "device timeout\n");
1796 device_printf(sc->sc_dev, "device timeout (no link)\n");
1810 /* Configure the MIF in frame mode. */
1824 * Frame mode is implemented by loading a complete frame into the frame
1827 * Polling mode uses the frame register but completion is indicated by
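
The MIF comments above (file lines 1810-1827) describe frame mode, where a complete MII management frame is loaded into the MIF frame register and software polls that register for completion, as distinct from polling mode, whose description is cut off by the match filter. The read path at lines 1870-1885 follows the frame-mode pattern; a condensed sketch of it, with the GEM_MIF_FRAME_* field macros treated as assumptions (they are not among the matched lines):

            /* Construct and launch the read frame, then poll for completion. */
            v = GEM_MIF_FRAME_READ |
                (phy << GEM_MIF_PHY_SHIFT) |
                (reg << GEM_MIF_REG_SHIFT);
            GEM_WRITE_4(sc, GEM_MIF_FRAME, v);
            for (n = 0; n < 100; n++) {
                    DELAY(1);
                    v = GEM_READ_4(sc, GEM_MIF_FRAME);
                    if ((v & GEM_MIF_FRAME_TA0) != 0)       /* turnaround bit set */
                            return (v & GEM_MIF_FRAME_DATA);
            }
            device_printf(sc->sc_dev, "%s: timed out\n", __func__);
            return (0);
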
1843 if ((sc->sc_flags & GEM_SERDES) != 0) {
1863 device_printf(sc->sc_dev,
1870 /* Construct the frame command. */
1879 DELAY(1);
1885 device_printf(sc->sc_dev, "%s: timed out\n", __func__);
1901 if ((sc->sc_flags & GEM_SERDES) != 0) {
1915 device_printf(sc->sc_dev,
1938 device_printf(sc->sc_dev,
1948 /* Construct the frame command. */
1958 DELAY(1);
1964 device_printf(sc->sc_dev, "%s: timed out\n", __func__);
1980 if ((if_getflags(sc->sc_ifp) & IFF_DEBUG) != 0)
1981 device_printf(sc->sc_dev, "%s: status change\n", __func__);
1984 if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
1985 IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
1986 sc->sc_flags |= GEM_LINK;
1988 sc->sc_flags &= ~GEM_LINK;
1990 switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
2003 * G) and as far as enabling of RX and TX MAC goes also step H)
2008 rxcfg = sc->sc_mac_rxcfg;
2011 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
2024 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
2027 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
2032 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
2043 if ((sc->sc_flags & GEM_SERDES) == 0) {
2047 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
2060 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
2064 sc->sc_mac_rxcfg = rxcfg;
2065 if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) != 0 &&
2066 (sc->sc_flags & GEM_LINK) != 0) {
2083 error = mii_mediachg(sc->sc_mii);
2099 mii_pollstat(sc->sc_mii);
2100 ifmr->ifm_active = sc->sc_mii->mii_media_active;
2101 ifmr->ifm_status = sc->sc_mii->mii_media_status;
2118 ((if_getflags(ifp) ^ sc->sc_ifflags) &
2126 sc->sc_csum_features |= CSUM_UDP;
2128 sc->sc_csum_features &= ~CSUM_UDP;
2130 if_sethwassist(ifp, sc->sc_csum_features);
2131 sc->sc_ifflags = if_getflags(ifp);
2143 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
2147 if_setcapenable(ifp, ifr->ifr_reqcap);
2149 if_sethwassist(ifp, sc->sc_csum_features);
2168 /* We just want the 8 most significant bits. */
2171 hash[crc >> 4] |= 1 << (15 - (crc & 15));
2179 if_t ifp = sc->sc_ifp;
2190 v = sc->sc_mac_rxcfg & ~GEM_MAC_RX_HASH_FILTER;
2196 device_printf(sc->sc_dev,
2212 * order 8 bits as an index into the 256 bit logical address
2213 * filter. The high order 4 bits selects the word, while the
2214 * other 4 bits select the bit within the word (where bit 0
2226 GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
2230 sc->sc_mac_rxcfg = v;
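
As the comment at file lines 2212-2214 explains, the multicast filter is a 256-bit logical address filter indexed by the top 8 bits of the little-endian CRC of each address: the high 4 bits select one of sixteen 16-bit hash words and the low 4 bits select the bit within that word, with bit 0 being the MSB, as the hash[] update above shows. A standalone sketch of that mapping (hypothetical helper name):

    /*
     * Set the filter bit for one multicast address: keep the 8 most
     * significant CRC bits, use the high 4 as the word index and the
     * low 4 as the bit index (MSB first).
     */
    static void
    gem_hash_set(uint16_t hash[16], const uint8_t *enaddr)
    {
            uint32_t crc;

            crc = ether_crc32_le(enaddr, ETHER_ADDR_LEN);
            crc >>= 24;                     /* keep the 8 MSBs */
            hash[crc >> 4] |= 1 << (15 - (crc & 15));
    }
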