Lines Matching +full:rx +full:-frame +full:-sync +full:-delay +full:-bits
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
5 * Copyright (c) 2001-2003 Thomas Moestl
6 * Copyright (c) 2007-2009 Marius Strobl <marius@FreeBSD.org>
31 * from: FreeBSD: if_gem.c 182060 2008-08-23 15:03:26Z marius
108 CTASSERT((offsetof(struct cas_control_data, m) & ((a) - 1)) == 0)
190 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
192 if_initname(ifp, device_get_name(sc->sc_dev),
193 device_get_unit(sc->sc_dev));
201 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
202 callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
204 NET_TASK_INIT(&sc->sc_intr_task, 0, cas_intr_task, sc);
205 TASK_INIT(&sc->sc_tx_task, 1, cas_tx_task, ifp);
206 sc->sc_tq = taskqueue_create_fast("cas_taskq", M_WAITOK,
207 taskqueue_thread_enqueue, &sc->sc_tq);
208 error = taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
209 device_get_nameunit(sc->sc_dev));
211 device_printf(sc->sc_dev, "could not start threads\n");
218 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
221 &sc->sc_pdmatag);
225 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
227 CAS_PAGE_SIZE, 1, CAS_PAGE_SIZE, 0, NULL, NULL, &sc->sc_rdmatag);
231 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
234 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
238 error = bus_dma_tag_create(sc->sc_pdmatag, CAS_TX_DESC_ALIGN, 0,
242 NULL, NULL, &sc->sc_cdmatag);
250 if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
251 (void **)&sc->sc_control_data,
253 &sc->sc_cddmamap)) != 0) {
254 device_printf(sc->sc_dev,
259 sc->sc_cddma = 0;
260 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
261 sc->sc_control_data, sizeof(struct cas_control_data),
262 cas_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
263 device_printf(sc->sc_dev,
272 STAILQ_INIT(&sc->sc_txfreeq);
273 STAILQ_INIT(&sc->sc_txdirtyq);
280 txs = &sc->sc_txsoft[i];
281 txs->txs_mbuf = NULL;
282 txs->txs_ndescs = 0;
283 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
284 &txs->txs_dmamap)) != 0) {
285 device_printf(sc->sc_dev,
290 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
298 if ((error = bus_dmamem_alloc(sc->sc_rdmatag,
299 &sc->sc_rxdsoft[i].rxds_buf, BUS_DMA_WAITOK,
300 &sc->sc_rxdsoft[i].rxds_dmamap)) != 0) {
301 device_printf(sc->sc_dev,
302 "unable to allocate RX buffer %d, error = %d\n",
307 sc->sc_rxdptr = i;
308 sc->sc_rxdsoft[i].rxds_paddr = 0;
309 if ((error = bus_dmamap_load(sc->sc_rdmatag,
310 sc->sc_rxdsoft[i].rxds_dmamap, sc->sc_rxdsoft[i].rxds_buf,
312 sc->sc_rxdsoft[i].rxds_paddr == 0) {
313 device_printf(sc->sc_dev,
314 "unable to load RX DMA map %d, error = %d\n",
320 if ((sc->sc_flags & CAS_SERDES) == 0) {
336 if (sc->sc_variant == CAS_SATURN) {
343 DELAY(10000);
345 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
358 if (sc->sc_variant == CAS_SATURN) {
365 DELAY(10000);
367 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
378 if (sc->sc_variant == CAS_SATURN) {
389 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
394 device_printf(sc->sc_dev, "attaching PHYs failed\n");
397 sc->sc_mii = device_get_softc(sc->sc_miibus);
407 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
411 ether_ifattach(ifp, sc->sc_enaddr);
418 if ((sc->sc_flags & CAS_NO_CSUM) == 0) {
432 if (sc->sc_rxdsoft[i].rxds_paddr != 0)
433 bus_dmamap_unload(sc->sc_rdmatag,
434 sc->sc_rxdsoft[i].rxds_dmamap);
437 if (sc->sc_rxdsoft[i].rxds_buf != NULL)
438 bus_dmamem_free(sc->sc_rdmatag,
439 sc->sc_rxdsoft[i].rxds_buf,
440 sc->sc_rxdsoft[i].rxds_dmamap);
443 if (sc->sc_txsoft[i].txs_dmamap != NULL)
444 bus_dmamap_destroy(sc->sc_tdmatag,
445 sc->sc_txsoft[i].txs_dmamap);
446 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
448 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
449 sc->sc_cddmamap);
451 bus_dma_tag_destroy(sc->sc_cdmatag);
453 bus_dma_tag_destroy(sc->sc_tdmatag);
455 bus_dma_tag_destroy(sc->sc_rdmatag);
457 bus_dma_tag_destroy(sc->sc_pdmatag);
459 taskqueue_free(sc->sc_tq);
467 if_t ifp = sc->sc_ifp;
474 callout_drain(&sc->sc_tick_ch);
475 callout_drain(&sc->sc_rx_ch);
476 taskqueue_drain(sc->sc_tq, &sc->sc_intr_task);
477 taskqueue_drain(sc->sc_tq, &sc->sc_tx_task);
479 taskqueue_free(sc->sc_tq);
480 bus_generic_detach(sc->sc_dev);
483 if (sc->sc_rxdsoft[i].rxds_dmamap != NULL)
484 bus_dmamap_sync(sc->sc_rdmatag,
485 sc->sc_rxdsoft[i].rxds_dmamap,
488 if (sc->sc_rxdsoft[i].rxds_paddr != 0)
489 bus_dmamap_unload(sc->sc_rdmatag,
490 sc->sc_rxdsoft[i].rxds_dmamap);
492 if (sc->sc_rxdsoft[i].rxds_buf != NULL)
493 bus_dmamem_free(sc->sc_rdmatag,
494 sc->sc_rxdsoft[i].rxds_buf,
495 sc->sc_rxdsoft[i].rxds_dmamap);
497 if (sc->sc_txsoft[i].txs_dmamap != NULL)
498 bus_dmamap_destroy(sc->sc_tdmatag,
499 sc->sc_txsoft[i].txs_dmamap);
501 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
502 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
503 sc->sc_cddmamap);
504 bus_dma_tag_destroy(sc->sc_cdmatag);
505 bus_dma_tag_destroy(sc->sc_tdmatag);
506 bus_dma_tag_destroy(sc->sc_rdmatag);
507 bus_dma_tag_destroy(sc->sc_pdmatag);
513 if_t ifp = sc->sc_ifp;
523 if_t ifp = sc->sc_ifp;
528 * after power-on.
530 sc->sc_flags &= ~CAS_INITED;
546 pktlen = m->m_pkthdr.len;
550 if (eh->ether_type != htons(ETHERTYPE_IP))
553 if (ip->ip_v != IPVERSION)
556 hlen = ip->ip_hl << 2;
557 pktlen -= sizeof(struct ether_header);
560 if (ntohs(ip->ip_len) < hlen)
562 if (ntohs(ip->ip_len) != pktlen)
564 if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
567 switch (ip->ip_p) {
576 if (uh->uh_sum == 0)
585 len = hlen - sizeof(struct ip);
588 for (; len > 0; len -= sizeof(uint16_t), opts++) {
589 temp32 = cksum - *opts;
594 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
595 m->m_pkthdr.csum_data = cksum;
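The fixup loop above (cas_rxcksum()) subtracts each IP option word from the hardware-supplied checksum and re-folds the carry before setting CSUM_DATA_VALID. A standalone sketch of that one's-complement adjustment step, mirroring the temp32 folding with a hypothetical helper name, not the driver's API:

#include <stdint.h>

/*
 * Remove one 16-bit word from a 16-bit one's-complement checksum and
 * fold the borrow back into the low 16 bits, as cas_rxcksum() does for
 * each IP option word it strips from the hardware checksum.
 * Hypothetical helper, not part of the driver.
 */
uint16_t
csum_pull16(uint16_t cksum, uint16_t word)
{
	uint32_t temp32;

	temp32 = (uint32_t)cksum - word;
	temp32 = (temp32 >> 16) + (temp32 & 0xffff);	/* fold the borrow */
	return (temp32 & 0xffff);
}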
607 sc->sc_cddma = segs[0].ds_addr;
618 panic("%s: bad RX buffer segment count", __func__);
619 sc->sc_rxdsoft[sc->sc_rxdptr].rxds_paddr = segs[0].ds_addr;
626 if_t ifp = sc->sc_ifp;
659 mii_tick(sc->sc_mii);
661 if (sc->sc_txfree != CAS_MAXTXFREE)
666 callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);
675 for (i = CAS_TRIES; i--; DELAY(100)) {
688 CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
697 * Do a full reset modulo the result of the last auto-negotiation
701 ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0));
704 DELAY(3000);
706 device_printf(sc->sc_dev, "cannot reset device\n");
716 CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
719 callout_stop(&sc->sc_tick_ch);
720 callout_stop(&sc->sc_rx_ch);
731 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
732 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
733 if (txs->txs_ndescs != 0) {
734 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
736 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
737 if (txs->txs_mbuf != NULL) {
738 m_freem(txs->txs_mbuf);
739 txs->txs_mbuf = NULL;
742 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
749 sc->sc_flags &= ~CAS_LINK;
750 sc->sc_wdog_timer = 0;
766 device_printf(sc->sc_dev, "cannot disable RX DMA\n");
770 ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0));
774 device_printf(sc->sc_dev, "cannot reset receiver\n");
793 device_printf(sc->sc_dev, "cannot disable TX DMA\n");
797 ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0));
801 device_printf(sc->sc_dev, "cannot reset transmitter\n");
818 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
833 device_printf(sc->sc_dev, "cannot disable TX MAC\n");
841 rxcomp->crc_word1 = 0;
842 rxcomp->crc_word2 = 0;
843 rxcomp->crc_word3 =
845 rxcomp->crc_word4 = htole64(CAS_RC4_ZERO);
859 sc->sc_txdescs[i].cd_flags = 0;
860 sc->sc_txdescs[i].cd_buf_ptr = 0;
862 sc->sc_txfree = CAS_MAXTXFREE;
863 sc->sc_txnext = 0;
864 sc->sc_txwin = 0;
870 cas_rxcompinit(&sc->sc_rxcomps[i]);
871 sc->sc_rxcptr = 0;
879 sc->sc_rxdptr = 0;
959 if_t ifp = sc->sc_ifp;
968 CTR2(KTR_CAS, "%s: %s: calling stop", device_get_name(sc->sc_dev),
975 * See also the STP2002-STQ document from Sun Microsystems.
982 CTR2(KTR_CAS, "%s: %s: restarting", device_get_name(sc->sc_dev),
986 if ((sc->sc_flags & CAS_SERDES) == 0)
987 /* Re-initialize the MIF. */
996 /* step 5. RX MAC registers & counters */
1014 if ((sc->sc_flags & CAS_REG_PLUS) != 0) {
1025 CAS_CDRXD2ADDR(sc, 0), sc->sc_cddma);
1038 (sc->sc_flags & CAS_TABORT) == 0 ? CAS_INF_BURST_EN :
1078 * enable DMA and disable pre-interrupt writeback completion.
1092 if ((sc->sc_flags & CAS_REG_PLUS) != 0)
1101 /* RX blanking */
1105 /* Set RX_COMP_AFULL threshold to half of the RX completions. */
1109 /* Initialize the RX page size register as appropriate for 8k. */
1116 /* Disable RX random early detection. */
1119 /* Zero the RX reassembly DMA table. */
1127 /* Ensure the RX control FIFO and RX IPP FIFO addresses are zero. */
1131 /* Finally, enable RX DMA. */
1141 sc->sc_mac_rxcfg = v;
1143 * Clear the RX filter and reprogram it. This will also set the
1144 * current RX MAC configuration and enable it.
1157 CAS_WRITE_4(sc, CAS_RX_KICK, CAS_NRXDESC - 4);
1159 if ((sc->sc_flags & CAS_REG_PLUS) != 0)
1160 CAS_WRITE_4(sc, CAS_RX_KICK2, CAS_NRXDESC2 - 4);
1165 mii_mediachg(sc->sc_mii);
1168 sc->sc_wdog_timer = 0;
1169 callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);
1185 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1191 if (((*m_head)->m_pkthdr.csum_flags & CAS_CSUM_FEATURES) != 0) {
1206 offset += (ip->ip_hl << 2);
1208 ((offset + m->m_pkthdr.csum_data) <<
1213 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
1223 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
1224 txs->txs_dmamap, *m_head, txsegs, &nsegs,
1246 * order to prevent wrap-around.
1248 if (nsegs > sc->sc_txfree - 1) {
1249 txs->txs_ndescs = 0;
1250 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1254 txs->txs_ndescs = nsegs;
1255 txs->txs_firstdesc = sc->sc_txnext;
1256 nexttx = txs->txs_firstdesc;
1264 sc->sc_txdescs[nexttx].cd_buf_ptr =
1269 sc->sc_txdescs[nexttx].cd_flags =
1271 txs->txs_lastdesc = nexttx;
1276 CTR3(KTR_CAS, "%s: end of frame at segment %d, TX %d",
1279 sc->sc_txdescs[txs->txs_lastdesc].cd_flags |=
1284 CTR3(KTR_CAS, "%s: start of frame at segment %d, TX %d",
1287 if (sc->sc_txwin += nsegs > CAS_MAXTXFREE * 2 / 3) {
1288 sc->sc_txwin = 0;
1289 sc->sc_txdescs[txs->txs_firstdesc].cd_flags |=
1292 sc->sc_txdescs[txs->txs_firstdesc].cd_flags |=
1295 /* Sync the DMA map. */
1296 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1301 __func__, txs->txs_firstdesc, txs->txs_lastdesc,
1302 txs->txs_ndescs);
1304 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1305 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1306 txs->txs_mbuf = *m_head;
1308 sc->sc_txnext = CAS_NEXTTX(txs->txs_lastdesc);
1309 sc->sc_txfree -= txs->txs_ndescs;
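The nsegs > sc->sc_txfree - 1 test further up deliberately leaves at least one TX descriptor unused so the ring can never become completely full (otherwise a full ring would look identical to an empty one to the producer/consumer indices). A generic producer-side sketch of that accounting, with hypothetical names rather than the driver's structures:

#include <stdbool.h>

#define TX_RING_SIZE	64	/* hypothetical ring size */

struct tx_ring {
	unsigned int next;	/* next descriptor handed to hardware */
	unsigned int free;	/* starts at TX_RING_SIZE */
};

/*
 * Reserve nsegs descriptors while always keeping one slot unused; the
 * caller is expected to defragment the mbuf chain or defer on failure.
 */
bool
tx_ring_reserve(struct tx_ring *r, unsigned int nsegs)
{

	if (nsegs >= r->free)	/* i.e. nsegs > free - 1 */
		return (false);
	r->next = (r->next + nsegs) % TX_RING_SIZE;
	r->free -= nsegs;
	return (true);
}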
1318 const u_char *laddr = if_getlladdr(sc->sc_ifp);
1323 if ((sc->sc_flags & CAS_INITED) == 0) {
1329 /* min frame length */
1331 /* max frame length and max burst size */
1349 i += CAS_MAC_ADDR4 - CAS_MAC_ADDR3)
1366 i += CAS_MAC_HASH1 - CAS_MAC_HASH0)
1369 sc->sc_flags |= CAS_INITED;
1418 device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
1421 CAS_WRITE_4(sc, CAS_TX_KICK3, sc->sc_txnext);
1434 IFF_DRV_RUNNING || (sc->sc_flags & CAS_LINK) == 0) {
1439 if (sc->sc_txfree < CAS_MAXTXFREE / 4)
1444 device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
1445 sc->sc_txnext);
1449 for (; !if_sendq_empty(ifp) && sc->sc_txfree > 1;) {
1460 if ((sc->sc_txnext % 4) == 0) {
1474 device_get_name(sc->sc_dev), sc->sc_txnext);
1478 sc->sc_wdog_timer = 5;
1481 device_get_name(sc->sc_dev), __func__,
1482 sc->sc_wdog_timer);
1492 if_t ifp = sc->sc_ifp;
1501 CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
1510 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1514 for (i = txs->txs_firstdesc;; i = CAS_NEXTTX(i)) {
1518 sc->sc_txdescs[i].cd_flags));
1521 sc->sc_txdescs[i].cd_buf_ptr));
1522 if (i == txs->txs_lastdesc)
1537 CTR4(KTR_CAS, "%s: txs->txs_firstdesc = %d, "
1538 "txs->txs_lastdesc = %d, txlast = %d",
1539 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
1541 if (txs->txs_firstdesc <= txs->txs_lastdesc) {
1542 if ((txlast >= txs->txs_firstdesc) &&
1543 (txlast <= txs->txs_lastdesc))
1546 /* Ick -- this command wraps. */
1547 if ((txlast >= txs->txs_firstdesc) ||
1548 (txlast <= txs->txs_lastdesc))
1555 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1557 sc->sc_txfree += txs->txs_ndescs;
1559 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1561 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1562 if (txs->txs_mbuf != NULL) {
1563 m_freem(txs->txs_mbuf);
1564 txs->txs_mbuf = NULL;
1567 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
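The firstdesc/lastdesc comparisons a few lines up decide whether the hardware completion index still lies inside the descriptor range occupied by a packet, including the "Ick -- this command wraps" case where the range crosses the end of the ring. The same test expressed as a minimal generic helper (hypothetical name):

#include <stdbool.h>

/*
 * Return true when idx lies inside the circular descriptor range
 * [first, last], handling a range that wraps past the end of the ring.
 */
bool
ring_range_contains(unsigned int first, unsigned int last, unsigned int idx)
{

	if (first <= last)
		return (idx >= first && idx <= last);
	/* The range wraps around the end of the ring. */
	return (idx >= first || idx <= last);
}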
1585 if (STAILQ_EMPTY(&sc->sc_txdirtyq))
1586 sc->sc_wdog_timer = 0;
1591 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
1612 if_t ifp = sc->sc_ifp;
1620 callout_stop(&sc->sc_rx_ch);
1623 CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
1630 KASSERT(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n == 0, \
1634 word ## n = le64toh(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n)
1642 CTR4(KTR_CAS, "%s: sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d",
1643 __func__, sc->sc_rxcptr, sc->sc_rxdptr, rxhead);
1647 for (; sc->sc_rxcptr != rxhead;
1648 sc->sc_rxcptr = CAS_NEXTRXCOMP(sc->sc_rxcptr)) {
1654 --skip;
1665 printf(" completion %d: ", sc->sc_rxcptr);
1684 callout_reset(&sc->sc_rx_ch, CAS_RXOWN_TICKS,
1692 device_printf(sc->sc_dev,
1717 rxds = &sc->sc_rxdsoft[idx];
1720 refcount_acquire(&rxds->rxds_refcount);
1721 bus_dmamap_sync(sc->sc_rdmatag,
1722 rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD);
1723 m_extadd(m, (char *)rxds->rxds_buf +
1727 if ((m->m_flags & M_EXT) == 0) {
1733 m->m_pkthdr.rcvif = ifp;
1734 m->m_pkthdr.len = m->m_len = len;
1747 refcount_release(&rxds->rxds_refcount) != 0)
1756 rxds = &sc->sc_rxdsoft[idx];
1759 refcount_acquire(&rxds->rxds_refcount);
1761 m->m_len = min(CAS_PAGE_SIZE - off, len);
1762 bus_dmamap_sync(sc->sc_rdmatag,
1763 rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD);
1764 m_extadd(m, (char *)rxds->rxds_buf + off,
1765 m->m_len, cas_free, sc,
1768 if ((m->m_flags & M_EXT) == 0) {
1786 rxds2 = &sc->sc_rxdsoft[idx2];
1791 &rxds2->rxds_refcount);
1792 m2->m_len = len - m->m_len;
1794 sc->sc_rdmatag,
1795 rxds2->rxds_dmamap,
1798 (char *)rxds2->rxds_buf,
1799 m2->m_len, cas_free, sc,
1802 if ((m2->m_flags & M_EXT) ==
1810 m->m_next = m2;
1817 m->m_pkthdr.rcvif = ifp;
1818 m->m_pkthdr.len = len;
1831 refcount_release(&rxds->rxds_refcount) != 0)
1834 refcount_release(&rxds2->rxds_refcount) != 0)
1841 cas_rxcompinit(&sc->sc_rxcomps[sc->sc_rxcptr]);
1846 CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, sc->sc_rxcptr);
1853 CTR4(KTR_CAS, "%s: done sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d",
1854 __func__, sc->sc_rxcptr, sc->sc_rxdptr,
1866 sc = m->m_ext.ext_arg1;
1867 idx = (uintptr_t)m->m_ext.ext_arg2;
1868 rxds = &sc->sc_rxdsoft[idx];
1869 if (refcount_release(&rxds->rxds_refcount) == 0)
1889 bus_dmamap_sync(sc->sc_rdmatag, sc->sc_rxdsoft[idx].rxds_dmamap,
1891 CAS_UPDATE_RXDESC(sc, sc->sc_rxdptr, idx);
1892 sc->sc_rxdptr = CAS_NEXTRXDESC(sc->sc_rxdptr);
1895 * Update the RX kick register. This register has to point to the
1900 if ((sc->sc_rxdptr % 4) == 0) {
1903 (sc->sc_rxdptr + CAS_NRXDESC - 4) & CAS_NRXDESC_MASK);
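The kick update above only happens on 4-descriptor boundaries and points the register four entries behind the software replenish index sc_rxdptr. A small standalone illustration of that arithmetic, assuming a power-of-two ring size and hypothetical names:

#include <stdio.h>

#define NRXDESC		256		/* hypothetical, power of two */
#define NRXDESC_MASK	(NRXDESC - 1)

/*
 * The RX kick value must be 4-descriptor aligned and trail the replenish
 * pointer, so an update is only computed when the pointer crosses a
 * multiple-of-4 boundary; otherwise return -1 (no update this round).
 */
int
rx_kick_value(unsigned int rxdptr)
{

	if ((rxdptr % 4) != 0)
		return (-1);
	return ((rxdptr + NRXDESC - 4) & NRXDESC_MASK);
}

int
main(void)
{

	/* rxdptr 0 kicks descriptor 252, 8 kicks 4, 9 triggers no update. */
	printf("%d %d %d\n", rx_kick_value(0), rx_kick_value(8),
	    rx_kick_value(9));
	return (0);
}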
1910 if_t ifp = sc->sc_ifp;
1916 device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status);
1921 status = pci_read_config(sc->sc_dev, PCIR_STATUS, 2);
1923 pci_write_config(sc->sc_dev, PCIR_STATUS, status, 2);
1931 taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
1945 taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
1954 if_t ifp = sc->sc_ifp;
1969 device_get_name(sc->sc_dev), __func__,
1980 device_printf(sc->sc_dev,
1986 device_printf(sc->sc_dev,
1992 device_printf(sc->sc_dev,
1995 device_printf(sc->sc_dev,
1996 "%s: transited to non-PAUSE state\n", __func__);
1999 device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
2016 device_printf(sc->sc_dev,
2025 device_printf(sc->sc_dev,
2026 "MAC RX fault, status %x\n", status2);
2037 device_printf(sc->sc_dev,
2038 "RX fault, status %x\n", status);
2050 taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
2055 taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
2060 /* Re-enable interrupts. */
2076 if_t ifp = sc->sc_ifp;
2093 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
2096 if ((sc->sc_flags & CAS_LINK) != 0)
2097 device_printf(sc->sc_dev, "device timeout\n");
2099 device_printf(sc->sc_dev, "device timeout (no link)\n");
2106 taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
2113 /* Configure the MIF in frame mode. */
2127 * Frame mode is implemented by loading a complete frame into the frame
2130 * Polling mode uses the frame register but completion is indicated by
2146 if ((sc->sc_flags & CAS_SERDES) != 0) {
2166 device_printf(sc->sc_dev,
2173 /* Construct the frame command. */
2182 DELAY(1);
2188 device_printf(sc->sc_dev, "%s: timed out\n", __func__);
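The MIF comments above describe frame mode: software loads a complete IEEE 802.3 Clause 22 management frame into the frame register and the chip signals completion (polling mode differs only in how completion is indicated). An illustrative packing of such a read frame, using the usual Clause 22 field layout rather than the driver's own macros:

#include <stdint.h>

/*
 * Typical 32-bit MIF frame register layout (illustrative; consult the
 * chip documentation for the authoritative definition):
 *   [31:30] start of frame (01)     [29:28] opcode (10 read, 01 write)
 *   [27:23] PHY address             [22:18] register address
 *   [17:16] turnaround              [15:0]  data / read result
 */
uint32_t
mif_read_frame(unsigned int phy, unsigned int reg)
{

	return ((1u << 30) |			/* start of frame */
	    (2u << 28) |			/* read opcode */
	    ((phy & 0x1fu) << 23) |
	    ((reg & 0x1fu) << 18) |
	    (1u << 17));			/* turnaround pattern */
}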
2204 if ((sc->sc_flags & CAS_SERDES) != 0) {
2218 device_printf(sc->sc_dev,
2241 device_printf(sc->sc_dev,
2251 /* Construct the frame command. */
2261 DELAY(1);
2267 device_printf(sc->sc_dev, "%s: timed out\n", __func__);
2280 ifp = sc->sc_ifp;
2286 device_printf(sc->sc_dev, "%s: status change\n", __func__);
2289 if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
2290 IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
2291 sc->sc_flags |= CAS_LINK;
2293 sc->sc_flags &= ~CAS_LINK;
2295 switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
2308 * G) and as far as enabling of RX and TX MAC goes also step H)
2313 rxcfg = sc->sc_mac_rxcfg;
2317 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
2330 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
2333 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
2340 * to be calculated when letting them strip the FCS in half-
2344 * hardware checksumming in half-duplex mode though.
2346 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) {
2349 } else if ((sc->sc_flags & CAS_NO_CSUM) == 0) {
2354 if (sc->sc_variant == CAS_SATURN) {
2355 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0)
2362 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
2372 if ((sc->sc_flags & CAS_SERDES) == 0) {
2373 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0)
2379 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
2383 sc->sc_mac_rxcfg = rxcfg;
2385 (sc->sc_flags & CAS_LINK) != 0) {
2402 error = mii_mediachg(sc->sc_mii);
2418 mii_pollstat(sc->sc_mii);
2419 ifmr->ifm_active = sc->sc_mii->mii_media_active;
2420 ifmr->ifm_status = sc->sc_mii->mii_media_status;
2437 ((if_getflags(ifp) ^ sc->sc_ifflags) &
2444 sc->sc_ifflags = if_getflags(ifp);
2449 if ((sc->sc_flags & CAS_NO_CSUM) != 0) {
2454 if_setcapenable(ifp, ifr->ifr_reqcap);
2469 if ((ifr->ifr_mtu < ETHERMIN) ||
2470 (ifr->ifr_mtu > ETHERMTU_JUMBO))
2473 if_setmtu(ifp, ifr->ifr_mtu);
2477 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
2493 /* We just want the 8 most significant bits. */
2496 hash[crc >> 4] |= 1 << (15 - (crc & 15));
2504 if_t ifp = sc->sc_ifp;
2512 * Turn off the RX MAC and the hash filter as required by the Sun
2515 v = sc->sc_mac_rxcfg & ~(CAS_MAC_RX_CONF_HFILTER |
2522 device_printf(sc->sc_dev,
2523 "cannot disable RX MAC or hash filter\n");
2538 * order 8 bits as an index into the 256 bit logical address
2539 * filter. The high order 4 bits selects the word, while the
2540 * other 4 bits select the bit within the word (where bit 0
2552 CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0),
2556 sc->sc_mac_rxcfg = v;
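cas_setladrf() above hashes each multicast address and, as the comment explains, uses the 8 most significant CRC bits to index a 256-bit logical address filter spread over sixteen 16-bit hash registers. A standalone sketch of just that bit mapping, taking the CRC value as given:

#include <stdint.h>

/*
 * Map the top 8 bits of an Ethernet CRC onto sixteen 16-bit hash words:
 * the upper 4 of those bits select the word, the lower 4 select the bit
 * within it, with bit 0 corresponding to the word's most significant bit.
 */
void
hash_set_bit(uint16_t hash[16], uint32_t crc)
{

	crc >>= 24;				/* keep the 8 MSBs */
	hash[crc >> 4] |= 1 << (15 - (crc & 15));
}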
2604 nitems(cas_pci_devlist) - 1);
2627 { -1, 0 }
2630 #define CAS_LOCAL_MAC_ADDRESS "local-mac-address"
2631 #define CAS_PHY_INTERFACE "phy-interface"
2632 #define CAS_PHY_TYPE "phy-type"
2647 sc->sc_variant = CAS_UNKNOWN;
2651 sc->sc_variant = cas_pci_devlist[i].cpd_variant;
2655 if (sc->sc_variant == CAS_UNKNOWN) {
2665 sc->sc_dev = dev;
2666 if (sc->sc_variant == CAS_CAS && pci_get_revid(dev) < 0x02)
2668 sc->sc_flags |= CAS_NO_CSUM;
2669 if (sc->sc_variant == CAS_CASPLUS || sc->sc_variant == CAS_SATURN)
2670 sc->sc_flags |= CAS_REG_PLUS;
2671 if (sc->sc_variant == CAS_CAS ||
2672 (sc->sc_variant == CAS_CASPLUS && pci_get_revid(dev) < 0x11))
2673 sc->sc_flags |= CAS_TABORT;
2675 device_printf(dev, "flags=0x%x\n", sc->sc_flags);
2677 if (bus_alloc_resources(dev, cas_pci_res_spec, sc->sc_res)) {
2679 bus_release_resources(dev, cas_pci_res_spec, sc->sc_res);
2686 OF_getetheraddr(dev, sc->sc_enaddr);
2690 buf[sizeof(buf) - 1] = '\0';
2692 sc->sc_flags |= CAS_SERDES;
2699 * SUNW,pci-ce and SUNW,pci-qge use the Enhanced VPD format described
2778 i -= PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN),
2791 bus_read_region_1(sc->sc_res[CAS_RES_MEM],
2794 buf[sizeof(buf) - 1] = '\0';
2797 bus_read_region_1(sc->sc_res[CAS_RES_MEM],
2811 bus_read_region_1(sc->sc_res[CAS_RES_MEM],
2814 buf[sizeof(buf) - 1] = '\0';
2821 bus_read_region_1(sc->sc_res[CAS_RES_MEM],
2824 buf[sizeof(buf) - 1] = '\0';
2848 memcpy(sc->sc_enaddr, enaddr[i], ETHER_ADDR_LEN);
2858 sc->sc_flags |= CAS_SERDES;
2866 if (bus_setup_intr(dev, sc->sc_res[CAS_RES_INTR], INTR_TYPE_NET |
2867 INTR_MPSAFE, cas_intr, NULL, sc, &sc->sc_ih) != 0) {
2876 bus_release_resources(dev, cas_pci_res_spec, sc->sc_res);
2886 bus_teardown_intr(dev, sc->sc_res[CAS_RES_INTR], sc->sc_ih);
2889 bus_release_resources(dev, cas_pci_res_spec, sc->sc_res);