Lines Matching +full:zynqmp +full:-gem

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2012-2014 Thomas Skibo <thomasskibo@yahoo.com>
30 * A network interface driver for Cadence GEM Gigabit Ethernet
31 * interface such as the one used in Xilinx Zynq-7000 SoC.
33 * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
34 * (v1.4) November 16, 2012. Xilinx doc UG585. GEM is covered in Ch. 16
106 { "cdns,zynq-gem", HWQUIRK_RXHANGWAR }, /* Deprecated */
107 { "cdns,zynqmp-gem", HWQUIRK_NEEDNULLQS }, /* Deprecated */
108 { "xlnx,zynq-gem", HWQUIRK_RXHANGWAR },
109 { "xlnx,zynqmp-gem", HWQUIRK_NEEDNULLQS },
110 { "microchip,mpfs-mss-gem", HWQUIRK_NEEDNULLQS },
111 { "sifive,fu540-c000-gem", HWQUIRK_NONE },
112 { "sifive,fu740-c000-gem", HWQUIRK_NONE },
150 int rxhangwar; /* rx hang work-around */
220 #define RD4(sc, off) (bus_read_4((sc)->mem_res, (off)))
221 #define WR4(sc, off, val) (bus_write_4((sc)->mem_res, (off), (val)))
223 (bus_barrier((sc)->mem_res, (off), (len), (flags)))
225 #define CGEM_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
226 #define CGEM_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
227 #define CGEM_LOCK_INIT(sc) mtx_init(&(sc)->sc_mtx, \
228 device_get_nameunit((sc)->dev), MTX_NETWORK_LOCK, MTX_DEF)
229 #define CGEM_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx)
230 #define CGEM_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
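
All register state below is mirrored in software shadows (net_cfg_shadow,
net_ctl_shadow) that are modified only while sc_mtx is held and then written
out with WR4(). A hypothetical helper illustrating that read-modify-write
pattern (cgem_set_promisc is not in the source; CGEM_NET_CFG_COPY_ALL is):

    static void
    cgem_set_promisc(struct cgem_softc *sc, bool on)
    {
            CGEM_ASSERT_LOCKED(sc);
            if (on)
                    sc->net_cfg_shadow |= CGEM_NET_CFG_COPY_ALL;
            else
                    sc->net_cfg_shadow &= ~CGEM_NET_CFG_COPY_ALL;
            /* The shadow is authoritative; the register is only written. */
            WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
    }
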
275 device_printf(sc->dev, "no mac address found, assigning "
292 * cgem_mac_hash(): map 48-bit address to a 6-bit hash. The 6-bit hash
293 * corresponds to a bit in a 64-bit hash register. Setting that bit in the
295 * that hashes to that 6-bit value.
297 * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
298 * Reference Manual. Bits 0-5 in the hash are the exclusive-or of
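
The matched comment breaks off mid-sentence; the function it documents folds
the 48 destination-address bits six at a time. A sketch consistent with that
description and with the standard GEM hash (the body between lines 298 and 324
is elided by the match):

    static uint32_t
    cgem_mac_hash(u_char eaddr[])
    {
            uint32_t hash = 0;
            int i, j;

            /* Hash bit i is the XOR of address bits i, i+6, i+12, ..., i+42. */
            for (i = 0; i < 6; i++)
                    for (j = i; j < 48; j += 6)
                            if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
                                    hash ^= (1U << i);
            return (hash);
    }

The 6-bit result selects one bit across the two 32-bit halves of the hash
register, which is what the index test at line 324 implements.
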
324 hashes[0] |= (1U << (index - 32));
332 * After any change in rx flags or multi-cast addresses, set up hash registers
338 if_t ifp = sc->ifp;
341 sc->net_cfg_shadow &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
345 sc->net_cfg_shadow |= CGEM_NET_CFG_COPY_ALL;
348 sc->net_cfg_shadow |= CGEM_NET_CFG_NO_BCAST;
356 sc->net_cfg_shadow |= CGEM_NET_CFG_MULTI_HASH_EN;
361 WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
390 memset(sc->null_qs, 0, sizeof(struct cgem_rx_desc) +
392 rx_desc = sc->null_qs;
393 rx_desc->addr = CGEM_RXDESC_OWN | CGEM_RXDESC_WRAP;
395 tx_desc->ctl = CGEM_TXDESC_USED | CGEM_TXDESC_WRAP;
399 WR4(sc, CGEM_RX_QN_BAR(n), sc->null_qs_physaddr);
400 WR4(sc, CGEM_TX_QN_BAR(n), sc->null_qs_physaddr +
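
Lines 390-400 are the null-queue setup used on NEEDNULLQS parts: hardware with
multiple priority queues requires every queue to have valid descriptors, so the
unused queues are pointed at one permanently exhausted RX/TX descriptor pair.
A reconstruction of the elided pieces (the tx_desc assignment and the loop
header; the queue count n_queues is assumed to come from a design configuration
register):

    rx_desc = sc->null_qs;
    rx_desc->addr = CGEM_RXDESC_OWN | CGEM_RXDESC_WRAP;  /* never free for RX */
    tx_desc = (struct cgem_tx_desc *)(rx_desc + 1);
    tx_desc->ctl = CGEM_TXDESC_USED | CGEM_TXDESC_WRAP;  /* never ready for TX */

    for (n = 1; n < n_queues; n++) {
            WR4(sc, CGEM_RX_QN_BAR(n), sc->null_qs_physaddr);
            WR4(sc, CGEM_TX_QN_BAR(n), sc->null_qs_physaddr +
                sizeof(struct cgem_rx_desc));
    }
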
413 if (sc->neednullqs)
417 sc->txring = NULL;
418 sc->rxring = NULL;
420 /* Allocate non-cached DMA space for RX and TX descriptors. */
421 err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1,
429 busdma_lock_mutex, &sc->sc_mtx, &sc->desc_dma_tag);
434 err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
436 TX_MAX_DMA_SEGS, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx,
437 &sc->mbuf_dma_tag);
447 err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->rxring,
449 &sc->rxring_dma_map);
454 err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
455 (void *)sc->rxring, desc_rings_size,
456 cgem_getaddr, &sc->rxring_physaddr, BUS_DMA_NOWAIT);
462 sc->rxring[i].addr = CGEM_RXDESC_OWN;
463 sc->rxring[i].ctl = 0;
464 sc->rxring_m[i] = NULL;
465 sc->rxring_m_dmamap[i] = NULL;
467 sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;
469 sc->rxring_hd_ptr = 0;
470 sc->rxring_tl_ptr = 0;
471 sc->rxring_queued = 0;
473 sc->txring = (struct cgem_tx_desc *)(sc->rxring + CGEM_NUM_RX_DESCS);
474 sc->txring_physaddr = sc->rxring_physaddr + CGEM_NUM_RX_DESCS *
479 sc->txring[i].addr = 0;
480 sc->txring[i].ctl = CGEM_TXDESC_USED;
481 sc->txring_m[i] = NULL;
482 sc->txring_m_dmamap[i] = NULL;
484 sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;
486 sc->txring_hd_ptr = 0;
487 sc->txring_tl_ptr = 0;
488 sc->txring_queued = 0;
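
Both rings are producer/consumer rings: *_hd_ptr is where the driver inserts,
*_tl_ptr is where it reclaims, and *_queued counts descriptors in flight. The
WRAP flag in the last descriptor makes the controller loop back to the ring
base, and software wraps its indices the same way, as in this hypothetical
helper capturing the arithmetic used throughout:

    static inline int
    cgem_next_idx(int idx, int ring_size)
    {
            /* Mirror the hardware WRAP: the last slot advances to zero. */
            return (idx + 1 == ring_size ? 0 : idx + 1);
    }
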
490 if (sc->neednullqs) {
491 sc->null_qs = (void *)(sc->txring + CGEM_NUM_TX_DESCS);
492 sc->null_qs_physaddr = sc->txring_physaddr +
511 while (sc->rxring_queued < sc->rxbufs) {
517 m->m_len = MCLBYTES;
518 m->m_pkthdr.len = MCLBYTES;
519 m->m_pkthdr.rcvif = sc->ifp;
522 if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
523 &sc->rxring_m_dmamap[sc->rxring_hd_ptr])) {
524 sc->rxdmamapfails++;
528 if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
529 sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
531 sc->rxdmamapfails++;
532 bus_dmamap_destroy(sc->mbuf_dma_tag,
533 sc->rxring_m_dmamap[sc->rxring_hd_ptr]);
534 sc->rxring_m_dmamap[sc->rxring_hd_ptr] = NULL;
538 sc->rxring_m[sc->rxring_hd_ptr] = m;
541 bus_dmamap_sync(sc->mbuf_dma_tag,
542 sc->rxring_m_dmamap[sc->rxring_hd_ptr],
546 sc->rxring[sc->rxring_hd_ptr].ctl = 0;
548 sc->rxring[sc->rxring_hd_ptr].addrhi = segs[0].ds_addr >> 32;
550 if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
551 sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
553 sc->rxring_hd_ptr = 0;
555 sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;
557 sc->rxring_queued++;
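
Ownership of an RX descriptor passes to the controller on the final store of
the low address word: the OWN and WRAP flags live in the low bits of addr (see
lines 462 and 467 above), so ctl and addrhi are set up first and addr is
written last with OWN clear. Schematically (idx and last are illustrative):

    sc->rxring[idx].ctl = 0;
    sc->rxring[idx].addrhi = segs[0].ds_addr >> 32;  /* 64-bit DMA variants */
    /* This store publishes the descriptor to the controller: */
    sc->rxring[idx].addr = segs[0].ds_addr | (last ? CGEM_RXDESC_WRAP : 0);
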
565 if_t ifp = sc->ifp;
574 while (sc->rxring_queued > 0 &&
575 (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {
576 ctl = sc->rxring[sc->rxring_tl_ptr].ctl;
579 m = sc->rxring_m[sc->rxring_tl_ptr];
580 sc->rxring_m[sc->rxring_tl_ptr] = NULL;
583 bus_dmamap_sync(sc->mbuf_dma_tag,
584 sc->rxring_m_dmamap[sc->rxring_tl_ptr],
588 bus_dmamap_unload(sc->mbuf_dma_tag,
589 sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
590 bus_dmamap_destroy(sc->mbuf_dma_tag,
591 sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
592 sc->rxring_m_dmamap[sc->rxring_tl_ptr] = NULL;
595 if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
596 sc->rxring_tl_ptr = 0;
597 sc->rxring_queued--;
614 m->m_data += ETHER_ALIGN;
615 m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
616 m->m_pkthdr.rcvif = ifp;
617 m->m_pkthdr.len = m->m_len;
629 m->m_pkthdr.csum_flags |=
632 m->m_pkthdr.csum_data = 0xffff;
636 m->m_pkthdr.csum_flags |=
638 m->m_pkthdr.csum_data = 0xffff;
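
Lines 629-638 translate the descriptor's checksum status into mbuf checksum
results. The flag values are elided by the match; they presumably follow the
usual FreeBSD convention, roughly as below (tcp_or_udp_good and ip_good stand
in for the per-descriptor status tests):

    if (tcp_or_udp_good) {
            m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID |
                CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
            m->m_pkthdr.csum_data = 0xffff;  /* "good" pseudo-header checksum */
    } else if (ip_good) {
            m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
            m->m_pkthdr.csum_data = 0xffff;
    }
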
644 m_tl = &m->m_next;
654 m_hd = m_hd->m_next;
655 m->m_next = NULL;
672 while (sc->txring_queued > 0 &&
673 ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
676 bus_dmamap_sync(sc->mbuf_dma_tag,
677 sc->txring_m_dmamap[sc->txring_tl_ptr],
681 bus_dmamap_unload(sc->mbuf_dma_tag,
682 sc->txring_m_dmamap[sc->txring_tl_ptr]);
683 bus_dmamap_destroy(sc->mbuf_dma_tag,
684 sc->txring_m_dmamap[sc->txring_tl_ptr]);
685 sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL;
688 m = sc->txring_m[sc->txring_tl_ptr];
689 sc->txring_m[sc->txring_tl_ptr] = NULL;
696 device_printf(sc->dev,
698 sc->txring[sc->txring_tl_ptr].addrhi,
699 sc->txring[sc->txring_tl_ptr].addr);
701 device_printf(sc->dev,
703 sc->txring[sc->txring_tl_ptr].addr);
707 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
709 if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
714 * start-of-frame descriptors are processed.
718 sc->txring_tl_ptr = 0;
720 sc->txring_tl_ptr++;
721 sc->txring_queued--;
723 ctl = sc->txring[sc->txring_tl_ptr].ctl;
725 sc->txring[sc->txring_tl_ptr].ctl =
731 sc->txring_tl_ptr = 0;
733 sc->txring_tl_ptr++;
734 sc->txring_queued--;
736 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
757 if (sc->txring_queued >=
758 CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
763 if (sc->txring_queued >=
764 CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
766 sc->txfull++;
777 if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
778 &sc->txring_m_dmamap[sc->txring_hd_ptr])) {
780 sc->txdmamapfails++;
783 err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
784 sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs, &nsegs,
791 sc->txdefragfails++;
793 bus_dmamap_destroy(sc->mbuf_dma_tag,
794 sc->txring_m_dmamap[sc->txring_hd_ptr]);
795 sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
799 err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
800 sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs,
802 sc->txdefrags++;
807 bus_dmamap_destroy(sc->mbuf_dma_tag,
808 sc->txring_m_dmamap[sc->txring_hd_ptr]);
809 sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
810 sc->txdmamapfails++;
813 sc->txring_m[sc->txring_hd_ptr] = m;
816 bus_dmamap_sync(sc->mbuf_dma_tag,
817 sc->txring_m_dmamap[sc->txring_hd_ptr],
821 wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
828 for (i = nsegs - 1; i >= 0; i--) {
830 sc->txring[sc->txring_hd_ptr + i].addr =
833 sc->txring[sc->txring_hd_ptr + i].addrhi =
838 if (i == nsegs - 1) {
843 sc->txring[sc->txring_hd_ptr + i].ctl = ctl;
846 sc->txring_m[sc->txring_hd_ptr + i] = NULL;
850 sc->txring_hd_ptr = 0;
852 sc->txring_hd_ptr += nsegs;
853 sc->txring_queued += nsegs;
856 WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
881 sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
882 sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;
884 sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
885 sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
886 sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
887 sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
888 sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
889 sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
890 sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
891 sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
892 sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
893 sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
894 sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);
897 sc->stats.tx_single_collisn += n;
898 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
900 sc->stats.tx_multi_collisn += n;
901 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
903 sc->stats.tx_excsv_collisn += n;
904 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
906 sc->stats.tx_late_collisn += n;
907 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
909 sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
910 sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);
912 sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
913 sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;
915 sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
916 sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
917 sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
918 sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
919 sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
920 sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
921 sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
922 sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
923 sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
924 sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
925 sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
926 sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
927 sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
928 sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
929 sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
930 sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
931 sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
932 sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
933 sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
934 sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
935 sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
936 sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
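
The unconditional '+=' harvesting above works because the GEM statistics
registers clear on read, so each RD4() returns the delta since the previous
poll; the poll must then run often enough that no 32-bit counter wraps between
polls, which is one job of the one-second tick callout below. The collision
counters also feed the ifnet counter, as around lines 897-898 (the RD4 line is
elided by the match and the register name is an assumption):

    n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
    sc->stats.tx_single_collisn += n;
    if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
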
948 if (sc->miibus != NULL) {
949 mii = device_get_softc(sc->miibus);
957 if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
963 WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
966 WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
968 sc->rx_frames_prev = sc->stats.rx_frames;
971 callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
979 if_t ifp = sc->ifp;
1002 device_printf(sc->dev,
1012 sc->rxoverruns++;
1017 WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
1020 sc->rxnobufs++;
1041 sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_64;
1044 sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_128;
1047 sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_32;
1051 WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
1062 sc->net_cfg_shadow |= CGEM_NET_CFG_MDC_CLK_DIV_48;
1063 WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
1065 sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
1066 WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
1073 if_t ifp = sc->ifp;
1080 sc->net_cfg_shadow &= (CGEM_NET_CFG_MDC_CLK_DIV_MASK |
1082 sc->net_cfg_shadow |= (CGEM_NET_CFG_FCS_REMOVE |
1088 if (sc->phy_contype == MII_CONTYPE_SGMII) {
1089 sc->net_cfg_shadow |= CGEM_NET_CFG_SGMII_EN;
1090 sc->net_cfg_shadow |= CGEM_NET_CFG_PCS_SEL;
1095 sc->net_cfg_shadow |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
1097 WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
1116 WR4(sc, CGEM_RX_QBAR, (uint32_t)sc->rxring_physaddr);
1117 WR4(sc, CGEM_TX_QBAR, (uint32_t)sc->txring_physaddr);
1119 WR4(sc, CGEM_RX_QBAR_HI, (uint32_t)(sc->rxring_physaddr >> 32));
1120 WR4(sc, CGEM_TX_QBAR_HI, (uint32_t)(sc->txring_physaddr >> 32));
1124 sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
1125 WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
1146 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0)
1152 if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
1154 if (sc->miibus != NULL) {
1155 mii = device_get_softc(sc->miibus);
1159 callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
1180 callout_stop(&sc->tick_ch);
1186 memset(sc->txring, 0, CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc));
1188 sc->txring[i].ctl = CGEM_TXDESC_USED;
1189 if (sc->txring_m[i]) {
1191 bus_dmamap_unload(sc->mbuf_dma_tag,
1192 sc->txring_m_dmamap[i]);
1193 bus_dmamap_destroy(sc->mbuf_dma_tag,
1194 sc->txring_m_dmamap[i]);
1195 sc->txring_m_dmamap[i] = NULL;
1196 m_freem(sc->txring_m[i]);
1197 sc->txring_m[i] = NULL;
1200 sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;
1202 sc->txring_hd_ptr = 0;
1203 sc->txring_tl_ptr = 0;
1204 sc->txring_queued = 0;
1207 memset(sc->rxring, 0, CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc));
1209 sc->rxring[i].addr = CGEM_RXDESC_OWN;
1210 if (sc->rxring_m[i]) {
1212 bus_dmamap_unload(sc->mbuf_dma_tag,
1213 sc->rxring_m_dmamap[i]);
1214 bus_dmamap_destroy(sc->mbuf_dma_tag,
1215 sc->rxring_m_dmamap[i]);
1216 sc->rxring_m_dmamap[i] = NULL;
1218 m_freem(sc->rxring_m[i]);
1219 sc->rxring_m[i] = NULL;
1222 sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;
1224 sc->rxring_hd_ptr = 0;
1225 sc->rxring_tl_ptr = 0;
1226 sc->rxring_queued = 0;
1229 sc->mii_media_active = 0;
1245 if (((if_getflags(ifp) ^ sc->if_old_flags) &
1256 sc->if_old_flags = if_getflags(ifp);
1262 /* Set up multi-cast filters. */
1272 if (sc->miibus == NULL)
1274 mii = device_get_softc(sc->miibus);
1275 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1280 mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
1283 if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
1304 if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
1308 sc->net_cfg_shadow |=
1310 WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
1315 sc->net_cfg_shadow &=
1317 WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
1346 mii = device_get_softc(sc->miibus);
1349 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1364 mii = device_get_softc(sc->miibus);
1367 ifmr->ifm_active = mii->mii_media_active;
1368 ifmr->ifm_status = mii->mii_media_status;
1389 return (-1);
1397 * MAC does not support half-duplex at gig speeds.
1423 return (-1);
1434 struct mii_data *mii = device_get_softc(sc->miibus);
1438 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1440 sc->mii_media_active != mii->mii_media_active)
1448 struct mii_data *mii = device_get_softc(sc->miibus);
1452 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1454 sc->mii_media_active != mii->mii_media_active)
1479 sc->net_cfg_shadow &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
1482 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1484 sc->net_cfg_shadow |= (CGEM_NET_CFG_SPEED100 |
1489 sc->net_cfg_shadow |= CGEM_NET_CFG_SPEED100;
1496 if ((mii->mii_media_active & IFM_FDX) != 0)
1497 sc->net_cfg_shadow |= CGEM_NET_CFG_FULL_DUPLEX;
1499 WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
1501 if (sc->clk_pclk != NULL) {
1503 if (clk_set_freq(sc->clk_pclk, ref_clk_freq, 0))
1504 device_printf(sc->dev, "could not set ref clk to %d\n",
1509 sc->mii_media_active = mii->mii_media_active;
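
Lines 1479-1509 reprogram the MAC speed bits and, where the platform exposes a
tunable reference clock, retune it to the negotiated link speed. A
reconstruction of the switch those lines come from, using the conventional
reference frequencies (the numeric values are assumptions, not part of the
match):

    switch (IFM_SUBTYPE(mii->mii_media_active)) {
    case IFM_1000_T:
            sc->net_cfg_shadow |= (CGEM_NET_CFG_SPEED100 |
                CGEM_NET_CFG_GIGE_EN);
            ref_clk_freq = 125000000;   /* 125 MHz for 1000BASE-T */
            break;
    case IFM_100_TX:
            sc->net_cfg_shadow |= CGEM_NET_CFG_SPEED100;
            ref_clk_freq = 25000000;    /* 25 MHz for 100BASE-TX */
            break;
    default:
            ref_clk_freq = 2500000;     /* 2.5 MHz for 10BASE-T */
    }
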
1524 &sc->rxbufs, 0, "Number receive buffers to provide");
1527 &sc->rxhangwar, 0, "Enable receive hang work-around");
1530 &sc->rxoverruns, 0, "Receive overrun events");
1533 &sc->rxnobufs, 0, "Receive buf queue empty events");
1536 &sc->rxdmamapfails, 0, "Receive DMA map failures");
1539 &sc->txfull, 0, "Transmit ring full events");
1542 &sc->txdmamapfails, 0, "Transmit DMA map failures");
1545 &sc->txdefrags, 0, "Transmit m_defrag() calls");
1548 &sc->txdefragfails, 0, "Transmit m_defrag() failures");
1551 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "GEM statistics");
1555 &sc->stats.tx_bytes, "Total bytes transmitted");
1558 &sc->stats.tx_frames, 0, "Total frames transmitted");
1561 &sc->stats.tx_frames_bcast, 0,
1565 &sc->stats.tx_frames_multi, 0,
1569 CTLFLAG_RD, &sc->stats.tx_frames_pause, 0,
1573 &sc->stats.tx_frames_64b, 0,
1577 &sc->stats.tx_frames_65to127b, 0,
1578 "Number frames transmitted of size 65-127 bytes");
1581 CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0,
1582 "Number frames transmitted of size 128-255 bytes");
1585 CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0,
1586 "Number frames transmitted of size 256-511 bytes");
1589 CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0,
1590 "Number frames transmitted of size 512-1023 bytes");
1593 CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0,
1594 "Number frames transmitted of size 1024-1536 bytes");
1597 CTLFLAG_RD, &sc->stats.tx_under_runs, 0,
1598 "Number transmit under-run events");
1601 CTLFLAG_RD, &sc->stats.tx_single_collisn, 0,
1602 "Number single-collision transmit frames");
1605 CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0,
1606 "Number multi-collision transmit frames");
1609 CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0,
1613 CTLFLAG_RD, &sc->stats.tx_late_collisn, 0,
1614 "Number late-collision transmit frames");
1617 CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0,
1621 CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0,
1625 &sc->stats.rx_bytes, "Total bytes received");
1628 &sc->stats.rx_frames, 0, "Total frames received");
1631 CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0,
1635 CTLFLAG_RD, &sc->stats.rx_frames_multi, 0,
1639 CTLFLAG_RD, &sc->stats.rx_frames_pause, 0,
1643 CTLFLAG_RD, &sc->stats.rx_frames_64b, 0,
1647 CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0,
1648 "Number frames received of size 65-127 bytes");
1651 CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0,
1652 "Number frames received of size 128-255 bytes");
1655 CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0,
1656 "Number frames received of size 256-511 bytes");
1659 CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0,
1660 "Number frames received of size 512-1023 bytes");
1663 CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0,
1664 "Number frames received of size 1024-1536 bytes");
1667 CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0,
1671 CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0,
1675 CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0,
1679 CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0,
1683 CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0,
1687 CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0,
1691 CTLFLAG_RD, &sc->stats.rx_align_errs, 0,
1695 CTLFLAG_RD, &sc->stats.rx_resource_errs, 0,
1699 CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0,
1703 CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0,
1707 CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0,
1711 CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0,
1722 if (ofw_bus_search_compatible(dev, compat_data)->ocd_str == NULL)
1739 sc->dev = dev;
1742 /* Key off of compatible string and set hardware-specific options. */
1743 hwquirks = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
1745 sc->neednullqs = 1;
1747 sc->rxhangwar = 1;
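
The quirk tests around lines 1745-1747 are elided by the match; under the
natural reading that ocd_data is a bitmask of the HWQUIRK_* values from the
compat table, they reduce to:

    if ((hwquirks & HWQUIRK_NEEDNULLQS) != 0)
            sc->neednullqs = 1;
    if ((hwquirks & HWQUIRK_RXHANGWAR) != 0)
            sc->rxhangwar = 1;
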
1753 if (clk_get_by_ofw_name(dev, 0, "pclk", &sc->clk_pclk) != 0)
1757 if (clk_enable(sc->clk_pclk) != 0)
1760 if (clk_get_by_ofw_name(dev, 0, "hclk", &sc->clk_hclk) != 0)
1764 if (clk_enable(sc->clk_hclk) != 0)
1769 if (clk_get_by_ofw_name(dev, 0, "tx_clk", &sc->clk_txclk) == 0) {
1770 if (clk_enable(sc->clk_txclk) != 0) {
1776 if (clk_get_by_ofw_name(dev, 0, "rx_clk", &sc->clk_rxclk) == 0) {
1777 if (clk_enable(sc->clk_rxclk) != 0) {
1783 if (clk_get_by_ofw_name(dev, 0, "tsu_clk", &sc->clk_tsuclk) == 0) {
1784 if (clk_enable(sc->clk_tsuclk) != 0) {
1792 sc->phy_contype = mii_fdt_get_contype(node);
1796 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1798 if (sc->mem_res == NULL) {
1806 sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1808 if (sc->irq_res == NULL) {
1815 ifp = sc->ifp = if_alloc(IFT_ETHER);
1832 sc->if_old_flags = if_getflags(ifp);
1833 sc->rxbufs = DEFAULT_NUM_RX_BUFS;
1841 err = mii_attach(dev, &sc->miibus, ifp,
1859 callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
1863 err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
1864 INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
1877 if (sc->clk_tsuclk)
1878 clk_release(sc->clk_tsuclk);
1880 if (sc->clk_rxclk)
1881 clk_release(sc->clk_rxclk);
1883 if (sc->clk_txclk)
1884 clk_release(sc->clk_txclk);
1886 if (sc->clk_pclk)
1887 clk_release(sc->clk_pclk);
1888 if (sc->clk_hclk)
1889 clk_release(sc->clk_hclk);
1907 callout_drain(&sc->tick_ch);
1908 if_setflagbits(sc->ifp, 0, IFF_UP);
1909 ether_ifdetach(sc->ifp);
1915 if (sc->mem_res != NULL) {
1917 rman_get_rid(sc->mem_res), sc->mem_res);
1918 sc->mem_res = NULL;
1920 if (sc->irq_res != NULL) {
1921 if (sc->intrhand)
1922 bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
1924 rman_get_rid(sc->irq_res), sc->irq_res);
1925 sc->irq_res = NULL;
1929 if (sc->rxring != NULL) {
1930 if (sc->rxring_physaddr != 0) {
1931 bus_dmamap_unload(sc->desc_dma_tag,
1932 sc->rxring_dma_map);
1933 sc->rxring_physaddr = 0;
1934 sc->txring_physaddr = 0;
1935 sc->null_qs_physaddr = 0;
1937 bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
1938 sc->rxring_dma_map);
1939 sc->rxring = NULL;
1940 sc->txring = NULL;
1941 sc->null_qs = NULL;
1944 if (sc->rxring_m_dmamap[i] != NULL) {
1945 bus_dmamap_destroy(sc->mbuf_dma_tag,
1946 sc->rxring_m_dmamap[i]);
1947 sc->rxring_m_dmamap[i] = NULL;
1950 if (sc->txring_m_dmamap[i] != NULL) {
1951 bus_dmamap_destroy(sc->mbuf_dma_tag,
1952 sc->txring_m_dmamap[i]);
1953 sc->txring_m_dmamap[i] = NULL;
1956 if (sc->desc_dma_tag != NULL) {
1957 bus_dma_tag_destroy(sc->desc_dma_tag);
1958 sc->desc_dma_tag = NULL;
1960 if (sc->mbuf_dma_tag != NULL) {
1961 bus_dma_tag_destroy(sc->mbuf_dma_tag);
1962 sc->mbuf_dma_tag = NULL;
1965 if (sc->clk_tsuclk)
1966 clk_release(sc->clk_tsuclk);
1967 if (sc->clk_rxclk)
1968 clk_release(sc->clk_rxclk);
1969 if (sc->clk_txclk)
1970 clk_release(sc->clk_txclk);
1971 if (sc->clk_pclk)
1972 clk_release(sc->clk_pclk);
1973 if (sc->clk_hclk)
1974 clk_release(sc->clk_hclk);