Lines Matching "gpio-7-segment"
1 /*-
42 #include <sys/gpio.h>
71 #define RD4(sc, reg) bus_read_4((sc)->res[_RES_EMAC], (reg))
72 #define WR4(sc, reg, val) bus_write_4((sc)->res[_RES_EMAC], (reg), (val))
74 #define AWG_LOCK(sc) mtx_lock(&(sc)->mtx)
75 #define AWG_UNLOCK(sc) mtx_unlock(&(sc)->mtx)
76 #define AWG_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED)
77 #define AWG_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED)
86 #define TX_NEXT(n) (((n) + 1) & (TX_DESC_COUNT - 1))
87 #define TX_SKIP(n, o) (((n) + (o)) & (TX_DESC_COUNT - 1))
88 #define RX_NEXT(n) (((n) + 1) & (RX_DESC_COUNT - 1))
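The TX_NEXT/TX_SKIP/RX_NEXT macros advance a ring index by masking with COUNT - 1, which only equals a true modulo when the descriptor counts are powers of two. A minimal standalone sketch of the same wrap-around technique (names and the count value are illustrative, not taken from the driver):

#include <assert.h>

#define RING_COUNT	256			/* must be a power of two */
#define RING_NEXT(n)	(((n) + 1) & (RING_COUNT - 1))

/* Advance a ring index by nsegs slots without a division or branch. */
static unsigned int
ring_skip(unsigned int cur, unsigned int nsegs)
{
	assert((RING_COUNT & (RING_COUNT - 1)) == 0);	/* power-of-two check */
	return ((cur + nsegs) & (RING_COUNT - 1));	/* same trick as TX_SKIP() */
}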
150 { "allwinner,sun8i-a83t-emac", EMAC_A83T },
151 { "allwinner,sun8i-h3-emac", EMAC_H3 },
152 { "allwinner,sun50i-a64-emac", EMAC_A64 },
153 { "allwinner,sun20i-d1-emac", EMAC_D1 },
213 { -1, 0 }
242 (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
246 for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
271 (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
275 for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
299 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
301 mii = device_get_softc(sc->miibus);
303 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
305 switch (IFM_SUBTYPE(mii->mii_media_active)) {
310 sc->link = 1;
313 sc->link = 0;
317 sc->link = 0;
319 if (sc->link == 0)
325 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
326 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
328 else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
333 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
340 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
346 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
348 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
364 mii = device_get_softc(sc->miibus);
368 ifmr->ifm_active = mii->mii_media_active;
369 ifmr->ifm_status = mii->mii_media_status;
381 mii = device_get_softc(sc->miibus);
394 /* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
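The comment points at the classic mask-and-shift bit-reversal trick, presumably used when building the receive hash filter. A self-contained sketch of a 32-bit reversal in that style (the driver's own helper may differ in detail):

#include <stdint.h>

/* Reverse the bits of a 32-bit word by swapping progressively larger groups. */
static uint32_t
bitrev32(uint32_t x)
{
	x = ((x & 0x55555555U) << 1) | ((x & 0xaaaaaaaaU) >> 1);	/* adjacent bits */
	x = ((x & 0x33333333U) << 2) | ((x & 0xccccccccU) >> 2);	/* bit pairs */
	x = ((x & 0x0f0f0f0fU) << 4) | ((x & 0xf0f0f0f0U) >> 4);	/* nibbles */
	x = ((x & 0x00ff00ffU) << 8) | ((x & 0xff00ff00U) >> 8);	/* bytes */
	return ((x << 16) | (x >> 16));					/* half-words */
}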
429 ifp = sc->ifp;
559 if ((if_getcapenable(sc->ifp) & IFCAP_POLLING) == 0)
611 cur = first = sc->tx.cur;
612 map = sc->tx.buf_map[first].map;
615 error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, map, m, segs,
620 device_printf(sc->dev, "awg_encap: m_collapse failed\n");
626 error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, map, m,
634 device_printf(sc->dev, "awg_encap: bus_dmamap_load_mbuf_sg failed\n");
643 if (sc->tx.queued + nsegs > TX_DESC_COUNT) {
644 bus_dmamap_unload(sc->tx.buf_tag, map);
648 bus_dmamap_sync(sc->tx.buf_tag, map, BUS_DMASYNC_PREWRITE);
652 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
653 if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0)
661 sc->tx.segs++;
662 if (i == nsegs - 1) {
668 if (sc->tx.segs >= awg_tx_interval) {
669 sc->tx.segs = 0;
674 sc->tx.desc_ring[cur].addr = htole32((uint32_t)segs[i].ds_addr);
675 sc->tx.desc_ring[cur].size = htole32(flags | segs[i].ds_len);
676 sc->tx.desc_ring[cur].status = htole32(status);
685 ++sc->tx.queued;
689 sc->tx.cur = cur;
691 /* Store mapping and mbuf in the last segment */
692 last = TX_SKIP(cur, TX_DESC_COUNT - 1);
693 sc->tx.buf_map[first].map = sc->tx.buf_map[last].map;
694 sc->tx.buf_map[last].map = map;
695 sc->tx.buf_map[last].mbuf = m;
701 sc->tx.desc_ring[first].status = htole32(TX_DESC_CTL);
711 --sc->tx.queued;
713 bmap = &sc->tx.buf_map[index];
714 if (bmap->mbuf != NULL) {
715 bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
717 bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
718 m_freem(bmap->mbuf);
719 bmap->mbuf = NULL;
729 size = MCLBYTES - 1;
731 sc->rx.desc_ring[index].addr = htole32((uint32_t)paddr);
732 sc->rx.desc_ring[index].size = htole32(size);
733 sc->rx.desc_ring[index].status = htole32(status);
740 sc->rx.desc_ring[index].status = htole32(RX_DESC_CTL);
755 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
758 if (bus_dmamap_load_mbuf_sg(sc->rx.buf_tag, sc->rx.buf_spare_map,
764 if (sc->rx.buf_map[index].mbuf != NULL) {
765 bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
767 bus_dmamap_unload(sc->rx.buf_tag, sc->rx.buf_map[index].map);
769 map = sc->rx.buf_map[index].map;
770 sc->rx.buf_map[index].map = sc->rx.buf_spare_map;
771 sc->rx.buf_spare_map = map;
772 bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
775 sc->rx.buf_map[index].mbuf = m;
808 &sc->tx.desc_tag);
814 error = bus_dmamem_alloc(sc->tx.desc_tag, (void **)&sc->tx.desc_ring,
815 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->tx.desc_map);
821 error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map,
822 sc->tx.desc_ring, TX_DESC_SIZE, awg_dmamap_cb,
823 &sc->tx.desc_ring_paddr, 0);
830 sc->tx.desc_ring[i].next =
831 htole32(sc->tx.desc_ring_paddr + DESC_OFF(TX_NEXT(i)));
843 &sc->tx.buf_tag);
849 sc->tx.queued = 0;
851 error = bus_dmamap_create(sc->tx.buf_tag, 0,
852 &sc->tx.buf_map[i].map);
870 &sc->rx.desc_tag);
876 error = bus_dmamem_alloc(sc->rx.desc_tag, (void **)&sc->rx.desc_ring,
877 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rx.desc_map);
883 error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map,
884 sc->rx.desc_ring, RX_DESC_SIZE, awg_dmamap_cb,
885 &sc->rx.desc_ring_paddr, 0);
901 &sc->rx.buf_tag);
907 error = bus_dmamap_create(sc->rx.buf_tag, 0, &sc->rx.buf_spare_map);
915 sc->rx.desc_ring[i].next =
916 htole32(sc->rx.desc_ring_paddr + DESC_OFF(RX_NEXT(i)));
918 error = bus_dmamap_create(sc->rx.buf_tag, 0,
919 &sc->rx.buf_map[i].map);
924 sc->rx.buf_map[i].mbuf = NULL;
931 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
935 WR4(sc, EMAC_TX_DMA_LIST, sc->tx.desc_ring_paddr);
936 WR4(sc, EMAC_RX_DMA_LIST, sc->rx.desc_ring_paddr);
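Each descriptor's next field holds the bus address of the following descriptor and the last entry wraps back to the first, so the DMA engine walks both rings as circular lists starting from the base addresses written to EMAC_TX_DMA_LIST and EMAC_RX_DMA_LIST. A sketch of the offset arithmetic assumed above (the descriptor layout is inferred from the status/size/addr/next accesses in this file and is illustrative):

#include <stdint.h>

/* Hardware DMA descriptor, matching the four 32-bit fields accessed above. */
struct emac_desc {
	uint32_t	status;
	uint32_t	size;
	uint32_t	addr;
	uint32_t	next;
};

/* Byte offset of descriptor n from the start of its ring. */
#define	DESC_OFF(n)	((n) * sizeof(struct emac_desc))

/*
 * desc_ring_paddr + DESC_OFF(TX_NEXT(i)) is then the bus address of
 * descriptor i + 1, wrapping to descriptor 0 after the last slot.
 */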
966 if (!sc->link)
969 ifp = sc->ifp;
992 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
1017 mii = device_get_softc(sc->miibus);
1018 ifp = sc->ifp;
1033 callout_reset(&sc->stat_ch, hz, awg_tick, sc);
1057 ifp = sc->ifp;
1059 callout_stop(&sc->stat_ch);
1064 sc->link = 0;
1070 for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) {
1071 val = le32toh(sc->tx.desc_ring[i].status);
1076 sc->tx.next = i;
1077 for (; sc->tx.queued > 0; i = TX_NEXT(i)) {
1078 sc->tx.desc_ring[i].status = 0;
1081 sc->tx.cur = sc->tx.next;
1082 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
1086 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
1089 for (i = sc->rx.cur; ; i = RX_NEXT(i)) {
1090 val = le32toh(sc->rx.desc_ring[i].status);
1095 sc->rx.cur = i;
1096 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
1111 mii = device_get_softc(sc->miibus);
1120 flags = if_getflags(ifp) ^ sc->if_flags;
1129 sc->if_flags = if_getflags(ifp);
1142 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1145 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1148 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1196 ifp = sc->ifp;
1201 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
1204 for (index = sc->rx.cur; ; index = RX_NEXT(index)) {
1205 status = le32toh(sc->rx.desc_ring[index].status);
1218 m = sc->rx.buf_map[index].mbuf;
1227 m->m_pkthdr.rcvif = ifp;
1228 m->m_pkthdr.len = len;
1229 m->m_len = len;
1234 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1236 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1238 m->m_pkthdr.csum_flags |=
1240 m->m_pkthdr.csum_data = 0xffff;
1244 m->m_nextpkt = NULL;
1248 mt->m_nextpkt = m;
1262 if (index != sc->rx.cur) {
1263 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
1273 sc->rx.cur = index;
1288 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
1291 ifp = sc->ifp;
1294 for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) {
1295 desc = &sc->tx.desc_ring[i];
1296 status = le32toh(desc->status);
1299 size = le32toh(desc->size);
1311 sc->tx.next = i;
1335 if (!if_sendq_empty(sc->ifp))
1386 if (sc->syscon != NULL)
1387 return (SYSCON_READ_4(sc->syscon, EMAC_CLK_REG));
1388 else if (sc->res[_RES_SYSCON] != NULL)
1389 return (bus_read_4(sc->res[_RES_SYSCON], 0));
1400 if (sc->syscon != NULL)
1401 SYSCON_WRITE_4(sc->syscon, EMAC_CLK_REG, val);
1402 else if (sc->res[_RES_SYSCON] != NULL)
1403 bus_write_4(sc->res[_RES_SYSCON], 0, val);
1417 if (OF_getencprop(node, "phy-handle", (void *)&phy_handle,
1431 if (OF_hasprop(node, "allwinner,use-internal-phy"))
1436 "allwinner,sun8i-h3-mdio-internal") != 0);
1450 if (OF_getencprop(node, "tx-delay", &delay, sizeof(delay)) >= 0)
1452 else if (OF_getencprop(node, "allwinner,tx-delay-ps", &delay,
1455 device_printf(dev, "tx-delay-ps is not a multiple of 100\n");
1460 if (*tx_delay > 7) {
1461 device_printf(dev, "tx-delay out of range\n");
1465 if (OF_getencprop(node, "rx-delay", &delay, sizeof(delay)) >= 0)
1467 else if (OF_getencprop(node, "allwinner,rx-delay-ps", &delay,
1470 device_printf(dev, "rx-delay-ps is not a multiple of 100\n");
1476 device_printf(dev, "rx-delay out of range\n");
1499 if (OF_getprop_alloc(node, "phy-mode", (void **)&phy_type) == 0)
1502 if (sc->syscon != NULL || sc->res[_RES_SYSCON] != NULL)
1512 * For the pine64, we get dtb from U-Boot and it still uses the
1515 * These abstractions can go away once U-Boot dts is up-to-date.
1543 if (sc->type == EMAC_H3) {
1548 "allwinner,leds-active-low"))
1577 /* Find the desired parent clock based on phy-mode property */
1625 if (phy_node == 0 && OF_hasprop(node, "phy-handle")) {
1656 "syscon", &sc->syscon) != 0) {
1679 /* De-assert reset */
1682 device_printf(dev, "cannot de-assert ahb reset\n");
1687 * The ephy reset is left de-asserted by U-Boot. Assert it
1694 device_printf(dev, "cannot de-assert ephy reset\n");
1700 if (regulator_get_by_ofw_property(dev, 0, "phy-supply", &reg) == 0) {
1716 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_16;
1718 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_32;
1720 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_64;
1722 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_128;
1731 (uintmax_t)freq, sc->mdc_div_ratio_m);
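The divider is chosen so that the reference clock, divided down, stays within the usual MDIO management-clock limit (on the order of 2.5 MHz), then mapped to one of the MDC_DIV_RATIO_M_* register encodings above. A self-contained sketch of that selection logic (thresholds and names are illustrative, not the driver's exact values):

#include <stdint.h>

/*
 * Return the smallest divider out of 16/32/64/128 that keeps freq/div at or
 * below mdc_max, or -1 if even the largest divider is not enough.
 */
static int
pick_mdc_divider(uint64_t freq, uint64_t mdc_max)
{
	static const unsigned int divs[] = { 16, 32, 64, 128 };
	unsigned int i;

	for (i = 0; i < sizeof(divs) / sizeof(divs[0]); i++)
		if (freq / divs[i] <= mdc_max)
			return ((int)divs[i]);
	return (-1);
}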
1788 device_printf(dev, " %-20s %08x\n", regs[n].name,
1800 device_t gpio;
1805 if (OF_getencprop(node, "allwinner,reset-gpio", gpio_prop,
1809 if (OF_getencprop(node, "allwinner,reset-delays-us", delay_prop,
1814 if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL)
1817 if (GPIO_MAP_GPIOS(gpio, node, gpio_node, nitems(gpio_prop) - 1,
1822 if (OF_hasprop(node, "allwinner,reset-active-low"))
1828 GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT);
1829 GPIO_PIN_SET(gpio, pin, pin_value);
1831 GPIO_PIN_SET(gpio, pin, !pin_value);
1833 GPIO_PIN_SET(gpio, pin, pin_value);
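The three pin writes above form the PHY reset pulse; the reset-delays-us values presumably supply the pauses between them. A condensed sketch of how the pulse is typically sequenced (delay_prop[] holds the three microsecond delays from the device tree; the driver's exact interleaving may differ):

	GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT);
	GPIO_PIN_SET(gpio, pin, pin_value);	/* starting level */
	DELAY(delay_prop[0]);
	GPIO_PIN_SET(gpio, pin, !pin_value);	/* flip the line to pulse reset */
	DELAY(delay_prop[1]);
	GPIO_PIN_SET(gpio, pin, pin_value);	/* restore the starting level */
	DELAY(delay_prop[2]);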
1856 /* Wait for soft reset bit to self-clear */
1857 for (retry = SOFT_RST_RETRY; retry > 0; retry--) {
1886 ifp = sc->ifp;
1887 mii = device_get_softc(sc->miibus);
1894 link = sc->link;
1896 if (sc->link && !link)
1899 callout_reset(&sc->stat_ch, hz, awg_tick, sc);
1912 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
1927 sc->dev = dev;
1928 sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
1930 if (bus_alloc_resources(dev, awg_spec, sc->res) != 0) {
1935 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
1936 callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
1957 error = bus_setup_intr(dev, sc->res[_RES_IRQ],
1958 INTR_TYPE_NET | INTR_MPSAFE, NULL, awg_intr, sc, &sc->ih);
1965 sc->ifp = if_alloc(IFT_ETHER);
1966 if_setsoftc(sc->ifp, sc);
1967 if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
1968 if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
1969 if_setstartfn(sc->ifp, awg_start);
1970 if_setioctlfn(sc->ifp, awg_ioctl);
1971 if_setinitfn(sc->ifp, awg_init);
1972 if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
1973 if_setsendqready(sc->ifp);
1974 if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP);
1975 if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM);
1976 if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
1978 if_setcapabilitiesbit(sc->ifp, IFCAP_POLLING, 0);
1982 error = mii_attach(dev, &sc->miibus, sc->ifp, awg_media_change,
1991 ether_ifattach(sc->ifp, eaddr);