Lines Matching +full:rx +full:- +full:tx
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
29 * $Id: eqos.c 1059 2022-12-08 19:32:32Z sos $
33 * DesignWare Ethernet Quality-of-Service controller
84 #define TX_QUEUED(h, t) ((((h) - (t)) + TX_DESC_COUNT) % TX_DESC_COUNT)
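An illustrative aside (not from the driver source): TX_QUEUED() counts how many descriptors sit between tail and head with wrap-around; the TX_DESC_COUNT value of 256 below is assumed only for the example.
/* Hypothetical ring size for illustration: TX_DESC_COUNT == 256 */
/* no wrap:  TX_QUEUED(10, 4)  -> ((10 - 4)  + 256) % 256 == 6 */
/* wrapped:  TX_QUEUED(3, 250) -> ((3 - 250) + 256) % 256 == 9 */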
93 #define EQOS_LOCK(sc) mtx_lock(&(sc)->lock)
94 #define EQOS_UNLOCK(sc) mtx_unlock(&(sc)->lock)
95 #define EQOS_ASSERT_LOCKED(sc) mtx_assert(&(sc)->lock, MA_OWNED)
97 #define RD4(sc, o) bus_read_4(sc->res[EQOS_RES_MEM], (o))
98 #define WR4(sc, o, v) bus_write_4(sc->res[EQOS_RES_MEM], (o), (v))
104 { -1, 0 }
117 addr = sc->csr_clock_range |
125 for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
150 addr = sc->csr_clock_range |
158 for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
176 struct mii_data *mii = device_get_softc(sc->miibus);
181 if (mii->mii_media_status & IFM_ACTIVE)
182 sc->link_up = true;
184 sc->link_up = false;
188 switch (IFM_SUBTYPE(mii->mii_media_active)) {
208 sc->link_up = false;
212 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX))
219 IF_EQOS_SET_SPEED(dev, IFM_SUBTYPE(mii->mii_media_active));
221 WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1);
228 struct mii_data *mii = device_get_softc(sc->miibus);
232 ifmr->ifm_active = mii->mii_media_active;
233 ifmr->ifm_status = mii->mii_media_status;
244 error = mii_mediachg(device_get_softc(sc->miibus));
262 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE);
263 sc->tx.desc_ring[index].des0 = htole32((uint32_t)paddr);
264 sc->tx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32));
265 sc->tx.desc_ring[index].des2 = htole32(tdes2 | len);
266 sc->tx.desc_ring[index].des3 = htole32(tdes3 | total_len);
273 int first = sc->tx.head;
277 error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
278 sc->tx.buf_map[first].map, m, segs, &nsegs, 0);
282 device_printf(sc->dev, "TX packet too big, trying defrag\n");
283 bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map);
287 error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
288 sc->tx.buf_map[first].map, m, segs, &nsegs, 0);
293 if (TX_QUEUED(sc->tx.head, sc->tx.tail) + nsegs > TX_DESC_COUNT) {
294 bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map);
295 device_printf(sc->dev, "TX packet: no more queue space\n");
299 bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[first].map,
302 sc->tx.buf_map[first].mbuf = m;
305 if (idx == (nsegs - 1))
307 eqos_setup_txdesc(sc, sc->tx.head, flags, segs[idx].ds_addr,
308 segs[idx].ds_len, m->m_pkthdr.len);
311 sc->tx.head = TX_NEXT(sc->tx.head);
318 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE);
319 sc->tx.desc_ring[first].des3 |= htole32(EQOS_TDES3_OWN);
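/* The chain is handed to hardware last: OWN is set on the first descriptor
 * only after every segment descriptor has been written and the ring synced,
 * so the DMA engine never fetches a partially built chain. */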
328 sc->rx.desc_ring[index].des0 = htole32((uint32_t)paddr);
329 sc->rx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32));
330 sc->rx.desc_ring[index].des2 = htole32(0);
331 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_PREWRITE);
332 sc->rx.desc_ring[index].des3 = htole32(EQOS_RDES3_OWN | EQOS_RDES3_IOC |
344 error = bus_dmamap_load_mbuf_sg(sc->rx.buf_tag,
345 sc->rx.buf_map[index].map, m, &seg, &nsegs, 0);
349 bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
352 sc->rx.buf_map[index].mbuf = m;
364 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
411 if_t ifp = sc->ifp;
459 for (retry = 2000; retry > 0; retry--) {
473 (uint32_t)(sc->tx.desc_ring_paddr >> 32));
475 (uint32_t)sc->tx.desc_ring_paddr);
476 WR4(sc, GMAC_DMA_CHAN0_TX_RING_LEN, TX_DESC_COUNT - 1);
479 (uint32_t)(sc->rx.desc_ring_paddr >> 32));
481 (uint32_t)sc->rx.desc_ring_paddr);
482 WR4(sc, GMAC_DMA_CHAN0_RX_RING_LEN, RX_DESC_COUNT - 1);
485 (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(RX_DESC_COUNT));
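/* The register written here (its name is not visible in this match) gets an
 * address one descriptor past the last RX slot, which leaves the entire RX
 * ring available to the DMA engine at init. */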
492 if_t ifp = sc->ifp;
493 struct mii_data *mii = device_get_softc(sc->miibus);
505 WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1);
510 val |= ((DESC_ALIGN - 16) / 8) << GMAC_DMA_CHAN0_CONTROL_DSL_SHIFT;
547 /* Set RX queue mode; must be in DCB mode. */
566 callout_reset(&sc->callout, hz, eqos_tick, sc);
578 if (!sc->link_up)
586 if (TX_QUEUED(sc->tx.head, sc->tx.tail) >=
587 TX_DESC_COUNT - TX_MAX_SEGS) {
605 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
608 /* Start and run TX DMA */
610 (uint32_t)sc->tx.desc_ring_paddr + DESC_OFFSET(sc->tx.head));
611 sc->tx_watchdog = WATCHDOG_TIMEOUT_SECS;
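/* Writing the tail pointer at the current head kicks the transmit DMA into
 * fetching the newly owned descriptors; the watchdog is armed so a stalled
 * transmit can be detected from eqos_tick(). */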
628 if_t ifp = sc->ifp;
636 callout_stop(&sc->callout);
653 /* Flush data in the TX FIFO */
657 for (retry = 10000; retry > 0; retry--) {
664 device_printf(sc->dev, "timeout flushing TX queue\n");
679 if_t ifp = sc->ifp;
685 rdes3 = le32toh(sc->rx.desc_ring[sc->rx.head].des3);
692 bus_dmamap_sync(sc->rx.buf_tag,
693 sc->rx.buf_map[sc->rx.head].map, BUS_DMASYNC_POSTREAD);
694 bus_dmamap_unload(sc->rx.buf_tag,
695 sc->rx.buf_map[sc->rx.head].map);
699 m = sc->rx.buf_map[sc->rx.head].mbuf;
700 m->m_pkthdr.rcvif = ifp;
701 m->m_pkthdr.len = length;
702 m->m_len = length;
703 m->m_nextpkt = NULL;
706 m_adj(m, -ETHER_CRC_LEN);
714 if ((error = eqos_setup_rxbuf(sc, sc->rx.head, m)))
715 printf("ERROR: Hole in RX ring!\n");
723 (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(sc->rx.head));
725 sc->rx.head = RX_NEXT(sc->rx.head);
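/* Each processed slot is handed back by rewriting the RX tail pointer
 * (register name not visible in this match) before rx.head advances. */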
732 if_t ifp = sc->ifp;
738 while (sc->tx.tail != sc->tx.head) {
739 tdes3 = le32toh(sc->tx.desc_ring[sc->tx.tail].des3);
743 bmap = &sc->tx.buf_map[sc->tx.tail];
744 if (bmap->mbuf) {
745 bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
747 bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
748 m_freem(bmap->mbuf);
749 bmap->mbuf = NULL;
752 eqos_setup_txdesc(sc, sc->tx.tail, 0, 0, 0, 0);
766 sc->tx.tail = TX_NEXT(sc->tx.tail);
768 if (sc->tx.tail == sc->tx.head)
769 sc->tx_watchdog = 0;
770 eqos_start_locked(sc->ifp);
796 device_printf(sc->dev,
806 struct mii_data *mii = device_get_softc(sc->miibus);
811 if (sc->tx_watchdog > 0)
812 if (!--sc->tx_watchdog) {
813 device_printf(sc->dev, "watchdog timeout\n");
817 link_status = sc->link_up;
819 if (sc->link_up && !link_status)
820 eqos_start_locked(sc->ifp);
822 callout_reset(&sc->callout, hz, eqos_tick, sc);
835 device_printf(sc->dev, "MAC interrupt\n");
857 device_printf(sc->dev,
864 device_printf(sc->dev, "RX/TX status interrupt\n");
908 mii = device_get_softc(sc->miibus);
909 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
913 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
997 /* Set up TX descriptor ring, descriptors, and DMA maps */
998 if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
1003 NULL, NULL, &sc->tx.desc_tag))) {
1004 device_printf(sc->dev, "could not create TX ring DMA tag\n");
1008 if ((error = bus_dmamem_alloc(sc->tx.desc_tag,
1009 (void**)&sc->tx.desc_ring,
1011 &sc->tx.desc_map))) {
1012 device_printf(sc->dev,
1013 "could not allocate TX descriptor ring.\n");
1017 if ((error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map,
1018 sc->tx.desc_ring,
1019 TX_DESC_SIZE, eqos_get1paddr, &sc->tx.desc_ring_paddr, 0))) {
1020 device_printf(sc->dev,
1021 "could not load TX descriptor ring map.\n");
1025 if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
1030 &sc->tx.buf_tag))) {
1031 device_printf(sc->dev, "could not create TX buffer DMA tag.\n");
1036 if ((error = bus_dmamap_create(sc->tx.buf_tag, BUS_DMA_COHERENT,
1037 &sc->tx.buf_map[i].map))) {
1038 device_printf(sc->dev, "cannot create TX buffer map\n");
1044 /* Set up RX descriptor ring, descriptors, DMA maps, and mbufs */
1045 if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
1050 NULL, NULL, &sc->rx.desc_tag))) {
1051 device_printf(sc->dev, "could not create RX ring DMA tag.\n");
1055 if ((error = bus_dmamem_alloc(sc->rx.desc_tag,
1056 (void **)&sc->rx.desc_ring,
1058 &sc->rx.desc_map))) {
1059 device_printf(sc->dev,
1060 "could not allocate RX descriptor ring.\n");
1064 if ((error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map,
1065 sc->rx.desc_ring, RX_DESC_SIZE, eqos_get1paddr,
1066 &sc->rx.desc_ring_paddr, 0))) {
1067 device_printf(sc->dev,
1068 "could not load RX descriptor ring map.\n");
1072 if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
1077 &sc->rx.buf_tag))) {
1078 device_printf(sc->dev, "could not create RX buffer DMA tag.\n");
1083 if ((error = bus_dmamap_create(sc->rx.buf_tag, BUS_DMA_COHERENT,
1084 &sc->rx.buf_map[i].map))) {
1085 device_printf(sc->dev, "cannot create RX buffer map\n");
1089 device_printf(sc->dev, "cannot allocate RX mbuf\n");
1093 device_printf(sc->dev, "cannot create RX buffer\n");
1099 device_printf(sc->dev, "TX ring @ 0x%lx, RX ring @ 0x%lx\n",
1100 sc->tx.desc_ring_paddr, sc->rx.desc_ring_paddr);
1116 if (bus_alloc_resources(dev, eqos_spec, sc->res)) {
1118 bus_release_resources(dev, eqos_spec, sc->res);
1125 sc->dev = dev;
1138 sc->hw_feature[n] = RD4(sc, GMAC_MAC_HW_FEATURE(n));
1144 sc->hw_feature[0], sc->hw_feature[1],
1145 sc->hw_feature[2], sc->hw_feature[3]);
1148 mtx_init(&sc->lock, "eqos lock", MTX_NETWORK_LOCK, MTX_DEF);
1149 callout_init_mtx(&sc->callout, &sc->lock, 0);
1153 device_printf(sc->dev, "Ethernet address %6D\n", eaddr, ":");
1157 device_printf(sc->dev, "reset timeout!\n");
1166 device_printf(sc->dev, "failed to setup DMA descriptors\n");
1171 if ((bus_setup_intr(dev, sc->res[EQOS_RES_IRQ0], EQOS_INTR_FLAGS,
1172 NULL, eqos_intr, sc, &sc->irq_handle))) {
1174 bus_release_resources(dev, eqos_spec, sc->res);
1179 ifp = sc->ifp = if_alloc(IFT_ETHER);
1181 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
1182 if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
1186 if_setsendqlen(ifp, TX_DESC_COUNT - 1);
1192 if ((error = mii_attach(sc->dev, &sc->miibus, ifp, eqos_media_change,
1195 device_printf(sc->dev, "PHY attach failed\n");
1215 if_setflagbits(sc->ifp, 0, IFF_UP);
1216 ether_ifdetach(sc->ifp);
1221 if (sc->irq_handle)
1222 bus_teardown_intr(dev, sc->res[EQOS_RES_IRQ0],
1223 sc->irq_handle);
1225 if (sc->ifp)
1226 if_free(sc->ifp);
1228 bus_release_resources(dev, eqos_spec, sc->res);
1230 if (sc->tx.desc_tag) {
1231 if (sc->tx.desc_map) {
1232 bus_dmamap_unload(sc->tx.desc_tag, sc->tx.desc_map);
1233 bus_dmamem_free(sc->tx.desc_tag, sc->tx.desc_ring,
1234 sc->tx.desc_map);
1236 bus_dma_tag_destroy(sc->tx.desc_tag);
1238 if (sc->tx.buf_tag) {
1240 m_free(sc->tx.buf_map[i].mbuf);
1241 bus_dmamap_destroy(sc->tx.buf_tag,
1242 sc->tx.buf_map[i].map);
1244 bus_dma_tag_destroy(sc->tx.buf_tag);
1247 if (sc->rx.desc_tag) {
1248 if (sc->rx.desc_map) {
1249 bus_dmamap_unload(sc->rx.desc_tag, sc->rx.desc_map);
1250 bus_dmamem_free(sc->rx.desc_tag, sc->rx.desc_ring,
1251 sc->rx.desc_map);
1253 bus_dma_tag_destroy(sc->rx.desc_tag);
1255 if (sc->rx.buf_tag) {
1257 m_free(sc->rx.buf_map[i].mbuf);
1258 bus_dmamap_destroy(sc->rx.buf_tag,
1259 sc->rx.buf_map[i].map);
1261 bus_dma_tag_destroy(sc->rx.buf_tag);
1264 mtx_destroy(&sc->lock);