Lines Matching full:tx

262 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE);
263 sc->tx.desc_ring[index].des0 = htole32((uint32_t)paddr);
264 sc->tx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32));
265 sc->tx.desc_ring[index].des2 = htole32(tdes2 | len);
266 sc->tx.desc_ring[index].des3 = htole32(tdes3 | total_len);
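The matched lines below lean on a few ring-index helpers (TX_NEXT, TX_QUEUED, DESC_OFFSET) that never match "tx" by themselves and so are absent from this list. A minimal sketch of what such helpers typically look like for a ring of TX_DESC_COUNT descriptors; the descriptor struct name, the ring size, and the exact macro spellings are assumptions, not necessarily the driver's:

/* Sketch only: ring-index helpers assumed by the excerpts below. */
#define TX_DESC_COUNT   256     /* assumption: actual ring size may differ */
#define TX_NEXT(n)      (((n) + 1) % TX_DESC_COUNT)
#define TX_QUEUED(h, t) ((((h) - (t)) + TX_DESC_COUNT) % TX_DESC_COUNT)
#define DESC_OFFSET(n)  ((n) * sizeof(struct eqos_dma_desc))  /* byte offset into the ring */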
273 int first = sc->tx.head;
277 error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
278 sc->tx.buf_map[first].map, m, segs, &nsegs, 0);
282 device_printf(sc->dev, "TX packet too big, trying defrag\n");
283 bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map);
287 error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
288 sc->tx.buf_map[first].map, m, segs, &nsegs, 0);
293 if (TX_QUEUED(sc->tx.head, sc->tx.tail) + nsegs > TX_DESC_COUNT) {
294 bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map);
295 device_printf(sc->dev, "TX packet: no more queue space\n");
299 bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[first].map,
302 sc->tx.buf_map[first].mbuf = m;
307 eqos_setup_txdesc(sc, sc->tx.head, flags, segs[idx].ds_addr,
311 sc->tx.head = TX_NEXT(sc->tx.head);
318 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE);
319 sc->tx.desc_ring[first].des3 |= htole32(EQOS_TDES3_OWN);
473 (uint32_t)(sc->tx.desc_ring_paddr >> 32));
475 (uint32_t)sc->tx.desc_ring_paddr);
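Lines 473 and 475 are only the value arguments of register writes that the match cut off: the 64-bit descriptor-ring physical address is programmed into the DMA channel as two 32-bit halves. A sketch of the surrounding calls; WR4() and the base-address register names here are placeholders, not necessarily the driver's actual macros:

/* Sketch (placeholder register names): program the TX descriptor ring base. */
WR4(sc, DMA_CHAN0_TX_BASE_ADDR_HI,
    (uint32_t)(sc->tx.desc_ring_paddr >> 32));
WR4(sc, DMA_CHAN0_TX_BASE_ADDR,
    (uint32_t)sc->tx.desc_ring_paddr);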
586 if (TX_QUEUED(sc->tx.head, sc->tx.tail) >=
605 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
608 /* Start and run TX DMA */
610 (uint32_t)sc->tx.desc_ring_paddr + DESC_OFFSET(sc->tx.head));
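Lines 586-610 come from the start path: the driver refuses to enqueue when the ring cannot hold another chain, syncs the descriptor ring, and then kicks DMA by writing the byte offset of the new head into the channel's TX tail-pointer register. A hedged reconstruction of the kick, assuming the WR4() macro and the tail-pointer register name are placeholders:

/*
 * Sketch: publish the newly built chain and advance the hardware tail
 * pointer.  The ring is synced first so DMA never sees stale descriptors.
 */
bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
WR4(sc, DMA_CHAN0_TX_END_ADDR,      /* placeholder register name */
    (uint32_t)sc->tx.desc_ring_paddr + DESC_OFFSET(sc->tx.head));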
653 /* Flush data in the TX FIFO */
664 device_printf(sc->dev, "timeout flushing TX queue\n");
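Lines 653-664 belong to the stop path, where the TX queue FIFO is flushed and the driver polls for completion before giving up with the timeout message. A sketch of that poll loop; the RD4()/WR4() accessors, the operation-mode register, its flush (FTQ) bit, and the retry count are all assumptions:

/* Sketch (placeholder register/bit names): flush the MTL TX queue FIFO. */
uint32_t val;
int retry;

val = RD4(sc, MTL_TXQ0_OPERATION_MODE);
WR4(sc, MTL_TXQ0_OPERATION_MODE, val | MTL_TXQ0_OPERATION_MODE_FTQ);

/* Hardware clears the flush bit once the FIFO has drained. */
for (retry = 10000; retry > 0; retry--) {
        val = RD4(sc, MTL_TXQ0_OPERATION_MODE);
        if ((val & MTL_TXQ0_OPERATION_MODE_FTQ) == 0)
                break;
        DELAY(10);
}
if (retry == 0)
        device_printf(sc->dev, "timeout flushing TX queue\n");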
738 while (sc->tx.tail != sc->tx.head) {
739 tdes3 = le32toh(sc->tx.desc_ring[sc->tx.tail].des3);
743 bmap = &sc->tx.buf_map[sc->tx.tail];
745 bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
747 bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
752 eqos_setup_txdesc(sc, sc->tx.tail, 0, 0, 0, 0);
766 sc->tx.tail = TX_NEXT(sc->tx.tail);
768 if (sc->tx.tail == sc->tx.head)
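Lines 738-768 are the TX completion path: descriptors are reclaimed from tail toward head, stopping at the first descriptor the hardware still owns. A condensed sketch of that loop, consistent with the excerpts above; local declarations, statistics, and error accounting from the real function are omitted:

/* Sketch: reclaim completed TX descriptors (tdes3 and bmap declared earlier). */
while (sc->tx.tail != sc->tx.head) {
        tdes3 = le32toh(sc->tx.desc_ring[sc->tx.tail].des3);
        if ((tdes3 & EQOS_TDES3_OWN) != 0)
                break;                  /* hardware not done with this one yet */

        bmap = &sc->tx.buf_map[sc->tx.tail];
        if (bmap->mbuf != NULL) {
                bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
                m_freem(bmap->mbuf);
                bmap->mbuf = NULL;
        }

        eqos_setup_txdesc(sc, sc->tx.tail, 0, 0, 0, 0);
        sc->tx.tail = TX_NEXT(sc->tx.tail);
}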
864 device_printf(sc->dev, "RX/TX status interrupt\n");
997 /* Set up TX descriptor ring, descriptors, and dma maps */
1003 NULL, NULL, &sc->tx.desc_tag))) {
1004 device_printf(sc->dev, "could not create TX ring DMA tag\n");
1008 if ((error = bus_dmamem_alloc(sc->tx.desc_tag,
1009 (void**)&sc->tx.desc_ring,
1011 &sc->tx.desc_map))) {
1013 "could not allocate TX descriptor ring.\n");
1017 if ((error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map,
1018 sc->tx.desc_ring,
1019 TX_DESC_SIZE, eqos_get1paddr, &sc->tx.desc_ring_paddr, 0))) {
1021 "could not load TX descriptor ring map.\n");
1030 &sc->tx.buf_tag))) {
1031 device_printf(sc->dev, "could not create TX buffer DMA tag.\n");
1036 if ((error = bus_dmamap_create(sc->tx.buf_tag, BUS_DMA_COHERENT,
1037 &sc->tx.buf_map[i].map))) {
1038 device_printf(sc->dev, "cannot create TX buffer map\n");
1099 device_printf(sc->dev, "TX ring @ 0x%lx, RX ring @ 0x%lx\n",
1100 sc->tx.desc_ring_paddr, sc->rx.desc_ring_paddr);
1230 if (sc->tx.desc_tag) {
1231 if (sc->tx.desc_map) {
1232 bus_dmamap_unload(sc->tx.desc_tag, sc->tx.desc_map);
1233 bus_dmamem_free(sc->tx.desc_tag, sc->tx.desc_ring,
1234 sc->tx.desc_map);
1236 bus_dma_tag_destroy(sc->tx.desc_tag);
1238 if (sc->tx.buf_tag) {
1240 m_free(sc->tx.buf_map[i].mbuf);
1241 bus_dmamap_destroy(sc->tx.buf_tag,
1242 sc->tx.buf_map[i].map);
1244 bus_dma_tag_destroy(sc->tx.buf_tag);