Lines Matching full:tx

111 /* Rx/Tx Queue Control */
159 /* Tx Subroutines */
296 "tx_good_oct", "Good Octets Tx"},
298 "tx_good_frame", "Good Frames Tx"},
302 "tx_mcast_frame", "Multicast Frames Tx"},
304 "tx_bcast_frame", "Broadcast Frames Tx"},
308 "fc_tx", "Flow Control Tx"},
416 * Create Tx DMA
434 "Failed to create DMA tag for Tx descriptors.\n");
451 "Failed to create DMA tag for Tx mbufs.\n");
1011 struct mvneta_tx_ring *tx;
1023 tx = MVNETA_TX_RING(sc, q);
1025 tx->queue_status = MVNETA_QUEUE_DISABLED;
1044 /* Wait for all Tx activity to terminate. */
1060 "timeout for TX stopped. tqc 0x%x\n", reg);
1067 /* Wait until the Tx FIFO is empty */
1072 "timeout for TX FIFO drained. ps0 0x%x\n", reg);
1103 /* Init TX/RX Queue Registers */
1171 /* Port Configuration Extended: enable Tx CRC generation */
1252 struct mvneta_tx_ring *tx;
1257 tx = MVNETA_TX_RING(sc, q);
1258 mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF);
1260 (void**)&(tx->desc),
1262 &tx->desc_map);
1263 if (error != 0 || tx->desc == NULL)
1265 error = bus_dmamap_load(sc->tx_dtag, tx->desc_map,
1266 tx->desc,
1268 mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT);
1273 tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT,
1274 &tx->ring_mtx);
1275 if (tx->br == NULL) {
1296 struct mvneta_tx_ring *tx;
1304 tx = MVNETA_TX_RING(sc, q);
1306 if (tx->taskq != NULL) {
1308 while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0)
1309 taskqueue_drain(tx->taskq, &tx->task);
1312 if (tx->br != NULL)
1313 drbr_free(tx->br, M_DEVBUF);
1318 txbuf = &tx->txbuf[i];
1323 panic("%s: map busy for Tx descriptor (Q%d, %d)",
1330 if (tx->desc_pa != 0)
1331 bus_dmamap_unload(sc->tx_dtag, tx->desc_map);
1333 kva = (void *)tx->desc;
1335 bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map);
1337 if (mtx_name(&tx->ring_mtx) != NULL)
1338 mtx_destroy(&tx->ring_mtx);
1340 memset(tx, 0, sizeof(*tx));
1405 struct mvneta_tx_ring *tx;
1412 tx = MVNETA_TX_RING(sc, q);
1414 /* Tx handle */
1416 txbuf = &tx->txbuf[i];
1418 /* Tx handle needs DMA map for busdma_load_mbuf() */
1423 "can't create dma map (tx ring %d)\n", i);
1427 tx->dma = tx->cpu = 0;
1428 tx->used = 0;
1429 tx->drv_error = 0;
1430 tx->queue_status = MVNETA_QUEUE_DISABLED;
1431 tx->queue_hung = FALSE;
1433 tx->ifp = sc->ifp;
1434 tx->qidx = q;
1435 TASK_INIT(&tx->task, 0, mvneta_tx_task, tx);
1436 tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK,
1437 taskqueue_thread_enqueue, &tx->taskq);
1438 taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)",
1447 struct mvneta_tx_ring *tx;
1451 tx = MVNETA_TX_RING(sc, q);
1454 /* Tx handle */
1456 txbuf = &tx->txbuf[i];
1463 tx->dma = tx->cpu = 0;
1464 tx->used = 0;
1486 * Rx/Tx Queue Control
1528 struct mvneta_tx_ring *tx;
1533 tx = MVNETA_TX_RING(sc, q);
1534 DASSERT(tx->desc_pa != 0);
1537 MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa);
1544 DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa);
1584 struct mvneta_tx_ring *tx;
1587 tx = MVNETA_TX_RING(sc, q);
1590 /* Enable Tx queue */
1593 tx->queue_status = MVNETA_QUEUE_IDLE;
1594 tx->queue_hung = FALSE;
1752 struct mvneta_tx_ring *tx;
1807 tx = MVNETA_TX_RING(sc, q);
1809 if (tx->queue_hung && (fc_curr - fc_prev) == 0)
1833 struct mvneta_tx_ring *tx;
1840 tx = MVNETA_TX_RING(sc, q);
1842 while ((m = buf_ring_dequeue_sc(tx->br)) != NULL)
1854 struct mvneta_tx_ring *tx;
1858 tx = arg;
1859 ifp = tx->ifp;
1862 mvneta_tx_lockq(sc, tx->qidx);
1863 error = mvneta_xmit_locked(sc, tx->qidx);
1864 mvneta_tx_unlockq(sc, tx->qidx);
1869 taskqueue_enqueue(tx->taskq, &tx->task);
1876 struct mvneta_tx_ring *tx;
1881 tx = MVNETA_TX_RING(sc, q);
1887 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) {
1894 if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT))
1898 if (__predict_false(tx->used >
1910 tx->watchdog_time = ticks;
1911 tx->queue_status = MVNETA_QUEUE_WORKING;
1921 struct mvneta_tx_ring *tx;
1933 tx = MVNETA_TX_RING(sc, q);
1936 if (buf_ring_full(tx->br)) {
1946 if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) {
1958 error = drbr_enqueue(ifp, tx->br, m);
1962 taskqueue_enqueue(tx->taskq, &tx->task);
1970 struct mvneta_tx_ring *tx;
1976 tx = MVNETA_TX_RING(sc, q);
1979 while ((m = drbr_peek(ifp, tx->br)) != NULL) {
1983 drbr_putback(ifp, tx->br, m);
1985 drbr_advance(ifp, tx->br);
1988 drbr_advance(ifp, tx->br);
1998 struct mvneta_tx_ring *tx;
2002 tx = MVNETA_TX_RING(sc, 0);
2009 taskqueue_enqueue(tx->taskq, &tx->task);
2664 * Tx Subroutines
2672 struct mvneta_tx_ring *tx;
2679 tx = MVNETA_TX_RING(sc, q);
2680 DASSERT(tx->used >= 0);
2681 DASSERT(tx->used <= MVNETA_TX_RING_CNT);
2688 tx->drv_error++;
2703 tx->drv_error++;
2712 txbuf = &tx->txbuf[tx->cpu];
2722 tx->drv_error++;
2731 || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) {
2747 /* load to tx descriptors */
2750 t = &tx->desc[tx->cpu];
2762 tx->cpu = tx_counter_adv(tx->cpu, 1);
2764 tx->used++;
2771 bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
2847 struct mvneta_tx_ring *tx;
2855 tx = MVNETA_TX_RING(sc, q);
2856 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED))
2863 if (tx->used == 0)
2864 tx->queue_status = MVNETA_QUEUE_IDLE;
2865 else if (tx->queue_status == MVNETA_QUEUE_WORKING &&
2866 ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG))
2867 tx->queue_hung = TRUE;
2876 bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
2880 t = &tx->desc[tx->dma];
2883 CTR3(KTR_SPARE2, "%s tx error queue %d desc %d",
2884 if_name(sc->ifp), q, tx->dma);
2886 txbuf = &tx->txbuf[tx->dma];
2895 tx->dma = tx_counter_adv(tx->dma, 1);
2896 tx->used--;
2898 DASSERT(tx->used >= 0);
2899 DASSERT(tx->used <= MVNETA_TX_RING_CNT);
2911 if_name(sc->ifp), q, tx->cpu, tx->dma, tx->used);
2914 tx->watchdog_time = ticks;
2916 if (tx->used == 0)
2917 tx->queue_status = MVNETA_QUEUE_IDLE;
2921 * Do a final TX complete when TX is idle.
2926 struct mvneta_tx_ring *tx;
2930 * Handle trailing mbuf on TX queue.
2931 * Check is done locklessly to avoid TX path contention.
2934 tx = MVNETA_TX_RING(sc, q);
2935 if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP &&
2936 tx->used > 0) {
3476 CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter");
3540 struct mvneta_tx_ring *tx;
3592 /* TX watchdog. */
3598 * TX driver errors:
3599 * We do not take queue locks so as not to disrupt the TX path.
3601 * next mib update. We may also clear counter when TX path
3606 tx = MVNETA_TX_RING(sc, i);
3608 if (tx->drv_error > 0) {
3609 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, tx->drv_error);
3610 tx->drv_error = 0;