Lines Matching defs:txr
(identifier cross-reference: each match is a source line number followed by the transmit-path line that uses the tx_ring pointer txr)

271 igc_txdesc_sync(struct tx_ring *txr, int id, int ops)
274 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
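
The matches at 271/274 are the per-descriptor sync helper. A minimal sketch of what it plausibly does, inferred from the bus_dmamap_sync() call on line 274: sync only the one descriptor's slice of the ring's DMA map; the offset/length arithmetic is an assumption, not copied from the driver.

    static inline void
    igc_txdesc_sync(struct tx_ring *txr, int id, int ops)
    {
        /* Sync just the slot for descriptor `id`, not the whole ring. */
        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
            id * sizeof(union igc_adv_tx_desc),
            sizeof(union igc_adv_tx_desc), ops);
    }
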
659 struct tx_ring *txr = &sc->tx_rings[iq];
664 txr->sc = sc;
665 txr->txr_igcq = &sc->queues[iq];
666 txr->me = iq;
667 if (igc_dma_malloc(sc, tsize, &txr->txdma)) {
672 txr->tx_base = (union igc_adv_tx_desc *)txr->txdma.dma_vaddr;
673 memset(txr->tx_base, 0, tsize);
717 q->txr = &sc->tx_rings[iq];
726 for (struct tx_ring *txr = sc->tx_rings; txconf > 0; txr++, txconf--)
727 igc_dma_free(sc, &txr->txdma);
777 struct tx_ring *txr = &sc->tx_rings[iq];
779 igc_dma_free(sc, &txr->txdma);
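
Lines 659-673 run once per queue at attach time: hook the ring back to its softc and igc_queue, allocate the descriptor area through igc_dma_malloc(), and zero it; line 717 wires the ring into its queue structure, while 726-727 and 777-779 are the unwind and detach paths that return the DMA memory. A hedged sketch of the attach-side loop; sc_nqueues, sc_dev, the fail label and the error message are assumptions, the per-ring body follows the matched lines (tsize is the ring size in bytes).

    for (int iq = 0; iq < sc->sc_nqueues; iq++) {
        struct tx_ring *txr = &sc->tx_rings[iq];

        txr->sc = sc;
        txr->txr_igcq = &sc->queues[iq];
        txr->me = iq;
        if (igc_dma_malloc(sc, tsize, &txr->txdma)) {
            aprint_error_dev(sc->sc_dev,
                "unable to allocate TX descriptor memory\n");
            goto fail;          /* unwinds via igc_dma_free(), as at 726-727 */
        }
        txr->tx_base = (union igc_adv_tx_desc *)txr->txdma.dma_vaddr;
        memset(txr->tx_base, 0, tsize);
    }
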
1639 struct tx_ring *txr = &sc->tx_rings[0]; /* queue 0 */
1640 mutex_enter(&txr->txr_lock);
1641 igc_tx_common_locked(ifp, txr, IGC_TX_START);
1642 mutex_exit(&txr->txr_lock);
1658 struct tx_ring *txr = &sc->tx_rings[qid];
1659 struct igc_queue *q = txr->txr_igcq;
1661 if (__predict_false(!pcq_put(txr->txr_interq, m))) {
1667 mutex_enter(&txr->txr_lock);
1668 igc_tx_common_locked(ifp, txr, IGC_TX_TRANSMIT);
1669 mutex_exit(&txr->txr_lock);
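
Lines 1639-1642 are the legacy if_start hook, which always works on queue 0 under txr_lock; lines 1658-1669 are the if_transmit hook, which picks a queue, pushes the mbuf onto that ring's pcq, and then runs the common encapsulation routine. A sketch of the if_transmit side; the queue-selection expression and the ENOBUFS/drop handling are assumptions, the pcq/lock pattern follows the matches.

    static int
    igc_transmit(struct ifnet *ifp, struct mbuf *m)
    {
        struct igc_softc *sc = ifp->if_softc;
        const u_int qid = cpu_index(curcpu()) % sc->sc_nqueues; /* assumed */
        struct tx_ring *txr = &sc->tx_rings[qid];

        if (__predict_false(!pcq_put(txr->txr_interq, m))) {
            m_freem(m);                 /* per-queue drop counter omitted */
            return ENOBUFS;
        }

        mutex_enter(&txr->txr_lock);
        igc_tx_common_locked(ifp, txr, IGC_TX_TRANSMIT);
        mutex_exit(&txr->txr_lock);
        return 0;
    }
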
1675 igc_tx_common_locked(struct ifnet *ifp, struct tx_ring *txr, int caller)
1678 struct igc_queue *q = txr->txr_igcq;
1683 prod = txr->next_avail_desc;
1684 free = txr->next_to_clean;
1691 txr->me, prod, txr->next_to_clean, free);
1702 m = pcq_get(txr->txr_interq);
1708 struct igc_tx_buf *txbuf = &txr->tx_buffers[prod];
1712 igc_load_mbuf(q, txr->txdma.dma_tag, map, m))) {
1720 bus_dmamap_sync(txr->txdma.dma_tag, map, 0,
1724 if (igc_tx_ctx_setup(txr, m, prod, &ctx_cmd_type_len,
1732 union igc_adv_tx_desc *txdesc = &txr->tx_base[prod];
1742 igc_txdesc_sync(txr, prod,
1748 igc_txdesc_sync(txr, prod,
1771 txr->next_avail_desc = prod;
1772 IGC_WRITE_REG(&sc->hw, IGC_TDT(txr->me), prod);
1777 txr->me, prod, txr->next_to_clean, free);
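
Lines 1675-1777 are the heart of the transmit path: with txr_lock held, pull packets from the ring's pcq (IGC_TX_TRANSMIT) or the interface send queue (IGC_TX_START), DMA-load each mbuf into the slot's map, emit an optional context descriptor through igc_tx_ctx_setup(), fill one data descriptor per DMA segment, and finally publish the new producer index through TDT. A condensed sketch of one packet's worth of that loop; the DTYP/DCMD command bits (including EOP/RS on the last segment), the free-space accounting and the event counters are left out, and the descriptor field names come from the standard igc advanced TX descriptor layout.

    /* One packet, heavily condensed; prod wraps modulo num_tx_desc. */
    struct igc_tx_buf *txbuf = &txr->tx_buffers[prod];
    bus_dmamap_t map = txbuf->map;
    uint32_t ctx_cmd_type_len = 0, olinfo_status = 0;

    if (__predict_false(igc_load_mbuf(q, txr->txdma.dma_tag, map, m))) {
        m_freem(m);                     /* could not load the mbuf chain */
        continue;
    }
    bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
        BUS_DMASYNC_PREWRITE);

    /* A context descriptor (checksum/VLAN/TSO) may occupy the next slot. */
    if (igc_tx_ctx_setup(txr, m, prod, &ctx_cmd_type_len, &olinfo_status))
        prod = (prod + 1) % sc->num_tx_desc;

    for (int i = 0; i < map->dm_nsegs; i++) {
        union igc_adv_tx_desc *txdesc = &txr->tx_base[prod];

        igc_txdesc_sync(txr, prod,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        txdesc->read.buffer_addr = htole64(map->dm_segs[i].ds_addr);
        txdesc->read.cmd_type_len =
            htole32(ctx_cmd_type_len | map->dm_segs[i].ds_len);
        txdesc->read.olinfo_status = htole32(olinfo_status);
        igc_txdesc_sync(txr, prod,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        prod = (prod + 1) % sc->num_tx_desc;
    }

    /* The driver also records the packet's mbuf and last slot in txbuf
     * so igc_txeof() can complete it later. */
    txr->next_avail_desc = prod;
    IGC_WRITE_REG(&sc->hw, IGC_TDT(txr->me), prod);
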
1783 igc_txeof(struct tx_ring *txr, u_int limit)
1785 struct igc_softc *sc = txr->sc;
1790 prod = txr->next_avail_desc;
1791 cons = txr->next_to_clean;
1795 txr->me, cons, prod);
1800 struct igc_tx_buf *txbuf = &txr->tx_buffers[cons];
1806 union igc_adv_tx_desc *txdesc = &txr->tx_base[last];
1807 igc_txdesc_sync(txr, last, BUS_DMASYNC_POSTREAD);
1809 igc_txdesc_sync(txr, last, BUS_DMASYNC_PREREAD);
1819 txr->me, cons, last, prod, status);
1826 txr->me, cons, last, prod, status);
1831 bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
1833 bus_dmamap_unload(txr->txdma.dma_tag, map);
1842 txr->next_to_clean = cons;
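
Lines 1783-1842 are the completion side: walk from next_to_clean toward next_avail_desc, read back the status of each packet's last descriptor, and unload and free the mbufs the hardware has finished with. A hedged sketch of that loop; the return type, the eop_index and m_head field names, and the exact budget handling are assumptions, while the sync/unload sequence follows the matches at 1806-1833.

    static bool
    igc_txeof(struct tx_ring *txr, u_int limit)
    {
        struct igc_softc *sc = txr->sc;
        int prod = txr->next_avail_desc;
        int cons = txr->next_to_clean;
        bool more = false;

        while (cons != prod) {
            if (limit-- == 0) {
                more = true;            /* budget exhausted, come back later */
                break;
            }

            struct igc_tx_buf *txbuf = &txr->tx_buffers[cons];
            const int last = txbuf->eop_index;      /* assumed field name */
            union igc_adv_tx_desc *txdesc = &txr->tx_base[last];

            igc_txdesc_sync(txr, last, BUS_DMASYNC_POSTREAD);
            const uint32_t status = le32toh(txdesc->wb.status);
            igc_txdesc_sync(txr, last, BUS_DMASYNC_PREREAD);

            if ((status & IGC_TXD_STAT_DD) == 0)
                break;                  /* hardware has not finished this one */

            bus_dmamap_sync(txr->txdma.dma_tag, txbuf->map, 0,
                txbuf->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
            m_freem(txbuf->m_head);     /* assumed field name */
            txbuf->m_head = NULL;

            cons = (last + 1) % sc->num_tx_desc;
        }

        txr->next_to_clean = cons;
        return more;
    }
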
1915 struct tx_ring *txr = &sc->tx_rings[iq];
1917 igc_withdraw_transmit_packets(txr, false);
2761 struct tx_ring *txr = iq->txr;
2771 mutex_enter(&txr->txr_lock);
2772 txmore = igc_txeof(txr, txlimit);
2773 mutex_exit(&txr->txr_lock);
2795 struct tx_ring *txr = iq->txr;
2833 mutex_enter(&txr->txr_lock);
2834 txmore = igc_txeof(txr, txlimit);
2835 mutex_exit(&txr->txr_lock);
2865 struct tx_ring *txr = iq->txr;
2873 mutex_enter(&txr->txr_lock);
2874 txmore = igc_txeof(txr, txlimit);
2876 if (txr->me == 0) {
2879 igc_tx_common_locked(ifp, txr, IGC_TX_START);
2881 mutex_exit(&txr->txr_lock);
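
Lines 2761-2773, 2795-2835 and 2865-2881 are three callers of igc_txeof(): the MSI-X queue interrupt, the legacy interrupt, and the deferred softint/poll handler. They share the locked-drain pattern sketched below; the deferred handler additionally refills queue 0 (lines 2876-2879), since that is the only ring the legacy if_start path feeds. The softc back-pointer, ifnet expression and process-limit name are assumptions.

    struct igc_softc *sc = iq->sc;              /* assumed back-pointer */
    struct tx_ring *txr = iq->txr;
    struct ifnet *ifp = &sc->sc_ec.ec_if;       /* assumed softc layout */
    u_int txlimit = sc->sc_tx_process_limit;    /* assumed tunable name */
    bool txmore;

    mutex_enter(&txr->txr_lock);
    txmore = igc_txeof(txr, txlimit);
    if (txr->me == 0) {
        /* Queue 0 also serves the legacy if_start path; top it up. */
        igc_tx_common_locked(ifp, txr, IGC_TX_START);
    }
    mutex_exit(&txr->txr_lock);

    /* txmore (with its RX counterpart) decides whether to reschedule the
     * softint or re-enable this queue's interrupt. */
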
2941 igc_allocate_transmit_buffers(struct tx_ring *txr)
2943 struct igc_softc *sc = txr->sc;
2946 txr->tx_buffers =
2948 txr->txtag = txr->txdma.dma_tag;
2952 struct igc_tx_buf *txbuf = &txr->tx_buffers[id];
2954 error = bus_dmamap_create(txr->txdma.dma_tag,
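
Lines 2941-2954 allocate the per-slot bookkeeping: a zeroed array of igc_tx_buf and one bus_dma map per descriptor (the bus_dmamap_create() call at 2954 is cut off in the listing). A sketch of that loop; the map size, segment count and flags are assumptions chosen to suit a TSO-capable ring.

    static int
    igc_allocate_transmit_buffers(struct tx_ring *txr)
    {
        struct igc_softc *sc = txr->sc;
        int error;

        txr->tx_buffers = kmem_zalloc(sc->num_tx_desc *
            sizeof(struct igc_tx_buf), KM_SLEEP);
        txr->txtag = txr->txdma.dma_tag;

        for (int id = 0; id < sc->num_tx_desc; id++) {
            struct igc_tx_buf *txbuf = &txr->tx_buffers[id];

            /* Size, segment count and flags below are illustrative. */
            error = bus_dmamap_create(txr->txdma.dma_tag,
                IGC_TSO_SIZE, IGC_MAX_SCATTER, PAGE_SIZE, 0,
                BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txbuf->map);
            if (error != 0)
                return error;
        }
        return 0;
    }
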
2982 struct tx_ring *txr = &sc->tx_rings[iq];
2984 if (igc_setup_transmit_ring(txr))
3000 igc_setup_transmit_ring(struct tx_ring *txr)
3002 struct igc_softc *sc = txr->sc;
3005 if (igc_allocate_transmit_buffers(txr))
3009 memset(txr->tx_base, 0,
3013 txr->next_avail_desc = 0;
3014 txr->next_to_clean = 0;
3016 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
3017 txr->txdma.dma_map->dm_mapsize,
3020 txr->txr_interq = pcq_create(sc->num_tx_desc, KM_SLEEP);
3022 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
3040 struct tx_ring *txr = &sc->tx_rings[iq];
3042 txr->txdma.dma_map->dm_segs[0].ds_addr;
3051 IGC_WRITE_REG(hw, IGC_TDT(iq), 0 /* XXX txr->next_avail_desc */);
3054 txr->watchdog_timer = 0;
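
Lines 3040-3054 program the hardware per queue: the descriptor base address taken from the ring's DMA segment, the ring length, and the head/tail pointers (TDT is written as 0; the commented-out alternative at 3051 would restart from next_avail_desc). A partial sketch of that register setup; the base/length/head register names come from the standard igc register set, and further control-register setup is omitted.

    struct igc_hw *hw = &sc->hw;
    struct tx_ring *txr = &sc->tx_rings[iq];
    const uint64_t bus_addr = txr->txdma.dma_map->dm_segs[0].ds_addr;

    IGC_WRITE_REG(hw, IGC_TDBAL(iq), (uint32_t)bus_addr);
    IGC_WRITE_REG(hw, IGC_TDBAH(iq), (uint32_t)(bus_addr >> 32));
    IGC_WRITE_REG(hw, IGC_TDLEN(iq),
        sc->num_tx_desc * sizeof(union igc_adv_tx_desc));
    IGC_WRITE_REG(hw, IGC_TDH(iq), 0);
    IGC_WRITE_REG(hw, IGC_TDT(iq), 0 /* XXX txr->next_avail_desc */);

    txr->watchdog_timer = 0;
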
3088 struct tx_ring *txr = &sc->tx_rings[iq];
3090 igc_free_transmit_buffers(txr);
3100 igc_free_transmit_buffers(struct tx_ring *txr)
3102 struct igc_softc *sc = txr->sc;
3104 if (txr->tx_buffers == NULL)
3107 igc_withdraw_transmit_packets(txr, true);
3109 kmem_free(txr->tx_buffers,
3111 txr->tx_buffers = NULL;
3112 txr->txtag = NULL;
3114 pcq_destroy(txr->txr_interq);
3115 mutex_destroy(&txr->txr_lock);
3124 igc_withdraw_transmit_packets(struct tx_ring *txr, bool destroy)
3126 struct igc_softc *sc = txr->sc;
3127 struct igc_queue *q = txr->txr_igcq;
3129 mutex_enter(&txr->txr_lock);
3132 union igc_adv_tx_desc *txdesc = &txr->tx_base[id];
3134 igc_txdesc_sync(txr, id,
3139 igc_txdesc_sync(txr, id,
3142 struct igc_tx_buf *txbuf = &txr->tx_buffers[id];
3146 bus_dmamap_sync(txr->txdma.dma_tag, map,
3148 bus_dmamap_unload(txr->txdma.dma_tag, map);
3153 bus_dmamap_destroy(txr->txdma.dma_tag, map);
3158 txr->next_avail_desc = 0;
3159 txr->next_to_clean = 0;
3163 while ((m = pcq_get(txr->txr_interq)) != NULL) {
3168 mutex_exit(&txr->txr_lock);
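
Lines 3124-3168 drain everything still attached to the ring under txr_lock: every descriptor is cleared and synced, any loaded DMA map is synced and unloaded (and destroyed when `destroy` is true, i.e. on detach rather than on stop), the ring indices are reset, and the pcq backlog is freed. A condensed sketch of the per-slot body; the m_head field name is an assumption.

    for (int id = 0; id < sc->num_tx_desc; id++) {
        union igc_adv_tx_desc *txdesc = &txr->tx_base[id];
        struct igc_tx_buf *txbuf = &txr->tx_buffers[id];
        bus_dmamap_t map = txbuf->map;

        /* Clear the descriptor so later reuse starts from a known state. */
        igc_txdesc_sync(txr, id,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        memset(txdesc, 0, sizeof(*txdesc));
        igc_txdesc_sync(txr, id,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        if (map != NULL && map->dm_nsegs > 0) {
            bus_dmamap_sync(txr->txdma.dma_tag, map,
                0, map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(txr->txdma.dma_tag, map);
        }
        m_freem(txbuf->m_head);         /* assumed field name */
        txbuf->m_head = NULL;
        if (destroy && map != NULL) {
            bus_dmamap_destroy(txr->txdma.dma_tag, map);
            txbuf->map = NULL;
        }
    }
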
3179 igc_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, int prod,
3193 return igc_tso_setup(txr, mp, prod, cmd_type_len,
3317 (struct igc_adv_tx_context_desc *)&txr->tx_base[prod];
3320 igc_txdesc_sync(txr, prod,
3326 igc_txdesc_sync(txr, prod,
3343 igc_tso_setup(struct tx_ring *txr, struct mbuf *mp, int prod,
3436 (struct igc_adv_tx_context_desc *)&txr->tx_base[prod];
3439 igc_txdesc_sync(txr, prod,
3445 igc_txdesc_sync(txr, prod,
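
Lines 3179-3445 build the context descriptor that precedes a data descriptor whenever checksum offload, VLAN tagging or TSO is requested: igc_tx_ctx_setup() derives the offload fields from the mbuf's csum flags and hands TSO packets to igc_tso_setup(), and both write an igc_adv_tx_context_desc into the ring slot at `prod` (lines 3317-3326 and 3436-3445). A hedged sketch of writing that descriptor; the field names are the standard advanced-context-descriptor layout, and the computation of the offload values is elided.

    /* vlan_macip_lens, type_tucmd_mlhl and mss_l4len_idx are computed
     * from the mbuf's csum/TSO flags earlier in the function. */
    struct igc_adv_tx_context_desc *ctxdesc =
        (struct igc_adv_tx_context_desc *)&txr->tx_base[prod];

    igc_txdesc_sync(txr, prod,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    ctxdesc->vlan_macip_lens = htole32(vlan_macip_lens);
    ctxdesc->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
    ctxdesc->seqnum_seed = htole32(0);
    ctxdesc->mss_l4len_idx = htole32(mss_l4len_idx);
    igc_txdesc_sync(txr, prod,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
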