Lines Matching defs:txq
690 * with the actual hardware txq, or all of this will fall apart.
697 ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
713 txq->axq_qnum);
716 ATH_TXQ_LOCK(txq);
717 if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
718 struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
732 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
733 ATH_TXQ_UNLOCK(txq);
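The matches at 697-733 trace the multicast (CABQ staging) handoff: take the queue lock, chain the new frame onto the previous tail's descriptor if the queue is non-empty, append, and unlock. A minimal sketch of that shape, assuming the usual ath(4) ath_buf/ath_txq fields; the descriptor-link call inside the if-block is reconstructed, not quoted from the matches:

static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
        ATH_TXQ_LOCK(txq);
        if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
                struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);

                /* Point the old tail's link field at the new frame. */
                ath_hal_settxdesclink(sc->sc_ah, bf_last->bf_lastds,
                    bf->bf_daddr);
        }
        /* Park it; the beacon path flushes this queue to the hardware. */
        ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
        ATH_TXQ_UNLOCK(txq);
}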
740 ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
757 KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
777 ATH_TXQ_LOCK(txq);
802 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
804 "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
806 txq->axq_qnum,
808 txq->axq_depth);
816 if (txq->axq_link != NULL) {
817 *txq->axq_link = bf->bf_daddr;
820 txq->axq_qnum, txq->axq_link,
822 txq->axq_depth);
826 txq->axq_qnum, txq->axq_link,
842 if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
843 bf_first = TAILQ_FIRST(&txq->axq_q);
844 txq->axq_flags |= ATH_TXQ_PUTRUNNING;
845 ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
848 __func__, txq->axq_qnum,
850 txq->axq_depth);
854 txq->axq_qnum,
857 txq->axq_depth);
864 if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
868 txq->axq_qnum);
875 txq->axq_aggr_depth++;
880 ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
897 ath_hal_txstart(ah, txq->axq_qnum);
898 ATH_TXQ_UNLOCK(txq);
900 "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
909 ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
913 ATH_TXQ_LOCK_ASSERT(txq);
916 bf = TAILQ_FIRST(&txq->axq_q);
917 bf_last = ATH_TXQ_LAST(txq, axq_q_s);
925 txq->axq_qnum,
932 ath_tx_dump(sc, txq);
939 KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
942 txq->axq_qnum));
944 ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
945 txq->axq_flags |= ATH_TXQ_PUTRUNNING;
948 &txq->axq_link);
949 ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
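The restart helper at 909-949 re-arms a stopped queue from its software shadow: hand the head buffer's address back to the hardware, mark PUTRUNNING, refresh axq_link from the last descriptor, and start the queue. A sketch along those lines, with the empty-queue early return and the debug dump (ath_tx_dump at 932) elided:

static void
ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_buf *bf, *bf_last;

        ATH_TXQ_LOCK_ASSERT(txq);

        bf = TAILQ_FIRST(&txq->axq_q);
        bf_last = ATH_TXQ_LAST(txq, axq_q_s);
        if (bf == NULL)
                return;

        KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
            ("%s: Q%d: PUTRUNNING already set", __func__, txq->axq_qnum));

        ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
        txq->axq_flags |= ATH_TXQ_PUTRUNNING;
        ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
            &txq->axq_link);
        ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}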
955 * The relevant hardware txq should be locked.
958 ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
968 if (txq->axq_qnum == ATH_TXQ_SWQ)
969 ath_tx_handoff_mcast(sc, txq, bf);
971 ath_tx_handoff_hw(sc, txq, bf);
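The dispatcher at 955-971 is almost fully visible in the matches: the pseudo-queue number ATH_TXQ_SWQ identifies the per-VAP multicast staging queue, everything else is a real hardware queue.

static void
ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
        if (txq->axq_qnum == ATH_TXQ_SWQ)
                ath_tx_handoff_mcast(sc, txq, bf);
        else
                ath_tx_handoff_hw(sc, txq, bf);
}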
1502 ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
1536 ath_tx_handoff(sc, txq, bf);
1553 struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
1737 if (txq != sc->sc_ac2q[pri]) {
1739 "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
1741 txq,
1742 txq->axq_qnum,
1825 txq->axq_intrcnt = 0;
1826 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1828 txq->axq_intrcnt = 0;
1909 struct ath_txq *txq;
1922 * For multicast frames, the txq gets overridden appropriately
1947 txq = sc->sc_ac2q[pri];
2003 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
2016 txq = &avp->av_mcastq;
2071 r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2105 if (txq == &avp->av_mcastq) {
2109 ath_tx_xmit_normal(sc, txq, bf);
2112 ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2115 ath_tx_xmit_normal(sc, txq, bf);
2129 ath_tx_xmit_normal(sc, txq, bf);
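The matches from 1909-2129 show how the start path picks a queue: default to the per-AC hardware queue, override to the VAP multicast queue when the frame must be buffered behind the beacon, then either hand off directly or go through the per-TID software queue. The branch conditions themselves are not among the matched lines, so the predicate names below (is_bufferable_mcast, is_data_frame) are placeholders, not driver identifiers:

        txq = sc->sc_ac2q[pri];         /* default: per-AC hardware queue */
        bf->bf_state.bfs_tx_queue = txq->axq_qnum;

        if (is_bufferable_mcast)        /* placeholder predicate */
                txq = &avp->av_mcastq;  /* stage behind the beacon */

        r = ath_tx_normal_setup(sc, ni, bf, m0, txq);

        if (txq == &avp->av_mcastq)
                ath_tx_xmit_normal(sc, txq, bf);        /* straight onto the mcast queue */
        else if (is_data_frame)         /* placeholder predicate */
                ath_tx_swq(sc, ni, txq, queue_to_head, bf);     /* per-TID software queue */
        else
                ath_tx_xmit_normal(sc, txq, bf);        /* e.g. management frames */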
2888 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2911 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2913 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2929 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2941 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2949 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
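Lines 2888-2949 are the TID scheduler bookkeeping: every hardware queue carries a list (axq_tidq) of software TIDs with pending traffic, and TIDs are inserted at the head (2911) or tail (2913, 2929) when scheduled and removed when unscheduled. A sketch of the common path, assuming a tid->sched flag guards double-insertion (that flag is not in the matches):

static void
ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
{
        struct ath_txq *txq = sc->sc_ac2q[tid->ac];

        if (tid->sched)
                return;
        tid->sched = 1;
        TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
}

static void
ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
{
        struct ath_txq *txq = sc->sc_ac2q[tid->ac];

        if (tid->sched == 0)
                return;
        tid->sched = 0;
        TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
}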
3027 struct ath_txq *txq, struct ath_buf *bf)
3105 ath_tx_handoff(sc, txq, bf);
3116 struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3138 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3200 if (txq->axq_depth + txq->fifo.axq_depth == 0) {
3213 ath_tx_xmit_aggr(sc, an, txq, bf);
3238 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3239 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3255 ath_tx_xmit_normal(sc, txq, bf);
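In the software-queue entry point at 3116-3255 the direct-dispatch decision is driven by hardware queue depth: an idle queue lets an A-MPDU frame go straight out via ath_tx_xmit_aggr(), a shallow queue allows a normal direct dispatch, and anything else is parked on the TID software queue. Condensed, with the A-MPDU/pause checks (not matched here) reduced to comments:

        bf->bf_state.bfs_tx_queue = txq->axq_qnum;

        if (txq->axq_depth + txq->fifo.axq_depth == 0) {
                /* Hardware queue idle: push the (A-MPDU) frame straight out. */
                ath_tx_xmit_aggr(sc, an, txq, bf);
        } else if ((txq->axq_depth + txq->fifo.axq_depth <
                sc->sc_hwq_limit_nonaggr) &&
            (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
                /* Still room on the hardware queue: direct dispatch. */
                ath_tx_xmit_normal(sc, txq, bf);
        } else {
                /*
                 * Queue deep enough: append to the per-TID software queue
                 * and schedule the TID (see ath_tx_tid_sched above).
                 */
        }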
3830 struct ath_txq *txq;
3833 txq = sc->sc_ac2q[tid->ac];
3849 "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3855 txq->axq_qnum,
3856 txq->axq_depth,
3857 txq->axq_aggr_depth);
4122 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4132 * Iterate over all active tids for the given txq,
4135 while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4136 tid = TAILQ_FIRST(&txq->axq_tidq);
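The drain loop at 4122-4136 pops scheduled TIDs off axq_tidq until the list is empty. What happens to each TID (flushing its software queue, completing its frames) is not in the matched lines; the loop shape is simply:

        while (! TAILQ_EMPTY(&txq->axq_tidq)) {
                tid = TAILQ_FIRST(&txq->axq_tidq);
                /*
                 * Flush the TID's software queue, then take it off the
                 * scheduler list so the loop makes progress.
                 */
                ath_tx_tid_unsched(sc, tid);
        }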
4706 * Keep the txq lock held for now, as we need to ensure
5409 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5601 /* Punt to txq */
5602 ath_tx_handoff(sc, txq, bf);
5611 * Checking for an empty txq is done above.
5613 * XXX locking on txq here?
5616 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5640 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5711 /* Punt to hardware or software txq */
5712 ath_tx_handoff(sc, txq, bf);
5728 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
5737 * There's no FIFO, so txq->axq_depth is what's been scheduled
5747 * The FIFO depth is what's in the hardware; the txq->axq_depth
5757 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr) {
5768 if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) {
5773 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
5775 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
5805 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5808 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
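The scheduler at 5728-5808 is the refill loop: bail out early if the hardware queue is already deep enough, then round-robin over the scheduled TIDs, re-checking the depth limits after each one and stopping once the TID that was last at entry has been serviced. The per-TID service call (the aggregate vs. normal choice handled by the functions at 5409 and 5640) is left as a comment since it isn't among the matched lines:

static void
ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_tid *tid, *next, *last;

        /* Don't refill a hardware queue that's already deep enough. */
        if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr)
                return;
        if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr)
                return;

        last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);

        TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
                /*
                 * Service this TID here: hand frames to the hardware via
                 * the aggregate or normal path, depending on A-MPDU state.
                 */
                if (txq->axq_aggr_depth + txq->fifo.axq_depth >=
                    sc->sc_hwq_limit_aggr)
                        break;
                if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr)
                        break;
                if (tid == last)
                        break;
        }
}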
6283 * the sched/unsched operations involve walking the per-txq
6290 struct ath_txq *txq;
6308 txq = sc->sc_ac2q[atid->ac];
6327 struct ath_txq *txq;
6352 txq = sc->sc_ac2q[atid->ac];