Lines Matching defs:bf

128 struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
133 struct ath_tid *tid, struct ath_buf *bf);
139 struct ath_buf *bf;
144 bf = bf_first;
146 while (bf != NULL) {
148 if (bf->bf_nseg == 0)
150 n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
151 for (i = 0, ds = (const char *) bf->bf_desc;
159 bf = bf->bf_next;
196 ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
200 wh = mtod(bf->bf_m, struct ieee80211_frame *);
202 if (bf->bf_state.bfs_isretried == 0) {
204 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
207 bf->bf_state.bfs_isretried = 1;
208 bf->bf_state.bfs_retries ++;
257 struct ath_buf *bf, *next;
261 TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
262 /* NB: bf assumed clean */
263 TAILQ_REMOVE(frags, bf, bf_list);
264 ath_returnbuf_head(sc, bf);
279 struct ath_buf *bf;
284 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
285 if (bf == NULL) { /* out of buffers, cleanup */
292 TAILQ_INSERT_TAIL(frags, bf, bf_list);
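
The lines from ath_txfrag_cleanup() (257-264) and ath_txfrag_setup() (279-292) are a plain queue(3) reserve/undo pair: setup takes one ath_buf per fragment and queues it on a caller-supplied TAILQ, and cleanup walks that list with the _SAFE iterator so each entry can be unlinked and returned inside the loop. A minimal standalone sketch of the same pattern, using FreeBSD's <sys/queue.h> with malloc/free standing in for the driver's buffer pool:

    #include <sys/queue.h>
    #include <stdlib.h>

    struct elem {
            TAILQ_ENTRY(elem) link;
    };
    TAILQ_HEAD(elemhead, elem);

    /* Reserve one element per fragment; on shortage, undo the partial list. */
    static int
    frag_setup(struct elemhead *frags, int nfrags)
    {
            struct elem *e, *next;
            int i;

            TAILQ_INIT(frags);
            for (i = 0; i < nfrags; i++) {
                    e = malloc(sizeof(*e));     /* stands in for _ath_getbuf_locked() */
                    if (e == NULL)
                            goto fail;
                    TAILQ_INSERT_TAIL(frags, e, link);
            }
            return (1);
    fail:
            /* The _SAFE iterator fetches 'next' before the entry is unlinked. */
            TAILQ_FOREACH_SAFE(e, frags, link, next) {
                    TAILQ_REMOVE(frags, e, link);
                    free(e);                    /* stands in for ath_returnbuf_head() */
            }
            return (0);
    }
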
300 ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
309 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
310 bf->bf_segs, &bf->bf_nseg,
314 bf->bf_nseg = ATH_MAX_SCATTER + 1;
325 if (bf->bf_nseg > ATH_MAX_SCATTER) { /* too many desc's, linearize */
334 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
335 bf->bf_segs, &bf->bf_nseg,
342 KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
343 ("too many segments after defrag; nseg %u", bf->bf_nseg));
344 } else if (bf->bf_nseg == 0) { /* null packet, discard */
351 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
352 bf->bf_m = m0;
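
ath_tx_dmasetup() (300-352) follows the usual FreeBSD busdma treatment of an outbound mbuf chain: load it, and if it maps to more scatter/gather segments than ATH_MAX_SCATTER, linearize the chain and load it once more, then sync the map PREWRITE before the frame goes to the hardware. A condensed sketch of that retry pattern; the busdma and mbuf calls are the standard KPIs, everything else is a placeholder:

    /*
     * Kernel-context sketch; needs <sys/param.h>, <sys/systm.h>, <sys/mbuf.h>,
     * <sys/bus.h> and <machine/bus.h>.  'maxseg' plays the role of ATH_MAX_SCATTER.
     */
    static int
    dma_load_mbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **m0,
        bus_dma_segment_t *segs, int *nseg, int maxseg)
    {
            int error;

            error = bus_dmamap_load_mbuf_sg(tag, map, *m0, segs, nseg,
                BUS_DMA_NOWAIT);
            if (error == EFBIG) {
                    /* Too many segments: squeeze the chain into fewer clusters. */
                    struct mbuf *m = m_collapse(*m0, M_NOWAIT, maxseg);

                    if (m == NULL)
                            return (ENOMEM);
                    *m0 = m;            /* the chain may have been rewritten */
                    error = bus_dmamap_load_mbuf_sg(tag, map, *m0, segs, nseg,
                        BUS_DMA_NOWAIT);
            }
            if (error != 0)
                    return (error);
            if (*nseg == 0) {           /* nothing to transmit */
                    bus_dmamap_unload(tag, map);
                    return (EIO);
            }
            /* Flush CPU writes so the device sees the frame contents. */
            bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
            return (0);
    }

The single retry is deliberate: after the linearize pass the driver asserts rather than loops, as the KASSERT at line 342-343 ("too many segments after defrag") shows.
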
364 struct ath_buf *bf, bool is_aggr, int is_first_subframe,
396 ds = (char *) bf->bf_desc;
400 for (i = 0; i < bf->bf_nseg; i++) {
401 bufAddrList[bp] = bf->bf_segs[i].ds_addr;
402 segLenList[bp] = bf->bf_segs[i].ds_len;
409 if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
417 if (i == bf->bf_nseg - 1)
421 bf->bf_daddr + dd->dd_descsize * (dsp + 1));
432 , bf->bf_descid /* XXX desc id */
433 , bf->bf_state.bfs_tx_queue
435 , i == bf->bf_nseg - 1 /* last segment */
469 bf->bf_state.bfs_ndelim);
472 bf->bf_lastds = (struct ath_desc *) ds;
486 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
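
ath_tx_chaindesclist() (364-486) walks bf->bf_segs[] and packs the segment addresses and lengths into hardware TX descriptors, up to sc_tx_nmaps buffer pointers per descriptor, linking each full descriptor to the bus address of the next one and remembering the final descriptor as bf_lastds. The HAL does the actual field encoding, so the sketch below only shows the shape of that loop, with a hypothetical descriptor layout and illustrative sizes:

    #include <stdint.h>
    #include <stddef.h>

    #define PTRS_PER_DESC   4               /* placeholder for sc->sc_tx_nmaps */

    struct seg {                            /* same fields as bus_dma_segment_t */
            uint64_t ds_addr;
            uint32_t ds_len;
    };

    struct txdesc {                         /* hypothetical descriptor layout */
            uint32_t addr[PTRS_PER_DESC];
            uint16_t len[PTRS_PER_DESC];
            uint32_t link;                  /* bus address of the next descriptor */
    };

    /*
     * descs: CPU view of the descriptor block; desc_paddr: its bus address.
     * Returns the last descriptor used (what the driver keeps as bf_lastds).
     */
    static struct txdesc *
    chain_descs(struct txdesc *descs, uint32_t desc_paddr,
        const struct seg *seg, int nseg)
    {
            struct txdesc *ds = descs;
            int i, slot = 0;

            for (i = 0; i < nseg; i++) {
                    ds->addr[slot] = (uint32_t)seg[i].ds_addr;
                    ds->len[slot] = (uint16_t)seg[i].ds_len;
                    slot++;
                    if (i == nseg - 1) {
                            ds->link = 0;   /* final descriptor: no onward link */
                    } else if (slot == PTRS_PER_DESC) {
                            /* Descriptor full: link it to the next one and move on. */
                            ds->link = desc_paddr +
                                (uint32_t)(sizeof(*ds) * (size_t)(ds - descs + 1));
                            ds++;
                            slot = 0;
                    }
            }
            return (ds);
    }
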
506 struct ath_buf *bf)
508 struct ath_rc_series *rc = bf->bf_state.bfs_rc;
511 if (! bf->bf_state.bfs_ismrr)
518 else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
533 ath_buf_set_rate(sc, ni, bf);
535 ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
547 * bf->bf_next.
552 struct ath_buf *bf, *bf_prev = NULL;
559 bf = bf_first;
561 if (bf->bf_state.bfs_txrate0 == 0)
562 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
563 __func__, bf, 0);
564 if (bf->bf_state.bfs_rc[0].ratecode == 0)
565 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
566 __func__, bf, 0);
572 while (bf != NULL) {
574 "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
575 __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
576 SEQNO(bf->bf_state.bfs_seqno));
582 ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
583 , bf->bf_state.bfs_pktlen /* packet length */
584 , bf->bf_state.bfs_hdrlen /* header length */
585 , bf->bf_state.bfs_atype /* Atheros packet type */
586 , bf->bf_state.bfs_txpower /* txpower */
587 , bf->bf_state.bfs_txrate0
588 , bf->bf_state.bfs_try0 /* series 0 rate/tries */
589 , bf->bf_state.bfs_keyix /* key cache index */
590 , bf->bf_state.bfs_txantenna /* antenna mode */
591 , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */
592 , bf->bf_state.bfs_ctsrate /* rts/cts rate */
593 , bf->bf_state.bfs_ctsduration /* rts/cts duration */
600 if (bf == bf_first) {
604 ath_tx_set_ratectrl(sc, bf->bf_node, bf);
611 ath_tx_chaindesclist(sc, ds0, bf,
613 !! (bf == bf_first), /* is_first_subframe */
614 !! (bf->bf_next == NULL) /* is_last_subframe */
617 if (bf == bf_first) {
624 bf->bf_state.bfs_al,
625 bf->bf_state.bfs_ndelim);
634 bf->bf_daddr);
637 bf_prev = bf;
638 bf = bf->bf_next;
698 struct ath_buf *bf)
702 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
703 ("%s: busy status 0x%x", __func__, bf->bf_flags));
709 if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
711 "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
712 __func__, bf, bf->bf_state.bfs_tx_queue,
730 bf->bf_daddr);
732 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
741 struct ath_buf *bf)
755 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
756 ("%s: busy status 0x%x", __func__, bf->bf_flags));
802 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
804 "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
807 bf,
817 *txq->axq_link = bf->bf_daddr;
821 (caddr_t)bf->bf_daddr, bf->bf_desc,
827 (caddr_t)bf->bf_daddr, bf->bf_desc,
828 bf->bf_lastds);
861 * Ensure that the bf TXQ matches this TXQ, so later
864 if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
866 "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
867 __func__, bf, bf->bf_state.bfs_tx_queue,
874 if (bf->bf_state.bfs_aggr)
880 ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
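
The hardware handoff (741-880) appends to a running TX queue by patching the previous tail's link word: when axq_link is non-NULL the new buffer's bus address is written through it (line 817), otherwise, in the idle-queue path not matched here, the buffer is handed to the DMA engine directly, and in either case axq_link is advanced to the new last descriptor via ath_hal_gettxdesclinkptr(). The same bookkeeping, reduced to a hypothetical descriptor and queue type:

    #include <stdint.h>
    #include <stddef.h>

    struct desc {                           /* hypothetical TX descriptor */
            uint32_t link;                  /* bus address of the next descriptor */
            /* ... hardware-specific fields ... */
    };

    struct hwq {
            uint32_t *tail_link;            /* link word of the queue's last descriptor */
    };

    /* Stand-in for ath_hal_puttxbuf() + ath_hal_txstart() on an idle queue. */
    void start_dma(struct hwq *q, uint32_t first_daddr);

    static void
    append_to_hwq(struct hwq *q, struct desc *last_ds, uint32_t daddr)
    {
            if (q->tail_link == NULL)
                    start_dma(q, daddr);    /* queue idle: point the DMA engine at us */
            else
                    *q->tail_link = daddr;  /* patch the previous tail's link word */
            /* Later appends will patch this frame's last descriptor. */
            q->tail_link = &last_ds->link;
    }
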
911 struct ath_buf *bf, *bf_last;
916 bf = TAILQ_FIRST(&txq->axq_q);
919 if (bf == NULL)
923 "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
926 bf,
928 (uint32_t) bf->bf_daddr);
944 ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
959 struct ath_buf *bf)
965 ath_tx_alq_post(sc, bf);
969 ath_tx_handoff_mcast(sc, txq, bf);
971 ath_tx_handoff_hw(sc, txq, bf);
1044 ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
1053 flags = bf->bf_state.bfs_txflags;
1054 rix = bf->bf_state.bfs_rc[0].rix;
1055 shortPreamble = bf->bf_state.bfs_shpream;
1056 wh = mtod(bf->bf_m, struct ieee80211_frame *);
1059 if (bf->bf_flags & ATH_BUF_TOA_PROBE) {
1062 bf->bf_state.bfs_doprot = 0;
1074 bf->bf_state.bfs_doprot = 1;
1107 bf->bf_state.bfs_txflags = flags;
1117 ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
1125 int isfrag = bf->bf_m->m_flags & M_FRAG;
1127 flags = bf->bf_state.bfs_txflags;
1128 rix = bf->bf_state.bfs_rc[0].rix;
1129 shortPreamble = bf->bf_state.bfs_shpream;
1130 wh = mtod(bf->bf_m, struct ieee80211_frame *);
1155 bf->bf_nextfraglen,
1165 bf->bf_state.bfs_ismrr = 0;
1166 bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
1256 ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
1260 uint8_t rix = bf->bf_state.bfs_rc[0].rix;
1267 if ((bf->bf_state.bfs_txflags &
1270 bf->bf_state.bfs_ctsrate = 0;
1271 bf->bf_state.bfs_ctsduration = 0;
1279 if (bf->bf_state.bfs_doprot)
1282 rix = bf->bf_state.bfs_rc[0].rix;
1288 if (bf->bf_state.bfs_ctsrate0 != 0)
1289 cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
1296 bf->bf_state.bfs_shpream);
1301 bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
1302 rt, bf->bf_state.bfs_txflags);
1305 bf->bf_state.bfs_ctsrate = ctsrate;
1306 bf->bf_state.bfs_ctsduration = ctsduration;
1312 bf->bf_state.bfs_ismrr = 0;
1313 bf->bf_state.bfs_try0 =
1314 bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */
1328 ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
1330 struct ath_desc *ds = bf->bf_desc;
1333 if (bf->bf_state.bfs_txrate0 == 0)
1335 "%s: bf=%p, txrate0=%d\n", __func__, bf, 0);
1338 , bf->bf_state.bfs_pktlen /* packet length */
1339 , bf->bf_state.bfs_hdrlen /* header length */
1340 , bf->bf_state.bfs_atype /* Atheros packet type */
1341 , bf->bf_state.bfs_txpower /* txpower */
1342 , bf->bf_state.bfs_txrate0
1343 , bf->bf_state.bfs_try0 /* series 0 rate/tries */
1344 , bf->bf_state.bfs_keyix /* key cache index */
1345 , bf->bf_state.bfs_txantenna /* antenna mode */
1346 , bf->bf_state.bfs_txflags /* flags */
1347 , bf->bf_state.bfs_ctsrate /* rts/cts rate */
1348 , bf->bf_state.bfs_ctsduration /* rts/cts duration */
1354 bf->bf_lastds = ds;
1355 bf->bf_last = bf;
1358 ath_tx_set_ratectrl(sc, bf->bf_node, bf);
1359 ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
1376 ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf, int tid,
1384 if (! bf->bf_state.bfs_doratelookup)
1388 bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1390 ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
1391 ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
1395 bf->bf_state.bfs_rc[0].rix = rix;
1396 bf->bf_state.bfs_rc[0].ratecode = rate;
1397 bf->bf_state.bfs_rc[0].tries = try0;
1399 if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
1400 ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
1401 is_aggr, bf->bf_state.bfs_rc);
1402 ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));
1406 bf->bf_state.bfs_try0 = try0;
1407 bf->bf_state.bfs_txrate0 = rate;
1408 bf->bf_state.bfs_rc_maxpktlen = maxpktlen;
1416 struct ath_buf *bf)
1418 struct ath_node *an = ATH_NODE(bf->bf_node);
1423 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
1503 struct ath_buf *bf)
1505 struct ath_node *an = ATH_NODE(bf->bf_node);
1506 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
1519 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
1522 ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, false);
1523 ath_tx_calc_duration(sc, bf);
1524 ath_tx_calc_protection(sc, bf);
1525 ath_tx_set_rtscts(sc, bf);
1526 ath_tx_rate_fill_rcflags(sc, bf);
1527 ath_tx_setds(sc, bf);
1533 bf->bf_comp = ath_tx_normal_comp;
1536 ath_tx_handoff(sc, txq, bf);
1553 struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
1607 error = ath_tx_dmasetup(sc, bf, m0);
1611 bf->bf_node = ni; /* NB: held reference */
1612 m0 = bf->bf_m; /* NB: may have changed */
1616 ds = bf->bf_desc;
1697 bf->bf_state.bfs_doratelookup = 1;
1797 bf->bf_flags |= ATH_BUF_TOA_PROBE;
1858 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1864 bf->bf_state.bfs_rc[0].rix = rix;
1865 bf->bf_state.bfs_rc[0].tries = try0;
1866 bf->bf_state.bfs_rc[0].ratecode = txrate;
1869 bf->bf_state.bfs_pktlen = pktlen;
1870 bf->bf_state.bfs_hdrlen = hdrlen;
1871 bf->bf_state.bfs_atype = atype;
1872 bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
1873 bf->bf_state.bfs_txrate0 = txrate;
1874 bf->bf_state.bfs_try0 = try0;
1875 bf->bf_state.bfs_keyix = keyix;
1876 bf->bf_state.bfs_txantenna = sc->sc_txantenna;
1877 bf->bf_state.bfs_txflags = flags;
1878 bf->bf_state.bfs_shpream = shortPreamble;
1881 bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */
1882 bf->bf_state.bfs_ctsrate = 0; /* calculated later */
1883 bf->bf_state.bfs_ctsduration = 0;
1884 bf->bf_state.bfs_ismrr = ismrr;
1902 struct ath_buf *bf, struct mbuf *m0)
2002 bf->bf_state.bfs_tid = tid;
2003 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
2004 bf->bf_state.bfs_pri = pri;
2022 bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
2028 bf->bf_state.bfs_dobaw = 0;
2045 seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
2054 bf->bf_state.bfs_dobaw = 1;
2062 bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
2071 r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2077 m0 = bf->bf_m;
2107 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf);
2108 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2109 ath_tx_xmit_normal(sc, txq, bf);
2112 ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2114 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2115 ath_tx_xmit_normal(sc, txq, bf);
2122 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2128 ath_tx_leak_count_update(sc, tid, bf);
2129 ath_tx_xmit_normal(sc, txq, bf);
2137 struct ath_buf *bf, struct mbuf *m0,
2173 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf);
2216 bf->bf_state.bfs_dobaw = 0;
2218 error = ath_tx_dmasetup(sc, bf, m0);
2221 m0 = bf->bf_m; /* NB: may have changed */
2224 bf->bf_node = ni; /* NB: held reference */
2233 bf->bf_state.bfs_doprot = 1;
2265 bf->bf_flags |= ATH_BUF_TOA_PROBE;
2282 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
2311 ds = bf->bf_desc;
2315 bf->bf_state.bfs_pktlen = pktlen;
2316 bf->bf_state.bfs_hdrlen = hdrlen;
2317 bf->bf_state.bfs_atype = atype;
2318 bf->bf_state.bfs_txpower = MIN(params->ibp_power,
2320 bf->bf_state.bfs_txrate0 = txrate;
2321 bf->bf_state.bfs_try0 = try0;
2322 bf->bf_state.bfs_keyix = keyix;
2323 bf->bf_state.bfs_txantenna = txantenna;
2324 bf->bf_state.bfs_txflags = flags;
2325 bf->bf_state.bfs_shpream =
2329 bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
2330 bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2331 bf->bf_state.bfs_pri = pri;
2334 bf->bf_state.bfs_ctsrate = 0;
2335 bf->bf_state.bfs_ctsduration = 0;
2336 bf->bf_state.bfs_ismrr = ismrr;
2339 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
2341 bf->bf_state.bfs_rc[0].rix = rix;
2342 bf->bf_state.bfs_rc[0].tries = try0;
2343 bf->bf_state.bfs_rc[0].ratecode = txrate;
2349 bf->bf_state.bfs_rc[1].rix = rix;
2350 bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
2353 bf->bf_state.bfs_rc[2].rix = rix;
2354 bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
2357 bf->bf_state.bfs_rc[3].rix = rix;
2358 bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
2364 ath_tx_rate_fill_rcflags(sc, bf);
2381 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2388 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2392 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
2394 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2395 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2399 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2405 ath_tx_leak_count_update(sc, tid, bf);
2406 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2422 struct ath_buf *bf;
2473 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
2474 if (bf == NULL) {
2480 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n",
2481 m, params, bf);
2488 if (ath_tx_start(sc, ni, bf, m)) {
2497 if (ath_tx_raw_start(sc, ni, bf, m, params)) {
2527 "bf=%p",
2530 bf);
2532 ath_returnbuf_head(sc, bf);
2636 struct ath_tid *tid, struct ath_buf *bf)
2643 if (bf->bf_state.bfs_isretried)
2648 if (! bf->bf_state.bfs_dobaw) {
2651 __func__, SEQNO(bf->bf_state.bfs_seqno),
2655 if (bf->bf_state.bfs_addedbaw)
2659 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2668 SEQNO(bf->bf_state.bfs_seqno))) {
2670 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "
2672 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2681 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
2686 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2699 "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
2703 bf,
2704 SEQNO(bf->bf_state.bfs_seqno)
2707 tid->tx_buf[cindex] = bf;
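
ath_tx_addto_baw() (2636-2707) parks the buffer in the software block-ack window: ATH_BA_INDEX() turns the frame's sequence number into an offset from the window start, that offset plus baw_head picks a slot in the circular tx_buf[] ring, and the buffer pointer is stored there (with the "outside of BAW??" check at 2668-2672 guarding against a bad offset). The arithmetic restated with plain integers and illustrative sizes:

    #include <assert.h>

    #define SEQ_RANGE       4096            /* 802.11 sequence number space */
    #define RING_SLOTS      64              /* illustrative size of the tx_buf[] ring */

    /* Offset of 'seqno' from the window start, wrapped within the sequence space. */
    static int
    ba_index(int window_start, int seqno)
    {
            return ((seqno - window_start) & (SEQ_RANGE - 1));
    }

    /* Map that offset onto the circular tx_buf[] ring starting at 'baw_head'. */
    static int
    ba_slot(int baw_head, int window_start, int seqno)
    {
            int index = ba_index(window_start, seqno);

            assert(index < RING_SLOTS);     /* otherwise the frame lies outside the BAW */
            return ((baw_head + index) % RING_SLOTS);
    }
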
2759 "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
2774 struct ath_tid *tid, const struct ath_buf *bf)
2778 int seqno = SEQNO(bf->bf_state.bfs_seqno);
2802 if (tid->tx_buf[cindex] != bf) {
2804 "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
2805 __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
2825 struct ath_buf *bf)
2832 wh = mtod(bf->bf_m, struct ieee80211_frame *);
2856 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2963 struct ath_buf *bf, struct mbuf *m0)
3027 struct ath_txq *txq, struct ath_buf *bf)
3029 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
3038 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3044 if (bf->bf_state.bfs_dobaw &&
3046 SEQNO(bf->bf_state.bfs_seqno)))) {
3047 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3062 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
3065 bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);
3066 bf->bf_state.bfs_aggr = 0;
3067 bf->bf_state.bfs_nframes = 1;
3071 ath_tx_update_clrdmask(sc, tid, bf);
3074 ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen,
3076 ath_tx_calc_duration(sc, bf);
3077 ath_tx_calc_protection(sc, bf);
3078 ath_tx_set_rtscts(sc, bf);
3079 ath_tx_rate_fill_rcflags(sc, bf);
3080 ath_tx_setds(sc, bf);
3089 if (bf->bf_state.bfs_dobaw) {
3090 ath_tx_addto_baw(sc, an, tid, bf);
3091 bf->bf_state.bfs_addedbaw = 1;
3095 bf->bf_comp = ath_tx_aggr_comp;
3102 ath_tx_leak_count_update(sc, tid, bf);
3105 ath_tx_handoff(sc, txq, bf);
3116 struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3122 struct mbuf *m0 = bf->bf_m;
3132 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
3133 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
3137 bf->bf_state.bfs_tid = tid;
3138 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3139 bf->bf_state.bfs_pri = pri;
3158 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
3160 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3164 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3186 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3201 bf = ATH_TID_FIRST(atid);
3202 ATH_TID_REMOVE(atid, bf, bf_list);
3209 bf->bf_state.bfs_aggr = 0;
3210 bf->bf_state.bfs_nframes = 1;
3213 ath_tx_xmit_aggr(sc, an, txq, bf);
3243 ath_tx_update_clrdmask(sc, atid, bf);
3250 ath_tx_leak_count_update(sc, atid, bf);
3255 ath_tx_xmit_normal(sc, txq, bf);
3259 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3409 struct ath_buf *bf)
3418 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
3421 ath_tx_set_retry(sc, bf);
3424 ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);
3434 struct ath_buf *bf)
3447 ath_tx_tid_filt_addbuf(sc, tid, bf);
3460 struct ath_buf *bf;
3479 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) {
3480 ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3481 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3497 * since the buffer may be cloned, bf must be not touched after this
3502 struct ath_buf *bf)
3512 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3515 "%s: bf=%p, seqno=%d, exceeded retries\n",
3517 bf,
3518 SEQNO(bf->bf_state.bfs_seqno));
3527 if (bf->bf_flags & ATH_BUF_BUSY) {
3528 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3531 __func__, bf, nbf);
3533 nbf = bf;
3539 __func__, bf);
3555 struct ath_buf *bf, *bf_next, *nbf;
3559 bf = bf_first;
3560 while (bf) {
3561 bf_next = bf->bf_next;
3562 bf->bf_next = NULL; /* Remove it from the aggr list */
3567 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3570 "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n",
3573 bf,
3574 SEQNO(bf->bf_state.bfs_seqno));
3575 TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3579 if (bf->bf_flags & ATH_BUF_BUSY) {
3580 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3583 __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno));
3585 nbf = bf;
3595 __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno));
3596 TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3601 bf = bf_next;
3787 struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
3797 bf->bf_state.bfs_dobaw) {
3803 if (bf->bf_state.bfs_retries > 0) {
3804 ath_tx_update_baw(sc, an, tid, bf);
3805 bf->bf_state.bfs_dobaw = 0;
3811 if (! bf->bf_state.bfs_addedbaw)
3814 __func__, SEQNO(bf->bf_state.bfs_seqno));
3819 bf->bf_next = NULL;
3822 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
3827 const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
3837 "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, "
3843 bf,
3844 bf->bf_state.bfs_addedbaw,
3845 bf->bf_state.bfs_dobaw,
3846 SEQNO(bf->bf_state.bfs_seqno),
3847 bf->bf_state.bfs_retries);
3849 "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3854 bf,
3859 "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
3865 bf,
3888 mtod(bf->bf_m, const uint8_t *),
3889 bf->bf_m->m_len, 0, -1);
3911 struct ath_buf *bf;
3923 bf = ATH_TID_FIRST(tid);
3924 if (bf == NULL) {
3929 ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3933 ATH_TID_REMOVE(tid, bf, bf_list);
3934 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3940 bf = ATH_TID_FILT_FIRST(tid);
3941 if (bf == NULL)
3945 ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3949 ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3950 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
4071 struct ath_buf *bf;
4112 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4113 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4114 ath_tx_default_comp(sc, bf, 0);
4126 struct ath_buf *bf;
4143 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4144 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4145 ath_tx_default_comp(sc, bf, 0);
4166 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4168 struct ieee80211_node *ni = bf->bf_node;
4170 int tid = bf->bf_state.bfs_tid;
4172 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4177 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
4178 __func__, bf, fail, atid->hwq_depth - 1);
4194 ath_tx_tid_filt_comp_buf(sc, atid, bf);
4235 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4236 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4238 bf->bf_state.bfs_pktlen,
4239 bf->bf_state.bfs_pktlen,
4242 ath_tx_default_comp(sc, bf, fail);
4253 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4255 struct ieee80211_node *ni = bf->bf_node;
4257 int tid = bf->bf_state.bfs_tid;
4267 if (bf->bf_state.bfs_dobaw) {
4268 ath_tx_update_baw(sc, an, atid, bf);
4269 if (!bf->bf_state.bfs_addedbaw)
4272 __func__, SEQNO(bf->bf_state.bfs_seqno));
4284 ath_tx_default_comp(sc, bf, 0);
4297 struct ath_buf *bf, *bf_next;
4309 bf = bf_head;
4310 while (bf != NULL) {
4311 bf_next = bf->bf_next; /* next aggregate frame, or NULL */
4321 if (bf->bf_state.bfs_addedbaw) {
4322 ath_tx_update_baw(sc, an, atid, bf);
4323 bf->bf_state.bfs_dobaw = 0;
4329 bf->bf_comp = ath_tx_normal_comp;
4330 bf->bf_next = NULL;
4335 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
4340 bf = bf_next;
4362 struct ath_buf *bf, *bf_next;
4376 while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
4377 ATH_TID_FILT_REMOVE(atid, bf, bf_list);
4378 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4387 bf = ATH_TID_FIRST(atid);
4388 while (bf) {
4393 bf_next = TAILQ_NEXT(bf, bf_list);
4398 ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq);
4403 bf = bf_next;
4430 struct ath_tid *tid, struct ath_buf *bf)
4438 * works out, 'bf' will have no DMA mapping, no mbuf
4441 nbf = ath_buf_clone(sc, bf);
4475 if (bf->bf_state.bfs_dobaw)
4476 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
4479 ath_freebuf(sc, bf);
4495 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4497 struct ieee80211_node *ni = bf->bf_node;
4499 int tid = bf->bf_state.bfs_tid;
4515 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4516 (bf->bf_flags & ATH_BUF_BUSY)) {
4518 nbf = ath_tx_retry_clone(sc, an, atid, bf);
4520 /* bf has been freed at this point */
4521 bf = nbf;
4523 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4526 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4529 __func__, SEQNO(bf->bf_state.bfs_seqno));
4533 if (bf->bf_state.bfs_dobaw) {
4534 ath_tx_update_baw(sc, an, atid, bf);
4535 if (! bf->bf_state.bfs_addedbaw)
4538 __func__, SEQNO(bf->bf_state.bfs_seqno));
4540 bf->bf_state.bfs_dobaw = 0;
4551 /* Free buffer, bf is free after this call */
4552 ath_tx_default_comp(sc, bf, 0);
4561 ath_tx_set_retry(sc, bf);
4568 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
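
The unaggregated retry path (4495-4568) cannot rewrite a buffer the hardware may still own: if ATH_BUF_BUSY is set the frame is first cloned onto a fresh ath_buf via ath_tx_retry_clone(), and if the clone fails or bfs_retries has reached SWMAX_RETRIES the frame is completed as an error (unblocking the BAW and scheduling a BAR) instead of being requeued. A standalone restatement of that decision, with hypothetical helpers standing in for the driver calls:

    #include <stdbool.h>
    #include <stddef.h>

    #define SWMAX_RETRIES   10              /* illustrative software retry cap */

    struct buf {
            bool busy;                      /* hardware may still hold a reference */
            int retries;
    };

    /* Hypothetical stand-ins for ath_tx_retry_clone(), the error-completion/BAR
     * path, and ATH_TID_INSERT_HEAD() respectively. */
    struct buf *clone_buf(struct buf *bf);
    void complete_error(struct buf *bf);
    void requeue_head(struct buf *bf);

    static void
    retry_frame(struct buf *bf)
    {
            /* A busy buffer cannot be rewritten in place; retry a clone instead. */
            if (bf->retries < SWMAX_RETRIES && bf->busy) {
                    struct buf *nbf = clone_buf(bf);

                    if (nbf != NULL)
                            bf = nbf;       /* the original is gone after the clone */
                    else
                            bf->retries = SWMAX_RETRIES + 1;  /* force the drop path */
            }
            if (bf->retries >= SWMAX_RETRIES) {
                    complete_error(bf);     /* drop: unblock the BAW, schedule a BAR */
                    return;
            }
            bf->retries++;                  /* mark it retried ... */
            requeue_head(bf);               /* ... and put it back at the head of the TID queue */
    }
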
4585 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
4588 struct ieee80211_node *ni = bf->bf_node;
4590 int tid = bf->bf_state.bfs_tid;
4596 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
4597 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
4599 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
4609 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4610 (bf->bf_flags & ATH_BUF_BUSY)) {
4612 nbf = ath_tx_retry_clone(sc, an, atid, bf);
4614 /* bf has been freed at this point */
4615 bf = nbf;
4617 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4620 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4624 __func__, SEQNO(bf->bf_state.bfs_seqno));
4625 ath_tx_update_baw(sc, an, atid, bf);
4626 if (!bf->bf_state.bfs_addedbaw)
4629 __func__, SEQNO(bf->bf_state.bfs_seqno));
4630 bf->bf_state.bfs_dobaw = 0;
4634 ath_tx_set_retry(sc, bf);
4636 bf->bf_next = NULL; /* Just to make sure */
4639 bf->bf_state.bfs_aggr = 0;
4640 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */
4641 bf->bf_state.bfs_nframes = 1;
4643 TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
4656 struct ath_buf *bf_next, *bf;
4679 bf = bf_first;
4680 while (bf) {
4681 bf_next = bf->bf_next;
4682 bf->bf_next = NULL; /* Remove it from the aggr list */
4684 if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4686 bf->bf_next = NULL;
4687 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4689 bf = bf_next;
4693 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4694 TAILQ_REMOVE(&bf_q, bf, bf_list);
4695 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
4724 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4725 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4726 ath_tx_default_comp(sc, bf, 0);
4739 struct ath_buf *bf, *bf_next;
4751 bf = bf_first;
4752 while (bf) {
4754 if (bf->bf_state.bfs_dobaw) {
4755 ath_tx_update_baw(sc, an, atid, bf);
4756 if (!bf->bf_state.bfs_addedbaw)
4759 __func__, SEQNO(bf->bf_state.bfs_seqno));
4761 bf = bf->bf_next;
4784 bf = bf_first;
4785 while (bf) {
4786 bf_next = bf->bf_next;
4787 bf->bf_next = NULL;
4788 ath_tx_default_comp(sc, bf, 1);
4789 bf = bf_next;
4803 //struct ath_desc *ds = bf->bf_lastds;
4815 struct ath_buf *bf, *bf_next;
4885 TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
4886 if (bf->bf_state.bfs_addedbaw)
4888 if (bf->bf_state.bfs_dobaw) {
4889 ath_tx_update_baw(sc, an, atid, bf);
4890 if (!bf->bf_state.bfs_addedbaw)
4894 SEQNO(bf->bf_state.bfs_seqno));
4896 bf->bf_state.bfs_dobaw = 0;
5000 bf = bf_first;
5020 while (bf) {
5023 SEQNO(bf->bf_state.bfs_seqno));
5024 bf_next = bf->bf_next;
5025 bf->bf_next = NULL; /* Remove it from the aggr list */
5028 "%s: checking bf=%p seqno=%d; ack=%d\n",
5029 __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
5034 ath_tx_update_baw(sc, an, atid, bf);
5035 bf->bf_state.bfs_dobaw = 0;
5036 if (!bf->bf_state.bfs_addedbaw)
5039 __func__, SEQNO(bf->bf_state.bfs_seqno));
5040 bf->bf_next = NULL;
5041 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
5044 if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
5046 bf->bf_next = NULL;
5047 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
5051 bf = bf_next;
5067 "%s: num frames seen=%d; bf nframes=%d\n",
5095 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
5096 TAILQ_REMOVE(&bf_q, bf, bf_list);
5097 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
5129 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5130 TAILQ_REMOVE(&bf_cq, bf, bf_list);
5131 ath_tx_default_comp(sc, bf, 0);
5143 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
5145 struct ieee80211_node *ni = bf->bf_node;
5147 int tid = bf->bf_state.bfs_tid;
5154 * bf pointer.
5156 ts = bf->bf_status.ds_txstat;
5164 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
5165 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
5166 &bf->bf_status.ds_txstat,
5167 bf->bf_state.bfs_pktlen,
5168 bf->bf_state.bfs_pktlen,
5182 "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
5183 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
5184 SEQNO(bf->bf_state.bfs_seqno));
5213 ath_tx_comp_cleanup_unaggr(sc, bf);
5237 freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
5239 * If freeframe=0 then bf is no longer ours; don't
5244 if (bf->bf_state.bfs_addedbaw)
5246 if (bf->bf_state.bfs_dobaw) {
5247 ath_tx_update_baw(sc, an, atid, bf);
5248 if (!bf->bf_state.bfs_addedbaw)
5251 __func__, SEQNO(bf->bf_state.bfs_seqno));
5253 bf->bf_state.bfs_dobaw = 0;
5272 * cloned and bf is still valid. Just complete/free it.
5275 ath_tx_default_comp(sc, bf, fail);
5290 ath_tx_aggr_retry_unaggr(sc, bf);
5296 __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5297 if (bf->bf_state.bfs_dobaw) {
5298 ath_tx_update_baw(sc, an, atid, bf);
5299 bf->bf_state.bfs_dobaw = 0;
5300 if (!bf->bf_state.bfs_addedbaw)
5303 __func__, SEQNO(bf->bf_state.bfs_seqno));
5327 ath_tx_default_comp(sc, bf, fail);
5328 /* bf is freed at this point */
5332 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5334 if (bf->bf_state.bfs_aggr)
5335 ath_tx_aggr_comp_aggr(sc, bf, fail);
5337 ath_tx_aggr_comp_unaggr(sc, bf, fail);
5352 struct ath_buf *bf;
5365 TAILQ_FOREACH(bf, &tid->tid_q, bf_list) {
5373 SEQNO(bf->bf_state.bfs_seqno)))) {
5378 if (! bf->bf_state.bfs_dobaw) {
5382 nbytes += bf->bf_state.bfs_pktlen;
5408 struct ath_buf *bf;
5443 bf = ATH_TID_FIRST(tid);
5444 if (bf == NULL) {
5452 if (! bf->bf_state.bfs_dobaw) {
5456 ATH_TID_REMOVE(tid, bf, bf_list);
5458 if (bf->bf_state.bfs_nframes > 1)
5462 bf->bf_state.bfs_aggr,
5463 bf->bf_state.bfs_nframes);
5471 bf->bf_state.bfs_aggr = 0;
5472 bf->bf_state.bfs_nframes = 1;
5475 ath_tx_update_clrdmask(sc, tid, bf);
5477 ath_tx_do_ratelookup(sc, bf, tid->tid,
5478 bf->bf_state.bfs_pktlen, false);
5479 ath_tx_calc_duration(sc, bf);
5480 ath_tx_calc_protection(sc, bf);
5481 ath_tx_set_rtscts(sc, bf);
5482 ath_tx_rate_fill_rcflags(sc, bf);
5483 ath_tx_setds(sc, bf);
5484 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5500 ath_tx_do_ratelookup(sc, bf, tid->tid, swq_pktbytes, true);
5507 ath_tx_calc_duration(sc, bf);
5508 ath_tx_calc_protection(sc, bf);
5510 ath_tx_set_rtscts(sc, bf);
5511 ath_tx_rate_fill_rcflags(sc, bf);
5528 bf = TAILQ_FIRST(&bf_q);
5538 if (bf->bf_state.bfs_nframes == 1) {
5543 ath_tx_update_clrdmask(sc, tid, bf);
5545 bf->bf_state.bfs_aggr = 0;
5546 bf->bf_state.bfs_ndelim = 0;
5547 ath_tx_setds(sc, bf);
5548 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5557 __func__, bf->bf_state.bfs_nframes,
5558 bf->bf_state.bfs_al);
5559 bf->bf_state.bfs_aggr = 1;
5560 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5564 ath_tx_update_clrdmask(sc, tid, bf);
5569 ath_tx_calc_duration(sc, bf);
5570 ath_tx_calc_protection(sc, bf);
5577 ath_tx_set_rtscts(sc, bf);
5584 ath_tx_setds_11n(sc, bf);
5588 bf->bf_comp = ath_tx_aggr_comp;
5590 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5599 ath_tx_leak_count_update(sc, tid, bf);
5602 ath_tx_handoff(sc, txq, bf);
5639 struct ath_buf *bf;
5666 bf = ATH_TID_FIRST(tid);
5667 if (bf == NULL) {
5671 ATH_TID_REMOVE(tid, bf, bf_list);
5674 if (tid->tid != bf->bf_state.bfs_tid) {
5676 " tid %d\n", __func__, bf->bf_state.bfs_tid,
5680 bf->bf_comp = ath_tx_normal_comp;
5686 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
5689 ath_tx_update_clrdmask(sc, tid, bf);
5692 ath_tx_do_ratelookup(sc, bf, tid->tid,
5693 bf->bf_state.bfs_pktlen, false);
5694 ath_tx_calc_duration(sc, bf);
5695 ath_tx_calc_protection(sc, bf);
5696 ath_tx_set_rtscts(sc, bf);
5697 ath_tx_rate_fill_rcflags(sc, bf);
5698 ath_tx_setds(sc, bf);
5705 ath_tx_leak_count_update(sc, tid, bf);
5712 ath_tx_handoff(sc, txq, bf);
5917 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
6028 struct ath_buf *bf;
6085 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
6086 TAILQ_REMOVE(&bf_cq, bf, bf_list);
6087 ath_tx_default_comp(sc, bf, 1);
6104 struct ath_buf *bf;
6138 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
6139 TAILQ_REMOVE(&bf_cq, bf, bf_list);
6140 ath_tx_default_comp(sc, bf, 1);