Lines Matching +full:cts +full:- +full:override
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
5 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
116 * What queue to throw the non-QoS TID traffic into
148 if (bf->bf_nseg == 0)
150 n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
151 for (i = 0, ds = (const char *) bf->bf_desc;
153 i++, ds += sc->sc_tx_desclen) {
154 if_ath_alq_post(&sc->sc_alq,
156 sc->sc_tx_desclen,
159 bf = bf->bf_next;
170 return ((sc->sc_ah->ah_magic == 0x20065416) ||
171 (sc->sc_ah->ah_magic == 0x19741014));
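/*
 * (The two HAL magic values above correspond to the AR5416-family and
 * AR9300-family HALs - i.e. the 11n-capable chips; any other magic is
 * a legacy pre-11n part.)
 */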
177 * Non-QoS frames get mapped to a TID so frames consistently
187 /* Non-QoS: map frame to a TID queue for software queueing */
191 /* QoS - fetch the TID from the header, ignore mbuf WME */
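/*
 * A minimal sketch of the mapping described above (not the driver's
 * exact routine), using only standard net80211 macros: QoS frames
 * carry their TID in the QoS control field; everything else lands in
 * the single non-QoS TID.
 */
static int
example_gettid(const struct ieee80211_frame *wh)
{
	const struct ieee80211_qosframe *qwh;

	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return (IEEE80211_NONQOS_TID);	/* software non-QoS queue */
	qwh = (const struct ieee80211_qosframe *) wh;
	return (qwh->i_qos[0] & IEEE80211_QOS_TID);
}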
200 wh = mtod(bf->bf_m, struct ieee80211_frame *);
202 if (bf->bf_state.bfs_isretried == 0) {
203 wh->i_fc[1] |= IEEE80211_FC1_RETRY;
204 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
207 bf->bf_state.bfs_isretried = 1;
208 		bf->bf_state.bfs_retries++;
220 * For non-QoS frames, return the mbuf WME priority.
222 * This has implications that higher priority non-QoS traffic
223 * may end up being scheduled before other non-QoS traffic,
224 * leading to out-of-sequence packets being emitted.
240 * QoS data frame (sequence number or otherwise) -
248 * Otherwise - return mbuf QoS pri.
282 for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
283 /* XXX non-management? */
309 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
310 bf->bf_segs, &bf->bf_nseg,
314 bf->bf_nseg = ATH_MAX_SCATTER + 1;
316 sc->sc_stats.ast_tx_busdma++;
325 if (bf->bf_nseg > ATH_MAX_SCATTER) { /* too many desc's, linearize */
326 sc->sc_stats.ast_tx_linear++;
330 sc->sc_stats.ast_tx_nombuf++;
334 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
335 bf->bf_segs, &bf->bf_nseg,
338 sc->sc_stats.ast_tx_busdma++;
342 KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
343 ("too many segments after defrag; nseg %u", bf->bf_nseg));
344 } else if (bf->bf_nseg == 0) { /* null packet, discard */
345 sc->sc_stats.ast_tx_nodata++;
350 __func__, m0, m0->m_pkthdr.len);
351 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
352 bf->bf_m = m0;
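/*
 * The load/linearize/retry pattern above, condensed into a sketch
 * (standard busdma/mbuf APIs; bookkeeping and statistics trimmed):
 */
static int
example_dmasetup(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **m0,
    bus_dma_segment_t *segs, int *nseg, int maxseg)
{
	struct mbuf *m;
	int error;

	*nseg = 0;
	error = bus_dmamap_load_mbuf_sg(tag, map, *m0, segs, nseg,
	    BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: copy into fewer clusters, retry. */
		m = m_collapse(*m0, M_NOWAIT, maxseg);
		if (m == NULL)
			return (ENOMEM);
		*m0 = m;
		error = bus_dmamap_load_mbuf_sg(tag, map, *m0, segs, nseg,
		    BUS_DMA_NOWAIT);
	}
	if (error == 0 && *nseg == 0)
		error = EIO;	/* empty mbuf chain: nothing to send */
	if (error == 0)
		bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
	return (error);
}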
358 * Chain together segments+descriptors for a frame - 11n or otherwise.
367 struct ath_hal *ah = sc->sc_ah;
379 struct ath_descdma *dd = &sc->sc_txdma;
390 numTxMaps = sc->sc_tx_nmaps;
396 ds = (char *) bf->bf_desc;
400 for (i = 0; i < bf->bf_nseg; i++) {
401 bufAddrList[bp] = bf->bf_segs[i].ds_addr;
402 segLenList[bp] = bf->bf_segs[i].ds_len;
409 if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
417 if (i == bf->bf_nseg - 1)
421 bf->bf_daddr + dd->dd_descsize * (dsp + 1));
432 , bf->bf_descid /* XXX desc id */
433 , bf->bf_state.bfs_tx_queue
435 , i == bf->bf_nseg - 1 /* last segment */
444 * sub-frames. Since the descriptors are in
445 * non-cacheable memory, this leads to some
449 ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);
456 ath_hal_set11n_aggr_last(sc->sc_ah,
467 ath_hal_set11n_aggr_middle(sc->sc_ah,
469 bf->bf_state.bfs_ndelim);
472 bf->bf_lastds = (struct ath_desc *) ds;
477 ds += sc->sc_tx_desclen;
486 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
498 * conditionally for the pre-11n chips, and call ath_buf_set_rate
502 * and 4 if multi-rate retry is needed.
508 struct ath_rc_series *rc = bf->bf_state.bfs_rc;
511 if (! bf->bf_state.bfs_ismrr)
518 else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
525 * Always call - that way a retried descriptor will
528 * XXX TODO: see if this is really needed - setting up
535 ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
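/*
 * Illustrative multi-rate retry schedule of the kind programmed here
 * (series 0 via ath_hal_setuptxdesc(), series 1-3 via
 * ath_hal_setupxtxdesc()). Rates and try counts are examples, not
 * what rate control would actually pick: each later series trades
 * throughput for robustness.
 */
struct example_mrr { uint8_t rate_mbps; uint8_t tries; };

static const struct example_mrr example_sched[4] = {
	{ 54, 4 },	/* series 0: rate control's primary pick */
	{ 36, 2 },	/* series 1: first fallback */
	{ 24, 2 },	/* series 2 */
	{  6, 4 },	/* series 3: most robust, try hard */
};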
547 * bf->bf_next.
553 struct ath_desc *ds0 = bf_first->bf_desc;
556 __func__, bf_first->bf_state.bfs_nframes,
557 bf_first->bf_state.bfs_al);
561 if (bf->bf_state.bfs_txrate0 == 0)
564 if (bf->bf_state.bfs_rc[0].ratecode == 0)
569 * Setup all descriptors of all subframes - this will
575 __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
576 SEQNO(bf->bf_state.bfs_seqno));
579 * Setup the initial fields for the first descriptor - all
580 * the non-11n specific stuff.
582 ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
583 , bf->bf_state.bfs_pktlen /* packet length */
584 , bf->bf_state.bfs_hdrlen /* header length */
585 , bf->bf_state.bfs_atype /* Atheros packet type */
586 , bf->bf_state.bfs_txpower /* txpower */
587 , bf->bf_state.bfs_txrate0
588 , bf->bf_state.bfs_try0 /* series 0 rate/tries */
589 , bf->bf_state.bfs_keyix /* key cache index */
590 , bf->bf_state.bfs_txantenna /* antenna mode */
591 , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */
592 , bf->bf_state.bfs_ctsrate /* rts/cts rate */
593 , bf->bf_state.bfs_ctsduration /* rts/cts duration */
604 ath_tx_set_ratectrl(sc, bf->bf_node, bf);
608 * Setup the descriptors for a multi-descriptor frame.
609 * This is both aggregate and non-aggregate aware.
614 !! (bf->bf_next == NULL) /* is_last_subframe */
622 ath_hal_set11n_aggr_first(sc->sc_ah,
624 bf->bf_state.bfs_al,
625 bf->bf_state.bfs_ndelim);
633 ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
634 bf->bf_daddr);
638 bf = bf->bf_next;
646 bf_first->bf_lastds = bf_prev->bf_lastds;
652 bf_first->bf_last = bf_prev;
655 * For non-AR9300 NICs, which require the rate control
656 * in the final descriptor - let's set that up now.
660 * if firstSeg is also true. For non-aggregate frames
667 	 * non-cacheable memory for TX descriptors, but we'll just
672 * is called on the final descriptor in an MPDU or A-MPDU -
677 ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);
683 * Hand-off a frame to the multicast TX queue.
702 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
703 ("%s: busy status 0x%x", __func__, bf->bf_flags));
709 if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
712 __func__, bf, bf->bf_state.bfs_tx_queue,
713 txq->axq_qnum);
722 wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
723 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
724 bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
728 ath_hal_settxdesclink(sc->sc_ah,
729 bf_last->bf_lastds,
730 bf->bf_daddr);
737 * Hand-off packet to a hardware queue.
743 struct ath_hal *ah = sc->sc_ah;
755 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
756 ("%s: busy status 0x%x", __func__, bf->bf_flags));
757 KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
765 if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {
766 device_printf(sc->sc_dev,
804 "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
806 txq->axq_qnum,
808 txq->axq_depth);
816 if (txq->axq_link != NULL) {
817 *txq->axq_link = bf->bf_daddr;
820 txq->axq_qnum, txq->axq_link,
821 (caddr_t)bf->bf_daddr, bf->bf_desc,
822 txq->axq_depth);
824 "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
826 txq->axq_qnum, txq->axq_link,
827 (caddr_t)bf->bf_daddr, bf->bf_desc,
828 bf->bf_lastds);
837 * So we just don't do that - if we hit the end of the list,
839 * re-start DMA by updating the link pointer of _that_
842 if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
843 bf_first = TAILQ_FIRST(&txq->axq_q);
844 txq->axq_flags |= ATH_TXQ_PUTRUNNING;
845 ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
848 __func__, txq->axq_qnum,
849 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
850 txq->axq_depth);
854 txq->axq_qnum,
855 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
856 bf_first->bf_lastds,
857 txq->axq_depth);
864 if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
867 __func__, bf, bf->bf_state.bfs_tx_queue,
868 txq->axq_qnum);
874 if (bf->bf_state.bfs_aggr)
875 txq->axq_aggr_depth++;
880 ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
892 * in that descriptor and then kick TxE here; it will re-read
897 ath_hal_txstart(ah, txq->axq_qnum);
900 "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
916 bf = TAILQ_FIRST(&txq->axq_q);
925 txq->axq_qnum,
928 (uint32_t) bf->bf_daddr);
931 if (sc->sc_debug & ATH_DEBUG_RESET)
939 KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
942 txq->axq_qnum));
944 ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
945 txq->axq_flags |= ATH_TXQ_PUTRUNNING;
947 ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
948 &txq->axq_link);
949 ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
964 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
968 if (txq->axq_qnum == ATH_TXQ_SWQ)
1011 * added to it prior to entry so m0->m_pkthdr.len will
1015 cip = k->wk_cipher;
1016 (*hdrlen) += cip->ic_header;
1017 (*pktlen) += cip->ic_header + cip->ic_trailer;
1019 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
1020 (*pktlen) += cip->ic_miclen;
1021 (*keyix) = k->wk_keyix;
1022 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
1026 (*keyix) = ni->ni_ucastkey.wk_keyix;
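/*
 * Worked example of the accounting above, using net80211's cipher
 * parameters: CCMP has ic_header = 8, ic_trailer = 8 (the CCM MIC)
 * and ic_miclen = 0, so hdrlen grows by 8 and pktlen by 16. TKIP has
 * ic_header = 8, ic_trailer = 4 and ic_miclen = 8, so a hardware-MIC,
 * non-fragmented frame additionally reserves the 8-byte Michael MIC.
 */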
1050 const HAL_RATE_TABLE *rt = sc->sc_currates;
1051 struct ieee80211com *ic = &sc->sc_ic;
1053 flags = bf->bf_state.bfs_txflags;
1054 rix = bf->bf_state.bfs_rc[0].rix;
1055 shortPreamble = bf->bf_state.bfs_shpream;
1056 wh = mtod(bf->bf_m, struct ieee80211_frame *);
1059 if (bf->bf_flags & ATH_BUF_TOA_PROBE) {
1062 bf->bf_state.bfs_doprot = 0;
1068 * to use RTS/CTS or just CTS. Note that this is only
1071 if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
1072 rt->info[rix].phy == IEEE80211_T_OFDM &&
1074 bf->bf_state.bfs_doprot = 1;
1076 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
1078 } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
1083 * highest CCK rate for RTS/CTS. But stations
1088 sc->sc_stats.ast_tx_protect++;
1099 if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
1100 rt->info[rix].phy == IEEE80211_T_HT &&
1103 sc->sc_stats.ast_tx_htprotect++;
1107 bf->bf_state.bfs_txflags = flags;
1123 struct ath_hal *ah = sc->sc_ah;
1124 const HAL_RATE_TABLE *rt = sc->sc_currates;
1125 int isfrag = bf->bf_m->m_flags & M_FRAG;
1127 flags = bf->bf_state.bfs_txflags;
1128 rix = bf->bf_state.bfs_rc[0].rix;
1129 shortPreamble = bf->bf_state.bfs_shpream;
1130 wh = mtod(bf->bf_m, struct ieee80211_frame *);
1139 dur = rt->info[rix].spAckDuration;
1141 dur = rt->info[rix].lpAckDuration;
1142 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
1155 bf->bf_nextfraglen,
1162 * fragment by disabling multi-rate retry which updates
1163 * duration based on the multi-rate duration table.
1165 bf->bf_state.bfs_ismrr = 0;
1166 bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
1171 *(u_int16_t *)wh->i_dur = htole16(dur);
1182 * CTS transmit rate is derived from the transmit rate
1186 /* NB: cix is set above where RTS/CTS is enabled */
1188 ctsrate = rt->info[cix].rateCode;
1192 ctsrate |= rt->info[cix].shortPreamble;
1198 * Calculate the RTS/CTS duration for legacy frames.
1208 if (rt->info[cix].phy == IEEE80211_T_HT) {
1210 __func__, rt->info[cix].rateCode);
1211 return (-1);
1220 * NB: CTS is assumed the same size as an ACK so we can
1224 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
1225 ctsduration += rt->info[cix].spAckDuration;
1229 ctsduration += rt->info[rix].spAckDuration;
1231 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
1232 ctsduration += rt->info[cix].lpAckDuration;
1236 ctsduration += rt->info[rix].lpAckDuration;
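/*
 * The arithmetic above, written out for the full RTS/CTS exchange
 * (CTS airtime comes from the ACK column of the rate table, per the
 * note above):
 *
 *	ctsduration = ackDuration[cix]    (SIFS + CTS; RTS only)
 *	            + txtime(pktlen, rix) (SIFS + data)
 *	            + ackDuration[rix]    (SIFS + ACK; unless NOACK)
 *
 * with the short- or long-preamble column selected by shortPreamble,
 * and CTS-to-self protection simply dropping the first term.
 */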
1243 * Update the given ath_buf with updated rts/cts setup and duration
1246 * To support rate lookups for each software retry, the rts/cts rate
1247 * and cts duration must be re-calculated.
1249 * This function assumes the RTS/CTS flags have been set as needed;
1252 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
1253 * XXX The 11n NICs support per-rate RTS/CTS configuration.
1260 uint8_t rix = bf->bf_state.bfs_rc[0].rix;
1262 const HAL_RATE_TABLE *rt = sc->sc_currates;
1265 * No RTS/CTS enabled? Don't bother.
1267 if ((bf->bf_state.bfs_txflags &
1270 bf->bf_state.bfs_ctsrate = 0;
1271 bf->bf_state.bfs_ctsduration = 0;
1279 if (bf->bf_state.bfs_doprot)
1280 rix = sc->sc_protrix;
1282 rix = bf->bf_state.bfs_rc[0].rix;
1285 * If the raw path has hard-coded ctsrate0 to something,
1288 if (bf->bf_state.bfs_ctsrate0 != 0)
1289 cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
1292 cix = rt->info[rix].controlRate;
1295 ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
1296 bf->bf_state.bfs_shpream);
1300 ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
1301 bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
1302 rt, bf->bf_state.bfs_txflags);
1305 bf->bf_state.bfs_ctsrate = ctsrate;
1306 bf->bf_state.bfs_ctsduration = ctsduration;
1309 * Must disable multi-rate retry when using RTS/CTS.
1311 if (!sc->sc_mrrprot) {
1312 bf->bf_state.bfs_ismrr = 0;
1313 bf->bf_state.bfs_try0 =
1314 bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */
1319 * Setup the descriptor chain for a normal or fast-frame
1330 struct ath_desc *ds = bf->bf_desc;
1331 struct ath_hal *ah = sc->sc_ah;
1333 if (bf->bf_state.bfs_txrate0 == 0)
1338 , bf->bf_state.bfs_pktlen /* packet length */
1339 , bf->bf_state.bfs_hdrlen /* header length */
1340 , bf->bf_state.bfs_atype /* Atheros packet type */
1341 , bf->bf_state.bfs_txpower /* txpower */
1342 , bf->bf_state.bfs_txrate0
1343 , bf->bf_state.bfs_try0 /* series 0 rate/tries */
1344 , bf->bf_state.bfs_keyix /* key cache index */
1345 , bf->bf_state.bfs_txantenna /* antenna mode */
1346 , bf->bf_state.bfs_txflags /* flags */
1347 , bf->bf_state.bfs_ctsrate /* rts/cts rate */
1348 , bf->bf_state.bfs_ctsduration /* rts/cts duration */
1354 bf->bf_lastds = ds;
1355 bf->bf_last = bf;
1358 ath_tx_set_ratectrl(sc, bf->bf_node, bf);
1366 * Non-data frames and raw frames don't require it.
1369 * then disabled later on if something requires it (eg RTS/CTS on
1370 * pre-11n chipsets).
1372 * This needs to be done before the RTS/CTS fields are calculated
1384 if (! bf->bf_state.bfs_doratelookup)
1388 bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1390 ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
1391 ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
1395 bf->bf_state.bfs_rc[0].rix = rix;
1396 bf->bf_state.bfs_rc[0].ratecode = rate;
1397 bf->bf_state.bfs_rc[0].tries = try0;
1399 if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
1400 ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
1401 is_aggr, bf->bf_state.bfs_rc);
1402 ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));
1404 sc->sc_txrix = rix; /* for LED blinking */
1405 sc->sc_lastdatarix = rix; /* for fast frames */
1406 bf->bf_state.bfs_try0 = try0;
1407 bf->bf_state.bfs_txrate0 = rate;
1408 bf->bf_state.bfs_rc_maxpktlen = maxpktlen;
1418 struct ath_node *an = ATH_NODE(bf->bf_node);
1422 if (an->clrdmask == 1) {
1423 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
1424 an->clrdmask = 0;
1444 struct ieee80211_node *ni = &an->an_node;
1449 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1450 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1454 /* If it's not in powersave - direct-dispatch BAR */
1455 if ((ATH_NODE(ni)->an_is_powersave == 0)
1461 } else if ((ATH_NODE(ni)->an_is_powersave == 1)
1469 } else if ((ATH_NODE(ni)->an_is_powersave == 1)
1479 __func__, ni->ni_macaddr, ":", type, subtype);
1497 * XXX we don't update the leak count here - if we're doing
1505 struct ath_node *an = ATH_NODE(bf->bf_node);
1506 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
1514 * non-aggregate session frames.
1517 * frames that must go out - eg management/raw frames.
1519 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
1522 ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, false);
1529 /* Track per-TID hardware queue depth correctly */
1530 tid->hwq_depth++;
1533 bf->bf_comp = ath_tx_normal_comp;
1555 struct ieee80211vap *vap = ni->ni_vap;
1556 struct ieee80211com *ic = &sc->sc_ic;
1568 /* XXX TODO: this pri is only used for non-QoS check, right? */
1575 * re-ordered frames to have out of order CCMP PN's, resulting
1581 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
1582 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1583 isfrag = m0->m_flags & M_FRAG;
1589 pktlen = m0->m_pkthdr.len - (hdrlen & 3);
1611 bf->bf_node = ni; /* NB: held reference */
1612 m0 = bf->bf_m; /* NB: may have changed */
1616 ds = bf->bf_desc;
1617 rt = sc->sc_currates;
1618 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
1625 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
1626 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
1628 sc->sc_stats.ast_tx_shortpre++;
1636 	ismrr = 0;		/* default no multi-rate retry */
1644 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
1646 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1655 rix = an->an_mgmtrix;
1656 txrate = rt->info[rix].rateCode;
1658 txrate |= rt->info[rix].shortPreamble;
1664 rix = an->an_mgmtrix;
1665 txrate = rt->info[rix].rateCode;
1667 txrate |= rt->info[rix].shortPreamble;
1679 rix = an->an_mcastrix;
1680 txrate = rt->info[rix].rateCode;
1682 txrate |= rt->info[rix].shortPreamble;
1684 } else if (m0->m_flags & M_EAPOL) {
1686 rix = an->an_mgmtrix;
1687 txrate = rt->info[rix].rateCode;
1689 txrate |= rt->info[rix].shortPreamble;
1694 * the hard-coded TX information decided here.
1697 bf->bf_state.bfs_doratelookup = 1;
1707 device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
1708 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
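/*
 * The policy of the switch above, in brief (sketch): management,
 * control, multicast and EAPOL frames are pinned to fixed robust
 * rates; only ordinary data defers to the rate-control lookup.
 */
static int
example_pick_rix(int is_data, int ismcast, int is_eapol,
    int mgmtrix, int mcastrix)
{
	if (! is_data || is_eapol)
		return (mgmtrix);	/* fixed, robust rate */
	if (ismcast)
		return (mcastrix);	/* configured multicast rate */
	return (-1);			/* -1: do a rate-control lookup */
}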
1719 * + non-QoS frames (eg management?) that the net80211 stack has
1720 * assigned a higher AC to, but since it's a non-QoS TID, it's
1726 * surrounding ADDBA request/response - hence why that is special
1737 if (txq != sc->sc_ac2q[pri]) {
1742 txq->axq_qnum,
1744 sc->sc_ac2q[pri],
1745 sc->sc_ac2q[pri]->axq_qnum);
1754 } else if (pktlen > vap->iv_rtsthreshold &&
1755 (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
1757 sc->sc_stats.ast_tx_rts++;
1760 sc->sc_stats.ast_tx_noack++;
1762 if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
1765 sc->sc_stats.ast_tdma_ack++;
1777 device_printf(sc->sc_dev,
1784 * exchange. So this means things like RTS/CTS
1787 * So, if you send a RTS-protected NULL data frame,
1797 bf->bf_flags |= ATH_BUF_TOA_PROBE;
1825 txq->axq_intrcnt = 0;
1826 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1828 txq->axq_intrcnt = 0;
1838 m0->m_nextpkt = NULL;
1841 ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
1842 sc->sc_hwmap[rix].ieeerate, -1);
1845 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
1847 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
1849 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
1850 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
1851 sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
1852 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
1858 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1864 bf->bf_state.bfs_rc[0].rix = rix;
1865 bf->bf_state.bfs_rc[0].tries = try0;
1866 bf->bf_state.bfs_rc[0].ratecode = txrate;
1869 bf->bf_state.bfs_pktlen = pktlen;
1870 bf->bf_state.bfs_hdrlen = hdrlen;
1871 bf->bf_state.bfs_atype = atype;
1872 bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
1873 bf->bf_state.bfs_txrate0 = txrate;
1874 bf->bf_state.bfs_try0 = try0;
1875 bf->bf_state.bfs_keyix = keyix;
1876 bf->bf_state.bfs_txantenna = sc->sc_txantenna;
1877 bf->bf_state.bfs_txflags = flags;
1878 bf->bf_state.bfs_shpream = shortPreamble;
1881 bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */
1882 bf->bf_state.bfs_ctsrate = 0; /* calculated later */
1883 bf->bf_state.bfs_ctsduration = 0;
1884 bf->bf_state.bfs_ismrr = ismrr;
1904 struct ieee80211vap *vap = ni->ni_vap;
1928 * the per-TID pool. That means that even QoS group addressed
1932 * all be out of whack. So - chances are, the right thing
1937 * to see what the TID should be. If it's a non-QoS frame, the
1947 txq = sc->sc_ac2q[pri];
1949 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1950 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1951 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1958 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1959 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
1960 > sc->sc_txq_mcastq_maxdepth) {
1961 sc->sc_stats.ast_tx_mcastq_overflow++;
1980 * that we are direct-dispatching.
1985 ATH_NODE(ni)->an_is_powersave &&
1986 ATH_NODE(ni)->an_swq_depth >
1987 sc->sc_txq_node_psq_maxdepth) {
1988 sc->sc_stats.ast_tx_node_psq_overflow++;
1993 /* A-MPDU TX */
2002 bf->bf_state.bfs_tid = tid;
2003 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
2004 bf->bf_state.bfs_pri = pri;
2008 * When servicing one or more stations in power-save mode
2015 if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
2016 txq = &avp->av_mcastq;
2022 bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
2028 bf->bf_state.bfs_dobaw = 0;
2030 /* A-MPDU TX? Manually set sequence number */
2035 * Don't assign A-MPDU sequence numbers to group address
2038 if (is_ampdu_tx && (! IEEE80211_IS_MULTICAST(wh->i_addr1))) {
2042 * and group-addressed frames don't get a sequence number
2048 * Don't add QoS NULL frames and group-addressed frames
2052 (! IEEE80211_IS_MULTICAST(wh->i_addr1)) &&
2054 bf->bf_state.bfs_dobaw = 1;
2062 bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
2077 m0 = bf->bf_m;
2081 * If it's a multicast frame, do a direct-dispatch to the
2097 * Until things are better debugged - if this node is asleep
2098 * and we're sending it a non-BAR frame, direct dispatch it.
2100 * sent - eg, during reassociation/reauthentication after
2105 if (txq == &avp->av_mcastq) {
2108 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2114 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2120 * direct-dispatch to the hardware.
2122 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2140 struct ieee80211com *ic = &sc->sc_ic;
2141 struct ieee80211vap *vap = ni->ni_vap;
2151 int o_tid = -1;
2160 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
2167 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
2169 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2170 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2178 pri = params->ibp_pri & 3;
2179 /* Override pri if the frame isn't a QoS one */
2183 /* XXX If it's an ADDBA, override the correct queue */
2190 "%s: overriding tid %d pri %d -> %d\n",
2206 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
2216 bf->bf_state.bfs_dobaw = 0;
2221 m0 = bf->bf_m; /* NB: may have changed */
2224 bf->bf_node = ni; /* NB: held reference */
2229 if (params->ibp_flags & IEEE80211_BPF_RTS)
2231 else if (params->ibp_flags & IEEE80211_BPF_CTS) {
2233 bf->bf_state.bfs_doprot = 1;
2237 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
2240 rt = sc->sc_currates;
2241 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2244 rix = ath_tx_findrix(sc, params->ibp_rate0);
2245 try0 = params->ibp_try0;
2248 * Override EAPOL rate as appropriate.
2250 if (m0->m_flags & M_EAPOL) {
2252 rix = an->an_mgmtrix;
2261 device_printf(sc->sc_dev,
2265 bf->bf_flags |= ATH_BUF_TOA_PROBE;
2268 txrate = rt->info[rix].rateCode;
2269 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
2270 txrate |= rt->info[rix].shortPreamble;
2271 sc->sc_txrix = rix;
2272 ismrr = (params->ibp_try1 != 0);
2273 txantenna = params->ibp_pri >> 2;
2275 txantenna = sc->sc_txantenna;
2282 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
2291 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
2292 sc->sc_hwmap[rix].ieeerate, -1);
2295 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
2296 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
2297 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2298 if (m0->m_flags & M_FRAG)
2299 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2300 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
2301 sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
2303 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
2311 ds = bf->bf_desc;
2315 bf->bf_state.bfs_pktlen = pktlen;
2316 bf->bf_state.bfs_hdrlen = hdrlen;
2317 bf->bf_state.bfs_atype = atype;
2318 bf->bf_state.bfs_txpower = MIN(params->ibp_power,
2320 bf->bf_state.bfs_txrate0 = txrate;
2321 bf->bf_state.bfs_try0 = try0;
2322 bf->bf_state.bfs_keyix = keyix;
2323 bf->bf_state.bfs_txantenna = txantenna;
2324 bf->bf_state.bfs_txflags = flags;
2325 bf->bf_state.bfs_shpream =
2326 !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
2329 bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
2330 bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2331 bf->bf_state.bfs_pri = pri;
2334 bf->bf_state.bfs_ctsrate = 0;
2335 bf->bf_state.bfs_ctsduration = 0;
2336 bf->bf_state.bfs_ismrr = ismrr;
2339 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
2341 bf->bf_state.bfs_rc[0].rix = rix;
2342 bf->bf_state.bfs_rc[0].tries = try0;
2343 bf->bf_state.bfs_rc[0].ratecode = txrate;
2348 rix = ath_tx_findrix(sc, params->ibp_rate1);
2349 bf->bf_state.bfs_rc[1].rix = rix;
2350 bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
2352 rix = ath_tx_findrix(sc, params->ibp_rate2);
2353 bf->bf_state.bfs_rc[2].rix = rix;
2354 bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
2356 rix = ath_tx_findrix(sc, params->ibp_rate3);
2357 bf->bf_state.bfs_rc[3].rix = rix;
2358 bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
2381 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2388 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2392 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
2394 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2395 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2398 /* Direct-dispatch to the hardware */
2399 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2406 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2420 struct ieee80211com *ic = ni->ni_ic;
2421 struct ath_softc *sc = ic->ic_softc;
2427 if (sc->sc_inreset_cnt > 0) {
2434 sc->sc_txstart_cnt++;
2444 if (!sc->sc_running || sc->sc_invalid) {
2446 __func__, sc->sc_running, sc->sc_invalid);
2457 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2458 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
2459 > sc->sc_txq_mcastq_maxdepth) {
2460 sc->sc_stats.ast_tx_mcastq_overflow++;
2475 sc->sc_stats.ast_tx_nobuf++;
2502 sc->sc_wd_timer = 5;
2503 sc->sc_stats.ast_tx_raw++;
2506 * Update the TIM - if there's anything queued to the
2515 sc->sc_txstart_cnt--;
2539 sc->sc_txstart_cnt--;
2550 sc->sc_stats.ast_tx_raw_fail++;
2560 * it goes out after any pending non-aggregate frames to the
2565 * number -earlier- than the ADDBA can be transmitted (but
2567 * be!) they'll arrive after the ADDBA - and the receiving end
2570 * The frames can't be appended to the TID software queue - it'll
2610 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
2612 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
2616 baparamset = le16toh(ia->rq_baparamset);
2622 /* Per-node software queue operations */
2643 if (bf->bf_state.bfs_isretried)
2646 tap = ath_tx_get_tx_tid(an, tid->tid);
2648 if (! bf->bf_state.bfs_dobaw) {
2651 __func__, SEQNO(bf->bf_state.bfs_seqno),
2652 tap->txa_start, tap->txa_wnd);
2655 if (bf->bf_state.bfs_addedbaw)
2657 "%s: re-added? tid=%d, seqno %d; window %d:%d; "
2659 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2660 tap->txa_start, tap->txa_wnd, tid->baw_head,
2661 tid->baw_tail);
2667 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2668 SEQNO(bf->bf_state.bfs_seqno))) {
2672 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2673 tap->txa_start, tap->txa_wnd, tid->baw_head,
2674 tid->baw_tail);
2678 * ni->ni_txseqs[] is the currently allocated seqno.
2681 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
2682 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2686 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2687 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,
2688 tid->baw_tail);
2691 assert(tid->tx_buf[cindex] == NULL);
2693 if (tid->tx_buf[cindex] != NULL) {
2697 __func__, index, cindex, tid->baw_head, tid->baw_tail);
2701 tid->tx_buf[cindex],
2702 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
2704 SEQNO(bf->bf_state.bfs_seqno)
2707 tid->tx_buf[cindex] = bf;
2709 if (index >= ((tid->baw_tail - tid->baw_head) &
2710 (ATH_TID_MAX_BUFS - 1))) {
2711 tid->baw_tail = cindex;
2712 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
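/*
 * Worked example of the ring arithmetic above, assuming
 * ATH_BA_INDEX(h, s) == (((s) - (h)) & (IEEE80211_SEQ_RANGE - 1)) and
 * a power-of-two ATH_TID_MAX_BUFS: with txa_start = 4090, seqno = 3
 * and baw_head = 10,
 *
 *	index  = (3 - 4090) & 4095 = 9
 *	cindex = (10 + 9) & (ATH_TID_MAX_BUFS - 1)
 *
 * so a sequence-space wrap still lands on the right tx_buf[] slot.
 */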
2719 * When software retransmitting a (sub-)frame, it is entirely possible that
2731 int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
2735 tap = ath_tx_get_tx_tid(an, tid->tid);
2736 index = ATH_BA_INDEX(tap->txa_start, seqno);
2737 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2744 if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
2751 old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
2754 if (tid->tx_buf[cindex] != old_bf) {
2762 tid->tx_buf[cindex] = new_bf;
2766 * seq_start - left edge of BAW
2767 * seq_next - current/next sequence number to allocate
2778 int seqno = SEQNO(bf->bf_state.bfs_seqno);
2782 tap = ath_tx_get_tx_tid(an, tid->tid);
2783 index = ATH_BA_INDEX(tap->txa_start, seqno);
2784 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2789 __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
2790 cindex, tid->baw_head, tid->baw_tail);
2793 * If this occurs then we have a big problem - something else
2794 * has slid tap->txa_start along without updating the BAW
2802 if (tid->tx_buf[cindex] != bf) {
2805 __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
2806 tid->tx_buf[cindex],
2807 (tid->tx_buf[cindex] != NULL) ?
2808 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
2811 tid->tx_buf[cindex] = NULL;
2813 while (tid->baw_head != tid->baw_tail &&
2814 !tid->tx_buf[tid->baw_head]) {
2815 INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
2816 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
2820 __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head);
2831 if (tid->an->an_leak_count > 0) {
2832 wh = mtod(bf->bf_m, struct ieee80211_frame *);
2837 if ((tid->an->an_stack_psq > 0)
2838 || (tid->an->an_swq_depth > 0))
2839 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
2841 wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;
2846 tid->an->an_node.ni_macaddr,
2848 tid->an->an_leak_count,
2849 tid->an->an_stack_psq,
2850 tid->an->an_swq_depth,
2851 !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
2854 * Re-sync the underlying buffer.
2856 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2859 		tid->an->an_leak_count--;
2869 if (tid->an->an_leak_count > 0) {
2872 if (tid->paused)
2888 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2894 * for PS-POLL, ensure that we allow scheduling to
2900 if (tid->sched)
2903 tid->sched = 1;
2910 if (tid->an->an_leak_count) {
2911 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2913 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2918 * We can't do the above - it'll confuse the TXQ software
2929 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2941 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2945 if (tid->sched == 0)
2948 tid->sched = 0;
2949 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
2955 * This should only be called for A-MPDU TX frames.
2980 		return (-1);
2993 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2996 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
2997 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
2998 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3003 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
3004 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
3007 seqno = ni->ni_txseqs[tid];
3008 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);
3010 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
3015 "%s: -> subtype=0x%x, tid=%d, seqno=%d\n",
3029 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
3034 tap = ath_tx_get_tx_tid(an, tid->tid);
3039 /* XXX don't sched - we're paused! */
3044 if (bf->bf_state.bfs_dobaw &&
3045 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
3046 SEQNO(bf->bf_state.bfs_seqno)))) {
3062 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
3065 bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);
3066 bf->bf_state.bfs_aggr = 0;
3067 bf->bf_state.bfs_nframes = 1;
3074 ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen,
3083 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
3085 /* Track per-TID hardware queue depth correctly */
3086 tid->hwq_depth++;
3089 if (bf->bf_state.bfs_dobaw) {
3091 bf->bf_state.bfs_addedbaw = 1;
3094 /* Set completion handler, multi-frame aggregate or not */
3095 bf->bf_comp = ath_tx_aggr_comp;
3110 * If the queue isn't busy, direct-dispatch.
3122 struct mbuf *m0 = bf->bf_m;
3126 /* Fetch the TID - non-QoS frames get assigned to TID 16 */
3130 atid = &an->an_tid[tid];
3136 /* XXX potentially duplicate info, re-check */
3137 bf->bf_state.bfs_tid = tid;
3138 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3139 bf->bf_state.bfs_pri = pri;
3147 * If the node is in power-save and we're leaking a frame,
3168 * AMPDU running, queue single-frame if the hardware queue
3177 * TODO: maybe we should treat this as two policies - minimise
3192 * Note: if we're say, configured to do ADDBA but not A-MPDU
3193 * then maybe we want to still queue two non-aggregate frames
3194 * to the hardware. Again with the per-TID policy
3200 if (txq->axq_depth + txq->fifo.axq_depth == 0) {
3205 * Ensure it's definitely treated as a non-AMPDU
3206 * frame - this information may have been left
3209 bf->bf_state.bfs_aggr = 0;
3210 bf->bf_state.bfs_nframes = 1;
3225 * If we're not doing A-MPDU, be prepared to direct dispatch
3228 * traffic and non-aggregate traffic: we want to ensure
3229 * that non-aggregate stations get a few frames queued to the
3233 * to the hardware from a non-AMPDU client, check both here
3235 * non-AMPDU stations get a fair chance to transmit.
3238 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3239 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3281 if (an->an_tid[i].isfiltered == 1)
3284 an->clrdmask = 1;
3288 * Configure the per-TID node state.
3302 atid = &an->an_tid[i];
3307 TAILQ_INIT(&atid->tid_q);
3308 TAILQ_INIT(&atid->filtq.tid_q);
3309 atid->tid = i;
3310 atid->an = an;
3312 atid->tx_buf[j] = NULL;
3313 atid->baw_head = atid->baw_tail = 0;
3314 atid->paused = 0;
3315 atid->sched = 0;
3316 atid->hwq_depth = 0;
3317 atid->cleanup_inprogress = 0;
3319 atid->ac = ATH_NONQOS_TID_AC;
3321 atid->ac = TID_TO_WME_AC(i);
3323 an->clrdmask = 1; /* Always start by setting this bit */
3338 tid->paused++;
3341 tid->an->an_node.ni_macaddr, ":",
3342 tid->tid,
3343 tid->paused);
3359 if (tid->paused == 0) {
3360 device_printf(sc->sc_dev,
3363 tid->an->an_node.ni_macaddr, ":",
3364 tid->tid);
3366 tid->paused--;
3372 tid->an->an_node.ni_macaddr, ":",
3373 tid->tid,
3374 tid->paused);
3376 if (tid->paused)
3380 * Override the clrdmask configuration for the next frame
3383 ath_tx_set_clrdmask(sc, tid->an);
3385 if (tid->axq_depth == 0)
3389 if (tid->isfiltered == 1) {
3414 if (!tid->isfiltered)
3422 sc->sc_stats.ast_tx_swfiltered++;
3439 if (! tid->isfiltered) {
3441 __func__, tid->tid);
3442 tid->isfiltered = 1;
3465 if (tid->hwq_depth != 0)
3469 __func__, tid->tid);
3470 if (tid->isfiltered == 1) {
3471 tid->isfiltered = 0;
3476 ath_tx_set_clrdmask(sc, tid->an);
3512 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3513 sc->sc_stats.ast_tx_swretrymax++;
3518 SEQNO(bf->bf_state.bfs_seqno));
3527 if (bf->bf_flags & ATH_BUF_BUSY) {
3528 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3530 "%s: busy buffer clone: %p -> %p\n",
3561 bf_next = bf->bf_next;
3562 bf->bf_next = NULL; /* Remove it from the aggr list */
3567 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3568 sc->sc_stats.ast_tx_swretrymax++;
3572 tid->tid,
3574 SEQNO(bf->bf_state.bfs_seqno));
3579 if (bf->bf_flags & ATH_BUF_BUSY) {
3580 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3582 "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n",
3583 __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno));
3595 __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno));
3619 tid->tid,
3620 tid->bar_wait,
3621 tid->bar_tx);
3624 if (tid->bar_tx) {
3630 if (tid->bar_wait)
3634 tid->bar_wait = 1;
3641 * We've finished with BAR handling - either we succeeded or
3653 tid->an->an_node.ni_macaddr,
3655 tid->tid);
3657 if (tid->bar_tx == 0 || tid->bar_wait == 0) {
3660 __func__, tid->an->an_node.ni_macaddr, ":",
3661 tid->tid, tid->bar_tx, tid->bar_wait);
3664 tid->bar_tx = tid->bar_wait = 0;
3679 if (tid->bar_wait == 0 || tid->hwq_depth > 0)
3685 tid->an->an_node.ni_macaddr,
3687 tid->tid);
3714 tid->an->an_node.ni_macaddr,
3716 tid->tid);
3718 tap = ath_tx_get_tx_tid(tid->an, tid->tid);
3723 if (tid->bar_wait == 0 || tid->bar_tx == 1) {
3726 __func__, tid->an->an_node.ni_macaddr, ":",
3727 tid->tid, tid->bar_tx, tid->bar_wait);
3732 if (tid->hwq_depth > 0) {
3736 tid->an->an_node.ni_macaddr,
3738 tid->tid,
3739 tid->hwq_depth);
3744 tid->bar_tx = 1;
3747 * Override the clrdmask configuration for the next frame,
3750 ath_tx_set_clrdmask(sc, tid->an);
3761 tid->an->an_node.ni_macaddr,
3763 tid->tid,
3764 tap->txa_start);
3770 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {
3780 __func__, tid->an->an_node.ni_macaddr, ":",
3781 tid->tid);
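/*
 * Taken together, bar_wait/bar_tx implement a small state machine
 * (summary of the functions above):
 *
 *	(0,0) normal TX
 *	(1,0) BAR needed: TID paused, waiting for hwq_depth to drain
 *	(1,1) BAR handed to net80211 via ieee80211_send_bar()
 *	back to (0,0) in ath_tx_tid_bar_unsuspend() once the BAR
 *	completes (or retries are exhausted), resuming the TID.
 */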
3796 if (ath_tx_ampdu_running(sc, an, tid->tid) &&
3797 bf->bf_state.bfs_dobaw) {
3803 if (bf->bf_state.bfs_retries > 0) {
3805 bf->bf_state.bfs_dobaw = 0;
3809 * This has become a non-fatal error now
3811 if (! bf->bf_state.bfs_addedbaw)
3814 __func__, SEQNO(bf->bf_state.bfs_seqno));
3819 bf->bf_next = NULL;
3829 struct ieee80211_node *ni = &an->an_node;
3833 txq = sc->sc_ac2q[tid->ac];
3834 tap = ath_tx_get_tx_tid(an, tid->tid);
3841 ni->ni_macaddr,
3844 bf->bf_state.bfs_addedbaw,
3845 bf->bf_state.bfs_dobaw,
3846 SEQNO(bf->bf_state.bfs_seqno),
3847 bf->bf_state.bfs_retries);
3852 ni->ni_macaddr,
3855 txq->axq_qnum,
3856 txq->axq_depth,
3857 txq->axq_aggr_depth);
3863 ni->ni_macaddr,
3866 tid->axq_depth,
3867 tid->hwq_depth,
3868 tid->bar_wait,
3869 tid->isfiltered);
3877 ni->ni_macaddr,
3879 tid->tid,
3880 tid->sched, tid->paused,
3881 tid->incomp, tid->baw_head,
3882 tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3883 ni->ni_txseqs[tid->tid]);
3887 ieee80211_dump_pkt(ni->ni_ic,
3888 mtod(bf->bf_m, const uint8_t *),
3889 bf->bf_m->m_len, 0, -1);
3913 struct ieee80211_node *ni = &an->an_node;
3916 tap = ath_tx_get_tx_tid(an, tid->tid);
3954 * Override the clrdmask configuration for the next frame
3960 ath_tx_set_clrdmask(sc, tid->an);
3971 * when the packet is first transmitted - and thus the "retries"
3975 /* But don't do it for non-QoS TIDs */
3981 ni->ni_macaddr,
3984 tid->tid,
3985 tap->txa_start);
3987 ni->ni_txseqs[tid->tid] = tap->txa_start;
3988 tid->baw_tail = tid->baw_head;
4002 tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
4003 tid->paused = tid->sched = tid->addba_tx_pending = 0;
4004 tid->incomp = tid->cleanup_inprogress = 0;
4012 * XXX I'm not going through resume here - I don't want the
4016 if (tid->bar_wait) {
4017 if (tid->paused > 0) {
4018 			tid->paused--;
4031 if (tid->isfiltered) {
4032 if (tid->paused > 0) {
4033 			tid->paused--;
4042 tid->bar_wait = 0;
4043 tid->bar_tx = 0;
4044 tid->isfiltered = 0;
4045 tid->sched = 0;
4046 tid->addba_tx_pending = 0;
4050 * frames for that node as non-aggregate; or mark the ath_node
4053 * do a complete hard reset of state here - no pause, no
4076 &an->an_node);
4083 an->an_node.ni_macaddr,
4085 an->an_is_powersave,
4086 an->an_stack_psq,
4087 an->an_tim_set,
4088 an->an_swq_depth,
4089 an->clrdmask,
4090 an->an_leak_count);
4093 struct ath_tid *atid = &an->an_tid[tid];
4101 /* Reset the per-TID pause, BAR, etc state */
4108 an->an_leak_count = 0;
4135 while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4136 tid = TAILQ_FIRST(&txq->axq_tidq);
4137 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4150 * Handle completion of non-aggregate session frames.
4153 * non-aggregate frames!
4155 * Software retransmission of non-aggregate frames needs to obey
4163 * ath_tx_hw_queue_norm() must override and set CLRDMASK.
4168 struct ieee80211_node *ni = bf->bf_node;
4170 int tid = bf->bf_state.bfs_tid;
4171 struct ath_tid *atid = &an->an_tid[tid];
4172 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4178 __func__, bf, fail, atid->hwq_depth - 1);
4180 atid->hwq_depth--;
4187 if ((ts->ts_status & HAL_TXERR_FILT) ||
4188 (ts->ts_status != 0 && atid->isfiltered)) {
4192 atid->isfiltered,
4193 ts->ts_status);
4197 if (atid->isfiltered)
4199 if (atid->hwq_depth < 0)
4201 __func__, atid->hwq_depth);
4205 if (atid->cleanup_inprogress) {
4206 atid->incomp--;
4207 if (atid->incomp == 0) {
4211 atid->cleanup_inprogress = 0;
4221 * for this end-node that has CLRDMASK set, so it's quite possible
4222 * that a filtered frame will be followed by a non-filtered
4227 if (atid->isfiltered)
4235 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4236 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4238 bf->bf_state.bfs_pktlen,
4239 bf->bf_state.bfs_pktlen,
4240 1, (ts->ts_status == 0) ? 0 : 1);
4247 * an A-MPDU.
4249 * There's no need to update the BAW here - the session is being
4255 struct ieee80211_node *ni = bf->bf_node;
4257 int tid = bf->bf_state.bfs_tid;
4258 struct ath_tid *atid = &an->an_tid[tid];
4261 __func__, tid, atid->incomp);
4264 atid->incomp--;
4267 if (bf->bf_state.bfs_dobaw) {
4269 if (!bf->bf_state.bfs_addedbaw)
4272 __func__, SEQNO(bf->bf_state.bfs_seqno));
4275 if (atid->incomp == 0) {
4279 atid->cleanup_inprogress = 0;
4296 struct ath_tid *atid = &an->an_tid[tid];
4311 bf_next = bf->bf_next; /* next aggregate frame, or NULL */
4318 * BAW - we shouldn't have it be in an aggregate
4321 if (bf->bf_state.bfs_addedbaw) {
4323 bf->bf_state.bfs_dobaw = 0;
4329 bf->bf_comp = ath_tx_normal_comp;
4330 bf->bf_next = NULL;
4361 struct ath_tid *atid = &an->an_tid[tid];
4368 atid->cleanup_inprogress);
4385 * + Fix the completion function to be non-aggregate
4410 if (atid->hwq_depth > 0) {
4412 * XXX how about we kill atid->incomp, and instead
4413 * replace it with a macro that checks that atid->hwq_depth
4416 atid->incomp = atid->hwq_depth;
4417 atid->cleanup_inprogress = 1;
4420 if (atid->cleanup_inprogress)
4423 __func__, tid, atid->incomp);
4457 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
4475 if (bf->bf_state.bfs_dobaw)
4490 * non-aggregate frames in an aggregate session are
4491 * transmitted in-order; they just have to be in-BAW)
4497 struct ieee80211_node *ni = bf->bf_node;
4499 int tid = bf->bf_state.bfs_tid;
4500 struct ath_tid *atid = &an->an_tid[tid];
4515 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4516 (bf->bf_flags & ATH_BUF_BUSY)) {
4523 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4526 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4529 __func__, SEQNO(bf->bf_state.bfs_seqno));
4530 sc->sc_stats.ast_tx_swretrymax++;
4533 if (bf->bf_state.bfs_dobaw) {
4535 if (! bf->bf_state.bfs_addedbaw)
4538 __func__, SEQNO(bf->bf_state.bfs_seqno));
4540 bf->bf_state.bfs_dobaw = 0;
4562 sc->sc_stats.ast_tx_swretries++;
4588 struct ieee80211_node *ni = bf->bf_node;
4590 int tid = bf->bf_state.bfs_tid;
4591 struct ath_tid *atid = &an->an_tid[tid];
4596 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
4597 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
4599 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
4609 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4610 (bf->bf_flags & ATH_BUF_BUSY)) {
4617 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4620 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4621 sc->sc_stats.ast_tx_swretrymax++;
4624 __func__, SEQNO(bf->bf_state.bfs_seqno));
4626 if (!bf->bf_state.bfs_addedbaw)
4629 __func__, SEQNO(bf->bf_state.bfs_seqno));
4630 bf->bf_state.bfs_dobaw = 0;
4635 sc->sc_stats.ast_tx_swretries++;
4636 bf->bf_next = NULL; /* Just to make sure */
4639 bf->bf_state.bfs_aggr = 0;
4640 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */
4641 bf->bf_state.bfs_nframes = 1;
4654 struct ieee80211_node *ni = bf_first->bf_node;
4666 * Update rate control - all frames have failed.
4668 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4669 &bf_first->bf_status.ds_txstat,
4670 bf_first->bf_state.bfs_al,
4671 bf_first->bf_state.bfs_rc_maxpktlen,
4672 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
4675 tap = ath_tx_get_tx_tid(an, tid->tid);
4676 sc->sc_stats.ast_tx_aggr_failall++;
4681 bf_next = bf->bf_next;
4682 bf->bf_next = NULL; /* Remove it from the aggr list */
4683 sc->sc_stats.ast_tx_aggr_fail++;
4686 bf->bf_next = NULL;
4699 * Schedule the TID to be re-tried.
4731 * Handle clean-up of packets from an aggregate list.
4733 * There's no need to update the BAW here - the session is being
4740 struct ieee80211_node *ni = bf_first->bf_node;
4742 int tid = bf_first->bf_state.bfs_tid;
4743 struct ath_tid *atid = &an->an_tid[tid];
4748 atid->incomp--;
4754 if (bf->bf_state.bfs_dobaw) {
4756 if (!bf->bf_state.bfs_addedbaw)
4759 __func__, SEQNO(bf->bf_state.bfs_seqno));
4761 bf = bf->bf_next;
4764 if (atid->incomp == 0) {
4768 atid->cleanup_inprogress = 0;
4773 /* XXX why would we send a BAR when transitioning to non-aggregation? */
4786 bf_next = bf->bf_next;
4787 bf->bf_next = NULL;
4803 //struct ath_desc *ds = bf->bf_lastds;
4804 struct ieee80211_node *ni = bf_first->bf_node;
4806 int tid = bf_first->bf_state.bfs_tid;
4807 struct ath_tid *atid = &an->an_tid[tid];
4826 __func__, atid->hwq_depth);
4829 * Take a copy; this may be needed -after- bf_first
4832 ts = bf_first->bf_status.ds_txstat;
4833 agglen = bf_first->bf_state.bfs_al;
4834 rc_agglen = bf_first->bf_state.bfs_rc_maxpktlen;
4842 atid->hwq_depth--;
4843 if (atid->hwq_depth < 0)
4845 __func__, atid->hwq_depth);
4854 if (atid->isfiltered)
4860 if (atid->cleanup_inprogress) {
4861 if (atid->isfiltered)
4878 (ts.ts_status != 0 && atid->isfiltered)) {
4886 if (bf->bf_state.bfs_addedbaw)
4888 if (bf->bf_state.bfs_dobaw) {
4890 if (!bf->bf_state.bfs_addedbaw)
4894 SEQNO(bf->bf_state.bfs_seqno));
4896 bf->bf_state.bfs_dobaw = 0;
4916 pktlen = bf_first->bf_state.bfs_pktlen;
4937 * extract starting sequence and block-ack bitmap
4939 /* XXX endian-ness of seq_st, ba? */
4943 isaggr = bf_first->bf_state.bfs_aggr;
4953 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
4958 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
4966 * out tid 1 - the aggregate frames are all marked as TID 1,
4981 device_printf(sc->sc_dev,
4985 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
4992 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
5001 nf = bf_first->bf_state.bfs_nframes;
5023 SEQNO(bf->bf_state.bfs_seqno));
5024 bf_next = bf->bf_next;
5025 bf->bf_next = NULL; /* Remove it from the aggr list */
5029 __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
5033 sc->sc_stats.ast_tx_aggr_ok++;
5035 bf->bf_state.bfs_dobaw = 0;
5036 if (!bf->bf_state.bfs_addedbaw)
5039 __func__, SEQNO(bf->bf_state.bfs_seqno));
5040 bf->bf_next = NULL;
5043 sc->sc_stats.ast_tx_aggr_fail++;
5046 bf->bf_next = NULL;
5058 * have a consistent view of what -was- in the BAW.
5062 txseq = tap->txa_start;
5090 "%s: txa_start now %d\n", __func__, tap->txa_start);
5106 * If the queue is filtered, re-schedule as required.
5109 * for this end-node that has CLRDMASK set, so it's quite possible
5110 * that a filtered frame will be followed by a non-filtered
5115 if (atid->isfiltered)
5145 struct ieee80211_node *ni = bf->bf_node;
5147 int tid = bf->bf_state.bfs_tid;
5148 struct ath_tid *atid = &an->an_tid[tid];
5156 ts = bf->bf_status.ds_txstat;
5164 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
5165 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
5166 &bf->bf_status.ds_txstat,
5167 bf->bf_state.bfs_pktlen,
5168 bf->bf_state.bfs_pktlen,
5172 * This is called early so atid->hwq_depth can be tracked.
5183 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
5184 SEQNO(bf->bf_state.bfs_seqno));
5186 atid->hwq_depth--;
5187 if (atid->hwq_depth < 0)
5189 __func__, atid->hwq_depth);
5196 if (atid->isfiltered)
5205 if (atid->cleanup_inprogress) {
5206 if (atid->isfiltered)
5225 * However - a busy buffer can't be added to the filtered
5230 (ts.ts_status != 0 && atid->isfiltered)) {
5244 if (bf->bf_state.bfs_addedbaw)
5246 if (bf->bf_state.bfs_dobaw) {
5248 if (!bf->bf_state.bfs_addedbaw)
5251 __func__, SEQNO(bf->bf_state.bfs_seqno));
5253 bf->bf_state.bfs_dobaw = 0;
5284 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
5296 __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5297 if (bf->bf_state.bfs_dobaw) {
5299 bf->bf_state.bfs_dobaw = 0;
5300 if (!bf->bf_state.bfs_addedbaw)
5303 __func__, SEQNO(bf->bf_state.bfs_seqno));
5307 * If the queue is filtered, re-schedule as required.
5310 * for this end-node that has CLRDMASK set, so it's quite possible
5311 * that a filtered frame will be followed by a non-filtered
5316 if (atid->isfiltered)
5334 if (bf->bf_state.bfs_aggr)
5358 tap = ath_tx_get_tx_tid(an, tid->tid);
5365 TAILQ_FOREACH(bf, &tid->tid_q, bf_list) {
5372 if (tap != NULL && (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
5373 SEQNO(bf->bf_state.bfs_seqno)))) {
5378 if (! bf->bf_state.bfs_dobaw) {
5382 nbytes += bf->bf_state.bfs_pktlen;
5391 if (an->an_leak_count) {
5409 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5415 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5423 tap = ath_tx_get_tx_tid(an, tid->tid);
5425 if (tid->tid == IEEE80211_NONQOS_TID)
5452 if (! bf->bf_state.bfs_dobaw) {
5454 "%s: non-baw packet\n",
5458 if (bf->bf_state.bfs_nframes > 1)
5462 bf->bf_state.bfs_aggr,
5463 bf->bf_state.bfs_nframes);
5466 * This shouldn't happen - such frames shouldn't
5471 bf->bf_state.bfs_aggr = 0;
5472 bf->bf_state.bfs_nframes = 1;
5477 ath_tx_do_ratelookup(sc, bf, tid->tid,
5478 bf->bf_state.bfs_pktlen, false);
5484 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5486 sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5500 ath_tx_do_ratelookup(sc, bf, tid->tid, swq_pktbytes, true);
5519 * No frames to be picked up - out of BAW
5531 sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5534 * If it's the only frame send as non-aggregate
5538 if (bf->bf_state.bfs_nframes == 1) {
5540 "%s: single-frame aggregate\n", __func__);
5545 bf->bf_state.bfs_aggr = 0;
5546 bf->bf_state.bfs_ndelim = 0;
5548 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5550 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5552 sc->sc_aggr_stats.aggr_single_pkt++;
5555 "%s: multi-frame aggregate: %d frames, "
5557 __func__, bf->bf_state.bfs_nframes,
5558 bf->bf_state.bfs_al);
5559 bf->bf_state.bfs_aggr = 1;
5560 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5561 sc->sc_aggr_stats.aggr_aggr_pkt++;
5587 /* Set completion handler, multi-frame aggregate or not */
5588 bf->bf_comp = ath_tx_aggr_comp;
5590 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5606 tid->hwq_depth++;
5616 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5640 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5643 __func__, an, tid->tid);
5647 /* Check - is AMPDU pending or running? then print out something */
5648 if (ath_tx_ampdu_pending(sc, an, tid->tid))
5650 __func__, tid->tid);
5651 if (ath_tx_ampdu_running(sc, an, tid->tid))
5653 __func__, tid->tid);
5674 if (tid->tid != bf->bf_state.bfs_tid) {
5676 " tid %d\n", __func__, bf->bf_state.bfs_tid,
5677 tid->tid);
5680 bf->bf_comp = ath_tx_normal_comp;
5683 * Override this for now, until the non-aggregate
5686 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
5692 ath_tx_do_ratelookup(sc, bf, tid->tid,
5693 bf->bf_state.bfs_pktlen, false);
5709 tid->hwq_depth++;
5735 * For non-EDMA chips, aggr frames that have been built are
5737 * There's no FIFO, so txq->axq_depth is what's been scheduled
5747 * The FIFO depth is what's in the hardware; the txq->axq_depth
5751 * into the EDMA FIFO. For multi-frame lists, this is the number
5756 /* For EDMA and non-EDMA, check built/scheduled against aggr limit */
5757 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr) {
5758 sc->sc_aggr_stats.aggr_sched_nopkt++;
5763 * For non-EDMA chips, axq_depth is the "what's scheduled to
5768 if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) {
5769 sc->sc_aggr_stats.aggr_sched_nopkt++;
5773 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
5775 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
5781 __func__, tid->tid, tid->paused);
5784 * This node may be in power-save and we're leaking
5790 if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
5791 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
5793 ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
5795 /* Not empty? Re-schedule */
5796 if (tid->axq_depth != 0)
5805 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5808 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5818 * but are pending a leaking frame in response to a ps-poll?
5837 struct ieee80211_node *ni = &an->an_node;
5843 tap = &ni->ni_tx_ampdu[tid];
5848 * Is AMPDU-TX running?
5862 return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
5866 * Is AMPDU-TX negotiation pending?
5880 return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
5884 * Is AMPDU-TX pending for the given TID?
5893 * XXX there's no timeout handler we can override?
5899 struct ath_softc *sc = ni->ni_ic->ic_softc;
5900 int tid = tap->txa_tid;
5902 struct ath_tid *atid = &an->an_tid[tid];
5909 * However, net80211 will keep self-assigning sequence numbers
5917 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
5933 if (atid->addba_tx_pending == 0) {
5935 atid->addba_tx_pending = 1;
5942 ni->ni_macaddr,
5947 __func__, tap->txa_start, ni->ni_txseqs[tid]);
5949 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5961 * Note! net80211 keeps self-assigning sequence numbers until
5962 * ampdu is negotiated. This means the initially-negotiated BAW left
5963 * edge won't match the ni->ni_txseq.
5966 * ni->ni_txseq.
5969 * addba request should be tagged as aggregate and queued as non-aggregate
5977 struct ath_softc *sc = ni->ni_ic->ic_softc;
5978 int tid = tap->txa_tid;
5980 struct ath_tid *atid = &an->an_tid[tid];
5985 ni->ni_macaddr,
5991 __func__, tap->txa_start, ni->ni_txseqs[tid]);
5999 r = sc->sc_addba_response(ni, tap, status, code, batimeout);
6002 atid->addba_tx_pending = 0;
6008 tap->txa_start = ni->ni_txseqs[tid];
6023 struct ath_softc *sc = ni->ni_ic->ic_softc;
6024 int tid = tap->txa_tid;
6026 struct ath_tid *atid = &an->an_tid[tid];
6032 ni->ni_macaddr,
6041 if (atid->bar_wait) {
6047 atid->bar_tx = 1;
6053 sc->sc_addba_stop(ni, tap);
6069 * progress - it means something else is also doing
6072 if (atid->cleanup_inprogress) {
6079 if (! atid->cleanup_inprogress)
6112 tid = &an->an_tid[i];
6113 if (tid->hwq_depth == 0)
6118 an->an_node.ni_macaddr,
6125 if (! tid->cleanup_inprogress) {
6131 if (! tid->cleanup_inprogress)
6149 * ic->ic_addba_stop().
6151 * XXX This uses a hard-coded max BAR count value; the whole
6158 struct ath_softc *sc = ni->ni_ic->ic_softc;
6159 int tid = tap->txa_tid;
6161 struct ath_tid *atid = &an->an_tid[tid];
6162 int attempts = tap->txa_attempts;
6166 "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
6168 ni->ni_macaddr,
6170 tap->txa_tid,
6171 atid->tid,
6174 tap->txa_start,
6175 tap->txa_seqpending);
6189 old_txa_start = tap->txa_start;
6190 sc->sc_bar_response(ni, tap, status);
6191 if (tap->txa_start != old_txa_start) {
6192 device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
6195 tap->txa_start,
6198 tap->txa_start = old_txa_start;
6204 * XXX to a non-aggregate session. So we must unpause the
6212 if (atid->bar_tx == 0 || atid->bar_wait == 0)
6216 atid->bar_tx, atid->bar_wait);
6231 struct ath_softc *sc = ni->ni_ic->ic_softc;
6232 int tid = tap->txa_tid;
6234 struct ath_tid *atid = &an->an_tid[tid];
6239 ni->ni_macaddr,
6244 atid->addba_tx_pending = 0;
6248 sc->sc_addba_response_timeout(ni, tap);
6265 return (an->an_is_powersave);
6282 * doing node/TID operations. There are other complications -
6283 * the sched/unsched operations involve walking the per-txq
6298 if (an->an_is_powersave) {
6301 __func__, an->an_node.ni_macaddr, ":");
6307 atid = &an->an_tid[tid];
6308 txq = sc->sc_ac2q[atid->ac];
6314 an->an_is_powersave = 1;
6335 if (an->an_is_powersave == 0) {
6344 an->an_is_powersave = 0;
6348 an->an_leak_count = 0;
6351 atid = &an->an_tid[tid];
6352 txq = sc->sc_ac2q[atid->ac];
6382 sc->sc_tx_desclen = sizeof(struct ath_desc);
6383 sc->sc_tx_statuslen = sizeof(struct ath_desc);
6384 sc->sc_tx_nmaps = 1; /* only one buffer per TX desc */
6386 sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
6387 sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
6388 sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;
6390 sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
6391 sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;
6393 sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
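/*
 * EDMA chips (AR9300 and later) install their own method table the
 * same way from if_ath_tx_edma.c; a sketch of the pattern (function
 * names indicative - see that file for the real set):
 *
 *	sc->sc_tx.xmit_setup = ath_edma_dma_txsetup;
 *	sc->sc_tx.xmit_handoff = ath_edma_xmit_handoff;
 *	sc->sc_tx.xmit_drain = ath_edma_tx_drain;
 */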