1 /* $NetBSD: ath.c,v 1.49 2005/06/30 00:52:56 dyoung Exp $ */ 2 3 /*- 4 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer, 12 * without modification. 13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 14 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 15 * redistribution must be conditioned upon including a substantially 16 * similar Disclaimer requirement for further binary redistribution. 17 * 3. Neither the names of the above-listed copyright holders nor the names 18 * of any contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * Alternatively, this software may be distributed under the terms of the 22 * GNU General Public License ("GPL") version 2 as published by the Free 23 * Software Foundation. 24 * 25 * NO WARRANTY 26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 28 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 29 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 30 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 31 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 34 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 36 * THE POSSIBILITY OF SUCH DAMAGES. 
37 */ 38 39 #include <sys/cdefs.h> 40 #ifdef __FreeBSD__ 41 __FBSDID("$FreeBSD: src/sys/dev/ath/if_ath.c,v 1.88 2005/04/12 17:56:43 sam Exp $"); 42 #endif 43 #ifdef __NetBSD__ 44 __KERNEL_RCSID(0, "$NetBSD: ath.c,v 1.49 2005/06/30 00:52:56 dyoung Exp $"); 45 #endif 46 47 /* 48 * Driver for the Atheros Wireless LAN controller. 49 * 50 * This software is derived from work of Atsushi Onoe; his contribution 51 * is greatly appreciated. 52 */ 53 54 #include "opt_inet.h" 55 56 #ifdef __NetBSD__ 57 #include "bpfilter.h" 58 #endif /* __NetBSD__ */ 59 60 #include <sys/param.h> 61 #include <sys/reboot.h> 62 #include <sys/systm.h> 63 #include <sys/types.h> 64 #include <sys/sysctl.h> 65 #include <sys/mbuf.h> 66 #include <sys/malloc.h> 67 #include <sys/lock.h> 68 #include <sys/kernel.h> 69 #include <sys/socket.h> 70 #include <sys/sockio.h> 71 #include <sys/errno.h> 72 #include <sys/callout.h> 73 #include <machine/bus.h> 74 #include <sys/endian.h> 75 76 #include <machine/bus.h> 77 78 #include <net/if.h> 79 #include <net/if_dl.h> 80 #include <net/if_media.h> 81 #include <net/if_arp.h> 82 #include <net/if_ether.h> 83 #include <net/if_llc.h> 84 85 #include <net80211/ieee80211_netbsd.h> 86 #include <net80211/ieee80211_var.h> 87 88 #if NBPFILTER > 0 89 #include <net/bpf.h> 90 #endif 91 92 #ifdef INET 93 #include <netinet/in.h> 94 #endif 95 96 #include <sys/device.h> 97 #include <dev/ic/ath_netbsd.h> 98 99 #define AR_DEBUG 100 #include <dev/ic/athvar.h> 101 #include <contrib/dev/ic/athhal_desc.h> 102 #include <contrib/dev/ic/athhal_devid.h> /* XXX for softled */ 103 104 /* unaligned little endian access */ 105 #define LE_READ_2(p) \ 106 ((u_int16_t) \ 107 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8))) 108 #define LE_READ_4(p) \ 109 ((u_int32_t) \ 110 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \ 111 (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24))) 112 113 enum { 114 ATH_LED_TX, 115 ATH_LED_RX, 116 ATH_LED_POLL, 117 }; 118 119 static int 
ath_init(struct ifnet *); 120 static void ath_stop_locked(struct ifnet *, int); 121 static void ath_stop(struct ifnet *, int); 122 static void ath_start(struct ifnet *); 123 static int ath_media_change(struct ifnet *); 124 static void ath_watchdog(struct ifnet *); 125 static int ath_ioctl(struct ifnet *, u_long, caddr_t); 126 static void ath_fatal_proc(void *, int); 127 static void ath_rxorn_proc(void *, int); 128 static void ath_bmiss_proc(void *, int); 129 static void ath_initkeytable(struct ath_softc *); 130 static int ath_key_alloc(struct ieee80211com *, 131 const struct ieee80211_key *); 132 static int ath_key_delete(struct ieee80211com *, 133 const struct ieee80211_key *); 134 static int ath_key_set(struct ieee80211com *, const struct ieee80211_key *, 135 const u_int8_t mac[IEEE80211_ADDR_LEN]); 136 static void ath_key_update_begin(struct ieee80211com *); 137 static void ath_key_update_end(struct ieee80211com *); 138 static void ath_mode_init(struct ath_softc *); 139 static void ath_setslottime(struct ath_softc *); 140 static void ath_updateslot(struct ifnet *); 141 static int ath_beaconq_setup(struct ath_hal *); 142 static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); 143 static void ath_beacon_setup(struct ath_softc *, struct ath_buf *); 144 static void ath_beacon_proc(void *, int); 145 static void ath_bstuck_proc(void *, int); 146 static void ath_beacon_free(struct ath_softc *); 147 static void ath_beacon_config(struct ath_softc *); 148 static void ath_descdma_cleanup(struct ath_softc *sc, 149 struct ath_descdma *, ath_bufhead *); 150 static int ath_desc_alloc(struct ath_softc *); 151 static void ath_desc_free(struct ath_softc *); 152 static struct ieee80211_node *ath_node_alloc(struct ieee80211_node_table *); 153 static void ath_node_free(struct ieee80211_node *); 154 static u_int8_t ath_node_getrssi(const struct ieee80211_node *); 155 static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); 156 static void 
ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m, 157 struct ieee80211_node *ni, 158 int subtype, int rssi, u_int32_t rstamp); 159 static void ath_setdefantenna(struct ath_softc *, u_int); 160 static void ath_rx_proc(void *, int); 161 static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); 162 static int ath_tx_setup(struct ath_softc *, int, int); 163 static int ath_wme_update(struct ieee80211com *); 164 static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); 165 static void ath_tx_cleanup(struct ath_softc *); 166 static int ath_tx_start(struct ath_softc *, struct ieee80211_node *, 167 struct ath_buf *, struct mbuf *); 168 static void ath_tx_proc_q0(void *, int); 169 static void ath_tx_proc_q0123(void *, int); 170 static void ath_tx_proc(void *, int); 171 static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 172 static void ath_draintxq(struct ath_softc *); 173 static void ath_stoprecv(struct ath_softc *); 174 static int ath_startrecv(struct ath_softc *); 175 static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); 176 static void ath_next_scan(void *); 177 static void ath_calibrate(void *); 178 static int ath_newstate(struct ieee80211com *, enum ieee80211_state, int); 179 static void ath_newassoc(struct ieee80211com *, 180 struct ieee80211_node *, int); 181 static int ath_getchannels(struct ath_softc *, u_int cc, 182 HAL_BOOL outdoor, HAL_BOOL xchanmode); 183 static void ath_led_event(struct ath_softc *, int); 184 static void ath_update_txpow(struct ath_softc *); 185 186 static int ath_rate_setup(struct ath_softc *, u_int mode); 187 static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 188 189 #ifdef __NetBSD__ 190 int ath_enable(struct ath_softc *); 191 void ath_disable(struct ath_softc *); 192 void ath_power(int, void *); 193 #endif 194 195 static void ath_bpfattach(struct ath_softc *); 196 static void ath_announce(struct ath_softc *); 197 198 int ath_dwelltime = 
200;		/* 5 channels/second */
int ath_calinterval = 30;		/* calibrate every 30 secs */
int ath_outdoor = AH_TRUE;		/* outdoor operation */
int ath_xchanmode = AH_TRUE;		/* enable extended channels */
int ath_countrycode = CTRY_DEFAULT;	/* country code */
int ath_regdomain = 0;			/* regulatory domain */
int ath_debug = 0;

#ifdef AR_DEBUG
/* Debug message classes; or'd together into sc->sc_debug. */
enum {
	ATH_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	ATH_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
	ATH_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
	ATH_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
	ATH_DEBUG_RATE		= 0x00000010,	/* rate control */
	ATH_DEBUG_RESET		= 0x00000020,	/* reset processing */
	ATH_DEBUG_MODE		= 0x00000040,	/* mode init/setup */
	ATH_DEBUG_BEACON	= 0x00000080,	/* beacon handling */
	ATH_DEBUG_WATCHDOG	= 0x00000100,	/* watchdog timeout */
	ATH_DEBUG_INTR		= 0x00001000,	/* ISR */
	ATH_DEBUG_TX_PROC	= 0x00002000,	/* tx ISR proc */
	ATH_DEBUG_RX_PROC	= 0x00004000,	/* rx ISR proc */
	ATH_DEBUG_BEACON_PROC	= 0x00008000,	/* beacon ISR proc */
	ATH_DEBUG_CALIBRATE	= 0x00010000,	/* periodic calibration */
	ATH_DEBUG_KEYCACHE	= 0x00020000,	/* key cache management */
	ATH_DEBUG_STATE		= 0x00040000,	/* 802.11 state transitions */
	ATH_DEBUG_NODE		= 0x00080000,	/* node management */
	ATH_DEBUG_LED		= 0x00100000,	/* led management */
	ATH_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
	ATH_DEBUG_ANY		= 0xffffffff
};
/* Dump packets when the matching debug class or IFF_DEBUG+IFF_LINK2 is set. */
#define	IFF_DUMPPKTS(sc, m) \
	((sc->sc_debug & (m)) || \
	    (sc->sc_if.if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	DPRINTF(sc, m, fmt, ...) do {				\
	if (sc->sc_debug & (m))					\
		printf(fmt, __VA_ARGS__);			\
} while (0)
#define	KEYPRINTF(sc, ix, hk, mac) do {				\
	if (sc->sc_debug & ATH_DEBUG_KEYCACHE)			\
		ath_keyprint(__func__, ix, hk, mac);		\
} while (0)
static	void ath_printrxbuf(struct ath_buf *bf, int);
static	void ath_printtxbuf(struct ath_buf *bf, int);
#else
#define	IFF_DUMPPKTS(sc, m) \
	((sc->sc_if.if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	DPRINTF(m, fmt, ...)
#define	KEYPRINTF(sc, k, ix, mac)
#endif

#ifdef __NetBSD__
/*
 * autoconf(9) activate/deactivate hook.  Activation is not supported;
 * deactivation just marks the interface down.
 */
int
ath_activate(struct device *self, enum devact act)
{
	struct ath_softc *sc = (struct ath_softc *)self;
	int rv = 0, s;

	s = splnet();
	switch (act) {
	case DVACT_ACTIVATE:
		rv = EOPNOTSUPP;
		break;
	case DVACT_DEACTIVATE:
		if_deactivate(&sc->sc_if);
		break;
	}
	splx(s);
	return rv;
}

/*
 * Power the device on via the bus-specific enable hook if it is not
 * already enabled.  Returns 0 on success, EIO if the hook fails.
 */
int
ath_enable(struct ath_softc *sc)
{
	if (ATH_IS_ENABLED(sc) == 0) {
		if (sc->sc_enable != NULL && (*sc->sc_enable)(sc) != 0) {
			printf("%s: device enable failed\n",
				sc->sc_dev.dv_xname);
			return (EIO);
		}
		sc->sc_flags |= ATH_ENABLED;
	}
	return (0);
}

/*
 * Power the device off via the bus-specific disable hook; no-op if
 * the device is not currently enabled.
 */
void
ath_disable(struct ath_softc *sc)
{
	if (!ATH_IS_ENABLED(sc))
		return;
	if (sc->sc_disable != NULL)
		(*sc->sc_disable)(sc);
	sc->sc_flags &= ~ATH_ENABLED;
}
#endif /* __NetBSD__ */

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

/*
 * Attach the device: probe the hal, reset the key cache, collect the
 * channel list, allocate descriptors and tx queues, wire up the
 * net80211 layer, and register shutdown/power hooks.
 * Returns 0 on success or an errno on failure.
 */
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah;
	HAL_STATUS status;
	int error = 0, i;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	if (ah->ah_abi != HAL_ABI_VERSION) {
		if_printf(ifp, "HAL ABI mismatch detected "
			"(HAL:0x%x != driver:0x%x)\n",
			ah->ah_abi, HAL_ABI_VERSION);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > sizeof(sc->sc_keymap) * NBBY) {
		if_printf(ifp,
			"Warning, using only %zu of %u key cache slots\n",
			sizeof(sc->sc_keymap) * NBBY, sc->sc_keymax);
		sc->sc_keymax = sizeof(sc->sc_keymap) * NBBY;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 * XXX only for splitmic.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+32);
		setbit(sc->sc_keymap, i+64);
		setbit(sc->sc_keymap, i+32+64);
	}

	/*
	 * Collect the channel list using the default country
	 * code and including outdoor channels.  The 802.11 layer
	 * is responsible for filtering this list based on settings
	 * like the phy mode.
	 */
	error = ath_getchannels(sc, ath_countrycode,
			ath_outdoor, ath_xchanmode);
	if (error != 0)
		goto bad;
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
		goto bad;
	}
	ATH_CALLOUT_INIT(&sc->sc_scan_ch, debug_mpsafenet ? CALLOUT_MPSAFE : 0);
	ATH_CALLOUT_INIT(&sc->sc_cal_ch, CALLOUT_MPSAFE);

	ATH_TXBUF_LOCK_INIT(sc);

	/* Deferred-work handlers for the interrupt path. */
	TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc);
	TASK_INIT(&sc->sc_rxorntask, 0, ath_rxorn_proc, sc);
	TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask, 0, ath_bstuck_proc, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	ATH_CALLOUT_INIT(&sc->sc_ledtimer, CALLOUT_MPSAFE);
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	if (sc->sc_softled) {
		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin);
		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start;
	ifp->if_watchdog = ath_watchdog;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	ic->ic_reset = ath_reset;
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_caps |= IEEE80211_C_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_caps |= IEEE80211_C_AES;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_caps |= IEEE80211_C_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_caps |= IEEE80211_C_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_caps |= IEEE80211_C_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_caps |= IEEE80211_C_TKIPMIC;
		if (ath_hal_tkipsplit(ah))
			sc->sc_splitmic = 1;
	}
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	sc->sc_hastpc = ath_hal_hastpc(ah);
	if (sc->sc_hastpc || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for frame bursting capability.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	if (ath_hal_hasdiversity(ah)) {
		sc->sc_hasdiversity = 1;
		sc->sc_diversity = ath_hal_getdiversity(ah);
	}
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, ic->ic_myaddr);

	if_attach(ifp);
	/* call MI attach routine. */
	ieee80211_ifattach(ic);
	/* override default methods; save originals for chaining */
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	ic->ic_node_getrssi = ath_node_getrssi;
	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
	ic->ic_recv_mgmt = ath_recv_mgmt;
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = ath_newstate;
	ic->ic_crypto.cs_key_alloc = ath_key_alloc;
	ic->ic_crypto.cs_key_delete = ath_key_delete;
	ic->ic_crypto.cs_key_set = ath_key_set;
	ic->ic_crypto.cs_key_update_begin = ath_key_update_begin;
	ic->ic_crypto.cs_key_update_end = ath_key_update_end;
	/* complete initialization */
	ieee80211_media_init(ic, ath_media_change, ieee80211_media_status);

	ath_bpfattach(sc);

#ifdef __NetBSD__
	sc->sc_flags |= ATH_ATTACHED;
	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ath_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
			sc->sc_dev.dv_xname);
	sc->sc_powerhook = powerhook_establish(ath_power, sc);
	if (sc->sc_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
			sc->sc_dev.dv_xname);
#endif
	if (boothowto & AB_VERBOSE)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	sc->sc_invalid = 1;
	return error;
}

/*
 * Detach the device: stop the interface, tear down 802.11 state,
 * and release descriptors, tx queues, and the hal.
 */
int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	int s;

	if ((sc->sc_flags & ATH_ATTACHED) == 0)
		return (0);

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	s = splnet();
	ath_stop(ifp, 1);
#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(&sc->sc_ic);
	ath_rate_detach(sc->sc_rc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);
	if_detach(ifp);
	splx(s);
	powerhook_disestablish(sc->sc_powerhook);
	shutdownhook_disestablish(sc->sc_sdhook);

	return 0;
}

#ifdef __NetBSD__
/*
 * Power-management hook: dispatch suspend/standby/resume events to
 * the suspend/resume handlers; soft events are ignored.
 */
void
ath_power(int why, void *arg)
{
	struct ath_softc *sc = arg;
	int s;

	DPRINTF(sc, ATH_DEBUG_ANY, "ath_power(%d)\n", why);

	s = splnet();
	switch (why) {
	case PWR_SUSPEND:
	case PWR_STANDBY:
		ath_suspend(sc, why);
		break;
	case PWR_RESUME:
		ath_resume(sc, why);
		break;
	case PWR_SOFTSUSPEND:
	case PWR_SOFTSTANDBY:
	case PWR_SOFTRESUME:
		break;
	}
	splx(s);
}
#endif

/*
 * Suspend: stop the interface and invoke the bus power hook, if any.
 */
void
ath_suspend(struct ath_softc *sc, int why)
{
	struct ifnet *ifp = &sc->sc_if;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp, 1);
	if (sc->sc_power != NULL)
		(*sc->sc_power)(sc, why);
}

/*
 * Resume: re-initialize and restart the interface if it was marked
 * up at suspend time.
 */
void
ath_resume(struct ath_softc *sc, int why)
{
	struct ifnet *ifp = &sc->sc_if;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	if (ifp->if_flags & IFF_UP) {
		ath_init(ifp);
#if 0
		(void)ath_intr(sc);
#endif
		if (sc->sc_power != NULL)
			(*sc->sc_power)(sc, why);
		if (ifp->if_flags & IFF_RUNNING)
			ath_start(ifp);
	}
}

/*
 * Shutdown hook: make sure the interface is stopped during reboot.
 */
void
ath_shutdown(void *arg)
{
	struct ath_softc *sc = arg;

	ath_stop(&sc->sc_if, 1);
}

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
int
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return 0;
	}
	if (!ath_hal_intrpend(ah))		/* shared irq, not for us */
		return 0;
	if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) != (IFF_RUNNING|IFF_UP)) {
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
			__func__, ifp->if_flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		return 1; /* XXX */
	}
	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	status &= sc->sc_imask;			/* discard unasked for bits */
	if (status & HAL_INT_FATAL) {
		/*
		 * Fatal errors are unrecoverable.  Typically
		 * these are caused by DMA errors.  Unfortunately
		 * the exact reason is not (presently) returned
		 * by the hal.
		 */
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		TASK_RUN_OR_ENQUEUE(&sc->sc_fataltask);
	} else if (status & HAL_INT_RXORN) {
		sc->sc_stats.ast_rxorn++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		TASK_RUN_OR_ENQUEUE(&sc->sc_rxorntask);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
			ath_beacon_proc(sc, 0);
		}
		if (status & HAL_INT_RXEOL) {
			/*
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			sc->sc_stats.ast_rxeol++;
			sc->sc_rxlink = NULL;
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		if (status & HAL_INT_RX)
			TASK_RUN_OR_ENQUEUE(&sc->sc_rxtask);
		if (status & HAL_INT_TX)
			TASK_RUN_OR_ENQUEUE(&sc->sc_txtask);
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			TASK_RUN_OR_ENQUEUE(&sc->sc_bmisstask);
		}
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event.  We assume it will
			 * clear whatever condition caused the interrupt.
			 */
			ath_hal_mibevent(ah,
			    &ATH_NODE(sc->sc_ic.ic_bss)->an_halstats);
			ath_hal_intrset(ah, sc->sc_imask);
		}
	}
	return 1;
}

/* Deferred handler for fatal hardware (DMA) errors: reset the chip. */
static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;

	if_printf(ifp, "hardware error; resetting\n");
	ath_reset(ifp);
}

/* Deferred handler for rx FIFO overrun: reset the chip. */
static void
ath_rxorn_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;

	if_printf(ifp, "rx FIFO overrun; resetting\n");
	ath_reset(ifp);
}

/* Deferred handler for beacon-miss interrupts (station mode only). */
static void
ath_bmiss_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
	KASSERT(ic->ic_opmode == IEEE80211_M_STA,
		("unexpect operating mode %u", ic->ic_opmode));
	if (ic->ic_state == IEEE80211_S_RUN) {
		/*
		 * Rather than go directly to scan state, try to
		 * reassociate first.  If that fails then the state
		 * machine will drop us into scanning after timing
		 * out waiting for a probe response.
900 */ 901 NET_LOCK_GIANT(); 902 ieee80211_new_state(ic, IEEE80211_S_ASSOC, -1); 903 NET_UNLOCK_GIANT(); 904 } 905 } 906 907 static u_int 908 ath_chan2flags(struct ieee80211com *ic, struct ieee80211_channel *chan) 909 { 910 #define N(a) (sizeof(a) / sizeof(a[0])) 911 static const u_int modeflags[] = { 912 0, /* IEEE80211_MODE_AUTO */ 913 CHANNEL_A, /* IEEE80211_MODE_11A */ 914 CHANNEL_B, /* IEEE80211_MODE_11B */ 915 CHANNEL_PUREG, /* IEEE80211_MODE_11G */ 916 0, /* IEEE80211_MODE_FH */ 917 CHANNEL_T, /* IEEE80211_MODE_TURBO_A */ 918 CHANNEL_108G /* IEEE80211_MODE_TURBO_G */ 919 }; 920 enum ieee80211_phymode mode = ieee80211_chan2mode(ic, chan); 921 922 KASSERT(mode < N(modeflags), ("unexpected phy mode %u", mode)); 923 KASSERT(modeflags[mode] != 0, ("mode %u undefined", mode)); 924 return modeflags[mode]; 925 #undef N 926 } 927 928 static int 929 ath_init(struct ifnet *ifp) 930 { 931 struct ath_softc *sc = (struct ath_softc *)ifp->if_softc; 932 struct ieee80211com *ic = &sc->sc_ic; 933 struct ieee80211_node *ni; 934 struct ath_hal *ah = sc->sc_ah; 935 HAL_STATUS status; 936 int error = 0; 937 938 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 939 __func__, ifp->if_flags); 940 941 ATH_LOCK(sc); 942 943 if ((error = ath_enable(sc)) != 0) 944 return error; 945 946 /* 947 * Stop anything previously setup. This is safe 948 * whether this is the first time through or not. 949 */ 950 ath_stop_locked(ifp, 0); 951 952 /* 953 * The basic interface to setting the hardware in a good 954 * state is ``reset''. On return the hardware is known to 955 * be powered up and with interrupts disabled. This must 956 * be followed by initialization of the appropriate bits 957 * and then setup of the interrupt mask. 
958 */ 959 sc->sc_curchan.channel = ic->ic_ibss_chan->ic_freq; 960 sc->sc_curchan.channelFlags = ath_chan2flags(ic, ic->ic_ibss_chan); 961 if (!ath_hal_reset(ah, ic->ic_opmode, &sc->sc_curchan, AH_FALSE, &status)) { 962 if_printf(ifp, "unable to reset hardware; hal status %u\n", 963 status); 964 error = EIO; 965 goto done; 966 } 967 968 /* 969 * This is needed only to setup initial state 970 * but it's best done after a reset. 971 */ 972 ath_update_txpow(sc); 973 974 /* 975 * Setup the hardware after reset: the key cache 976 * is filled as needed and the receive engine is 977 * set going. Frame transmit is handled entirely 978 * in the frame output path; there's nothing to do 979 * here except setup the interrupt mask. 980 */ 981 ath_initkeytable(sc); /* XXX still needed? */ 982 if ((error = ath_startrecv(sc)) != 0) { 983 if_printf(ifp, "unable to start recv logic\n"); 984 goto done; 985 } 986 987 /* 988 * Enable interrupts. 989 */ 990 sc->sc_imask = HAL_INT_RX | HAL_INT_TX 991 | HAL_INT_RXEOL | HAL_INT_RXORN 992 | HAL_INT_FATAL | HAL_INT_GLOBAL; 993 /* 994 * Enable MIB interrupts when there are hardware phy counters. 995 * Note we only do this (at the moment) for station mode. 996 */ 997 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) 998 sc->sc_imask |= HAL_INT_MIB; 999 ath_hal_intrset(ah, sc->sc_imask); 1000 1001 ifp->if_flags |= IFF_RUNNING; 1002 ic->ic_state = IEEE80211_S_INIT; 1003 1004 /* 1005 * The hardware should be ready to go now so it's safe 1006 * to kick the 802.11 state machine as it's likely to 1007 * immediately call back to us to send mgmt frames. 
1008 */ 1009 ni = ic->ic_bss; 1010 ni->ni_chan = ic->ic_ibss_chan; 1011 ath_chan_change(sc, ni->ni_chan); 1012 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 1013 if (ic->ic_roaming != IEEE80211_ROAMING_MANUAL) 1014 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1); 1015 } else 1016 ieee80211_new_state(ic, IEEE80211_S_RUN, -1); 1017 done: 1018 ATH_UNLOCK(sc); 1019 return error; 1020 } 1021 1022 static void 1023 ath_stop_locked(struct ifnet *ifp, int disable) 1024 { 1025 struct ath_softc *sc = ifp->if_softc; 1026 struct ieee80211com *ic = &sc->sc_ic; 1027 struct ath_hal *ah = sc->sc_ah; 1028 1029 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", 1030 __func__, sc->sc_invalid, ifp->if_flags); 1031 1032 ATH_LOCK_ASSERT(sc); 1033 if (ifp->if_flags & IFF_RUNNING) { 1034 /* 1035 * Shutdown the hardware and driver: 1036 * reset 802.11 state machine 1037 * turn off timers 1038 * disable interrupts 1039 * turn off the radio 1040 * clear transmit machinery 1041 * clear receive machinery 1042 * drain and release tx queues 1043 * reclaim beacon resources 1044 * power down hardware 1045 * 1046 * Note that some of this work is not possible if the 1047 * hardware is gone (invalid). 
1048 */ 1049 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 1050 ifp->if_flags &= ~IFF_RUNNING; 1051 ifp->if_timer = 0; 1052 if (!sc->sc_invalid) { 1053 if (sc->sc_softled) { 1054 callout_stop(&sc->sc_ledtimer); 1055 ath_hal_gpioset(ah, sc->sc_ledpin, 1056 !sc->sc_ledon); 1057 sc->sc_blinking = 0; 1058 } 1059 ath_hal_intrset(ah, 0); 1060 } 1061 ath_draintxq(sc); 1062 if (!sc->sc_invalid) { 1063 ath_stoprecv(sc); 1064 ath_hal_phydisable(ah); 1065 } else 1066 sc->sc_rxlink = NULL; 1067 IF_PURGE(&ifp->if_snd); 1068 ath_beacon_free(sc); 1069 if (disable) 1070 ath_disable(sc); 1071 } 1072 } 1073 1074 static void 1075 ath_stop(struct ifnet *ifp, int disable) 1076 { 1077 struct ath_softc *sc = ifp->if_softc; 1078 1079 ATH_LOCK(sc); 1080 ath_stop_locked(ifp, disable); 1081 if (!sc->sc_invalid) { 1082 /* 1083 * Set the chip in full sleep mode. Note that we are 1084 * careful to do this only when bringing the interface 1085 * completely to a stop. When the chip is in this state 1086 * it must be carefully woken up or references to 1087 * registers in the PCI clock domain may freeze the bus 1088 * (and system). This varies by chip and is mostly an 1089 * issue with newer parts that go to sleep more quickly. 1090 */ 1091 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP, 0); 1092 } 1093 ATH_UNLOCK(sc); 1094 } 1095 1096 /* 1097 * Reset the hardware w/o losing operational state. This is 1098 * basically a more efficient way of doing ath_stop, ath_init, 1099 * followed by state transitions to the current 802.11 1100 * operational state. Used to recover from various errors and 1101 * to reset or reload hardware state. 1102 */ 1103 int 1104 ath_reset(struct ifnet *ifp) 1105 { 1106 struct ath_softc *sc = ifp->if_softc; 1107 struct ieee80211com *ic = &sc->sc_ic; 1108 struct ath_hal *ah = sc->sc_ah; 1109 struct ieee80211_channel *c; 1110 HAL_STATUS status; 1111 1112 /* 1113 * Convert to a HAL channel description with the flags 1114 * constrained to reflect the current operating mode. 
1115 */ 1116 c = ic->ic_ibss_chan; 1117 sc->sc_curchan.channel = c->ic_freq; 1118 sc->sc_curchan.channelFlags = ath_chan2flags(ic, c); 1119 1120 ath_hal_intrset(ah, 0); /* disable interrupts */ 1121 ath_draintxq(sc); /* stop xmit side */ 1122 ath_stoprecv(sc); /* stop recv side */ 1123 /* NB: indicate channel change so we do a full reset */ 1124 if (!ath_hal_reset(ah, ic->ic_opmode, &sc->sc_curchan, AH_TRUE, &status)) 1125 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n", 1126 __func__, status); 1127 ath_update_txpow(sc); /* update tx power state */ 1128 if (ath_startrecv(sc) != 0) /* restart recv */ 1129 if_printf(ifp, "%s: unable to start recv logic\n", __func__); 1130 /* 1131 * We may be doing a reset in response to an ioctl 1132 * that changes the channel so update any state that 1133 * might change as a result. 1134 */ 1135 ath_chan_change(sc, c); 1136 if (ic->ic_state == IEEE80211_S_RUN) 1137 ath_beacon_config(sc); /* restart beacons */ 1138 ath_hal_intrset(ah, sc->sc_imask); 1139 1140 ath_start(ifp); /* restart xmit */ 1141 return 0; 1142 } 1143 1144 static void 1145 ath_start(struct ifnet *ifp) 1146 { 1147 struct ath_softc *sc = ifp->if_softc; 1148 struct ath_hal *ah = sc->sc_ah; 1149 struct ieee80211com *ic = &sc->sc_ic; 1150 struct ieee80211_node *ni; 1151 struct ath_buf *bf; 1152 struct mbuf *m; 1153 struct ieee80211_frame *wh; 1154 struct ether_header *eh; 1155 1156 if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) 1157 return; 1158 for (;;) { 1159 /* 1160 * Grab a TX buffer and associated resources. 
1161 */ 1162 ATH_TXBUF_LOCK(sc); 1163 bf = STAILQ_FIRST(&sc->sc_txbuf); 1164 if (bf != NULL) 1165 STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list); 1166 ATH_TXBUF_UNLOCK(sc); 1167 if (bf == NULL) { 1168 DPRINTF(sc, ATH_DEBUG_ANY, "%s: out of xmit buffers\n", 1169 __func__); 1170 sc->sc_stats.ast_tx_qstop++; 1171 ifp->if_flags |= IFF_OACTIVE; 1172 break; 1173 } 1174 /* 1175 * Poll the management queue for frames; they 1176 * have priority over normal data frames. 1177 */ 1178 IF_DEQUEUE(&ic->ic_mgtq, m); 1179 if (m == NULL) { 1180 /* 1181 * No data frames go out unless we're associated. 1182 */ 1183 if (ic->ic_state != IEEE80211_S_RUN) { 1184 DPRINTF(sc, ATH_DEBUG_ANY, 1185 "%s: ignore data packet, state %u\n", 1186 __func__, ic->ic_state); 1187 sc->sc_stats.ast_tx_discard++; 1188 ATH_TXBUF_LOCK(sc); 1189 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 1190 ATH_TXBUF_UNLOCK(sc); 1191 break; 1192 } 1193 IFQ_DEQUEUE(&ifp->if_snd, m); /* XXX: LOCK */ 1194 if (m == NULL) { 1195 ATH_TXBUF_LOCK(sc); 1196 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 1197 ATH_TXBUF_UNLOCK(sc); 1198 break; 1199 } 1200 /* 1201 * Find the node for the destination so we can do 1202 * things like power save and fast frames aggregation. 1203 */ 1204 if (m->m_len < sizeof(struct ether_header) && 1205 (m = m_pullup(m, sizeof(struct ether_header))) == NULL) { 1206 ic->ic_stats.is_tx_nobuf++; /* XXX */ 1207 ni = NULL; 1208 goto bad; 1209 } 1210 eh = mtod(m, struct ether_header *); 1211 ni = ieee80211_find_txnode(ic, eh->ether_dhost); 1212 if (ni == NULL) { 1213 /* NB: ieee80211_find_txnode does stat+msg */ 1214 m_freem(m); 1215 goto bad; 1216 } 1217 if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) && 1218 (m->m_flags & M_PWR_SAV) == 0) { 1219 /* 1220 * Station in power save mode; pass the frame 1221 * to the 802.11 layer and continue. We'll get 1222 * the frame back when the time is right. 
1223 */ 1224 ieee80211_pwrsave(ic, ni, m); 1225 goto reclaim; 1226 } 1227 /* calculate priority so we can find the tx queue */ 1228 if (ieee80211_classify(ic, m, ni)) { 1229 DPRINTF(sc, ATH_DEBUG_XMIT, 1230 "%s: discard, classification failure\n", 1231 __func__); 1232 m_freem(m); 1233 goto bad; 1234 } 1235 ifp->if_opackets++; 1236 1237 #if NBPFILTER > 0 1238 if (ifp->if_bpf) 1239 bpf_mtap(ifp->if_bpf, m); 1240 #endif 1241 /* 1242 * Encapsulate the packet in prep for transmission. 1243 */ 1244 m = ieee80211_encap(ic, m, ni); 1245 if (m == NULL) { 1246 DPRINTF(sc, ATH_DEBUG_ANY, 1247 "%s: encapsulation failure\n", 1248 __func__); 1249 sc->sc_stats.ast_tx_encap++; 1250 goto bad; 1251 } 1252 } else { 1253 /* 1254 * Hack! The referenced node pointer is in the 1255 * rcvif field of the packet header. This is 1256 * placed there by ieee80211_mgmt_output because 1257 * we need to hold the reference with the frame 1258 * and there's no other way (other than packet 1259 * tags which we consider too expensive to use) 1260 * to pass it along. 
1261 */ 1262 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 1263 m->m_pkthdr.rcvif = NULL; 1264 1265 wh = mtod(m, struct ieee80211_frame *); 1266 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 1267 IEEE80211_FC0_SUBTYPE_PROBE_RESP) { 1268 /* fill time stamp */ 1269 u_int64_t tsf; 1270 u_int32_t *tstamp; 1271 1272 tsf = ath_hal_gettsf64(ah); 1273 /* XXX: adjust 100us delay to xmit */ 1274 tsf += 100; 1275 tstamp = (u_int32_t *)&wh[1]; 1276 tstamp[0] = htole32(tsf & 0xffffffff); 1277 tstamp[1] = htole32(tsf >> 32); 1278 } 1279 sc->sc_stats.ast_tx_mgmt++; 1280 } 1281 1282 if (ath_tx_start(sc, ni, bf, m)) { 1283 bad: 1284 ifp->if_oerrors++; 1285 reclaim: 1286 ATH_TXBUF_LOCK(sc); 1287 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 1288 ATH_TXBUF_UNLOCK(sc); 1289 if (ni != NULL) 1290 ieee80211_free_node(ni); 1291 continue; 1292 } 1293 1294 sc->sc_tx_timer = 5; 1295 ifp->if_timer = 1; 1296 } 1297 } 1298 1299 static int 1300 ath_media_change(struct ifnet *ifp) 1301 { 1302 #define IS_UP(ifp) \ 1303 ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == (IFF_RUNNING|IFF_UP)) 1304 int error; 1305 1306 error = ieee80211_media_change(ifp); 1307 if (error == ENETRESET) { 1308 if (IS_UP(ifp)) 1309 ath_init(ifp); /* XXX lose error */ 1310 error = 0; 1311 } 1312 return error; 1313 #undef IS_UP 1314 } 1315 1316 #ifdef AR_DEBUG 1317 static void 1318 ath_keyprint(const char *tag, u_int ix, 1319 const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN]) 1320 { 1321 static const char *ciphers[] = { 1322 "WEP", 1323 "AES-OCB", 1324 "AES-CCM", 1325 "CKIP", 1326 "TKIP", 1327 "CLR", 1328 }; 1329 int i, n; 1330 1331 printf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]); 1332 for (i = 0, n = hk->kv_len; i < n; i++) 1333 printf("%02x", hk->kv_val[i]); 1334 printf(" mac %s", ether_sprintf(mac)); 1335 if (hk->kv_type == HAL_CIPHER_TKIP) { 1336 printf(" mic "); 1337 for (i = 0; i < sizeof(hk->kv_mic); i++) 1338 printf("%02x", hk->kv_mic[i]); 1339 } 1340 printf("\n"); 1341 } 1342 #endif 1343 
/*
 * Set a TKIP key into the hardware.  This handles the
 * potential distribution of key state to multiple key
 * cache slots for TKIP.
 */
static int
ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
	HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
{
#define	IEEE80211_KEY_XR	(IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)
	static const u_int8_t zerobssid[IEEE80211_ADDR_LEN];
	struct ath_hal *ah = sc->sc_ah;

	KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP,
		("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher));
	KASSERT(sc->sc_splitmic, ("key cache !split"));
	if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) {
		/*
		 * Key used for both transmit and receive:
		 * TX key goes at first index, RX key at +32.
		 * The hal handles the MIC keys at index+64.
		 */
		memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic));
		KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
		if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid))
			return 0;

		memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
		KEYPRINTF(sc, k->wk_keyix+32, hk, mac);
		/* XXX delete tx key on failure? */
		return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac);
	} else if (k->wk_flags & IEEE80211_KEY_XR) {
		/*
		 * Key used for transmit or receive only:
		 * TX/RX key goes at first index.
		 * The hal handles the MIC keys at index+64.
		 */
		KASSERT(k->wk_keyix < IEEE80211_WEP_NKID,
			("group key at index %u", k->wk_keyix));
		memcpy(hk->kv_mic, k->wk_flags & IEEE80211_KEY_XMIT ?
			k->wk_txmic : k->wk_rxmic, sizeof(hk->kv_mic));
		KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
		return ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid);
	}
	/* XXX key w/o xmit/recv; need this for compression? */
	return 0;
#undef IEEE80211_KEY_XR
}

/*
 * Set a net80211 key into the hardware.
 * This handles the
 * potential distribution of key state to multiple key
 * cache slots for TKIP with hardware MIC support.
 */
static int
ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
	const u_int8_t mac[IEEE80211_ADDR_LEN])
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	/* map net80211 cipher numbers to HAL cipher codes */
	static const u_int8_t ciphermap[] = {
		HAL_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
		HAL_CIPHER_TKIP,	/* IEEE80211_CIPHER_TKIP */
		HAL_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
		HAL_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
		(u_int8_t) -1,		/* 4 is not allocated */
		HAL_CIPHER_CKIP,	/* IEEE80211_CIPHER_CKIP */
		HAL_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
	};
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	HAL_KEYVAL hk;

	memset(&hk, 0, sizeof(hk));
	/*
	 * Software crypto uses a "clear key" so non-crypto
	 * state kept in the key cache are maintained and
	 * so that rx frames have an entry to match.
	 */
	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
		KASSERT(cip->ic_cipher < N(ciphermap),
			("invalid cipher type %u", cip->ic_cipher));
		hk.kv_type = ciphermap[cip->ic_cipher];
		hk.kv_len = k->wk_keylen;
		memcpy(hk.kv_val, k->wk_key, k->wk_keylen);
	} else
		hk.kv_type = HAL_CIPHER_CLR;

	/* hardware TKIP MIC with split keys needs special slot handling */
	if (hk.kv_type == HAL_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 &&
	    sc->sc_splitmic) {
		return ath_keyset_tkip(sc, k, &hk, mac);
	} else {
		KEYPRINTF(sc, k->wk_keyix, &hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
	}
#undef N
}

/*
 * Fill the hardware key cache with key entries.
 */
static void
ath_initkeytable(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_if;
	struct ath_hal *ah = sc->sc_ah;
	const u_int8_t *bssid;
	int i;

	/* XXX maybe should reset all keys when !PRIVACY */
	if (ic->ic_state == IEEE80211_S_SCAN)
		bssid = ifp->if_broadcastaddr;
	else
		bssid = ic->ic_bss->ni_bssid;
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		struct ieee80211_key *k = &ic->ic_nw_keys[i];

		if (k->wk_keylen == 0) {
			/* unused slot: clear any stale hardware state */
			ath_hal_keyreset(ah, i);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: reset key %u\n",
				__func__, i);
		} else {
			ath_keyset(sc, k, bssid);
		}
	}
}

/*
 * Allocate tx/rx key slots for TKIP.  We allocate two slots for
 * each key, one for decrypt/encrypt and the other for the MIC.
 */
static u_int16_t
key_alloc_2pair(struct ath_softc *sc)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	KASSERT(sc->sc_splitmic, ("key cache !split"));
	/* XXX could optimize */
	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			keyix = i*NBBY;
			while (b & 1) {
		again:
				keyix++;
				b >>= 1;
			}
			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
			if (isset(sc->sc_keymap, keyix+32) ||
			    isset(sc->sc_keymap, keyix+64) ||
			    isset(sc->sc_keymap, keyix+32+64)) {
				/* full pair unavailable */
				/* XXX statistic */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				/* NB: deliberately jumps back into the loop */
				goto again;
			}
			/* claim decrypt/encrypt and MIC slots for tx and rx */
			setbit(sc->sc_keymap, keyix);
			setbit(sc->sc_keymap, keyix+64);
			setbit(sc->sc_keymap, keyix+32);
			setbit(sc->sc_keymap, keyix+32+64);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: key pair %u,%u %u,%u\n",
				__func__, keyix, keyix+64,
				keyix+32, keyix+32+64);
			return keyix;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
	return IEEE80211_KEYIX_NONE;
#undef N
}

/*
 * Allocate a single key cache slot.
 */
static u_int16_t
key_alloc_single(struct ath_softc *sc)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	/* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
	for (i = 0; i < N(sc->sc_keymap); i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots are free.
			 */
			keyix = i*NBBY;
			while (b & 1)
				keyix++, b >>= 1;
			setbit(sc->sc_keymap, keyix);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n",
				__func__, keyix);
			return keyix;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__);
	return IEEE80211_KEYIX_NONE;
#undef N
}

/*
 * Allocate one or more key cache slots for a unicast key.  The
 * key itself is needed only to identify the cipher.  For hardware
 * TKIP with split cipher+MIC keys we allocate two key cache slot
 * pairs so that we can setup separate TX and RX MIC keys.
 * Note
 * that the MIC key for a TKIP key at slot i is assumed by the
 * hardware to be at slot i+64.  This limits TKIP keys to the first
 * 64 entries.
 */
static int
ath_key_alloc(struct ieee80211com *ic, const struct ieee80211_key *k)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	/*
	 * Group key allocation must be handled specially for
	 * parts that do not support multicast key cache search
	 * functionality.  For those parts the key id must match
	 * the h/w key index so lookups find the right key.  On
	 * parts w/ the key search facility we install the sender's
	 * mac address (with the high bit set) and let the hardware
	 * find the key w/o using the key id.  This is preferred as
	 * it permits us to support multiple users for adhoc and/or
	 * multi-station operation.
	 */
	if ((k->wk_flags & IEEE80211_KEY_GROUP) && !sc->sc_mcastkey) {
		u_int keyix;

		if (!(&ic->ic_nw_keys[0] <= k &&
		      k < &ic->ic_nw_keys[IEEE80211_WEP_NKID])) {
			/* should not happen */
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: bogus group key\n", __func__);
			return IEEE80211_KEYIX_NONE;
		}
		keyix = k - ic->ic_nw_keys;
		/*
		 * XXX we pre-allocate the global keys so
		 * have no way to check if they've already been allocated.
		 */
		return keyix;
	}

	/*
	 * We allocate two pair for TKIP when using the h/w to do
	 * the MIC.  For everything else, including software crypto,
	 * we allocate a single entry.  Note that s/w crypto requires
	 * a pass-through slot on the 5211 and 5212.  The 5210 does
	 * not support pass-through cache entries and we map all
	 * those requests to slot 0.
	 */
	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
		return key_alloc_single(sc);
	} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic) {
		return key_alloc_2pair(sc);
	} else {
		return key_alloc_single(sc);
	}
}

/*
 * Delete an entry in the key cache allocated by ath_key_alloc.
 */
static int
ath_key_delete(struct ieee80211com *ic, const struct ieee80211_key *k)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	u_int keyix = k->wk_keyix;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix);

	ath_hal_keyreset(ah, keyix);
	/*
	 * Handle split tx/rx keying required for TKIP with h/w MIC.
	 */
	if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
		ath_hal_keyreset(ah, keyix+32);		/* RX key */
	if (keyix >= IEEE80211_WEP_NKID) {
		/*
		 * Don't touch keymap entries for global keys so
		 * they are never considered for dynamic allocation.
		 */
		clrbit(sc->sc_keymap, keyix);
		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
		    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 &&
		    sc->sc_splitmic) {
			clrbit(sc->sc_keymap, keyix+64);	/* TX key MIC */
			clrbit(sc->sc_keymap, keyix+32);	/* RX key */
			clrbit(sc->sc_keymap, keyix+32+64);	/* RX key MIC */
		}
	}
	return 1;
}

/*
 * Set the key cache contents for the specified key.  Key cache
 * slot(s) must already have been allocated by ath_key_alloc.
 */
static int
ath_key_set(struct ieee80211com *ic, const struct ieee80211_key *k,
	const u_int8_t mac[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	return ath_keyset(sc, k, mac);
}

/*
 * Block/unblock tx+rx processing while a key change is done.
 * We assume the caller serializes key management operations
 * so we only need to worry about synchronization with other
 * uses that originate in the driver.
 */
static void
ath_key_update_begin(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
#if 0
	tasklet_disable(&sc->sc_rxtq);
#endif
	IF_LOCK(&ifp->if_snd);		/* NB: doesn't block mgmt frames */
}

static void
ath_key_update_end(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	IF_UNLOCK(&ifp->if_snd);
#if 0
	tasklet_enable(&sc->sc_rxtq);
#endif
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when scanning
 */
static u_int32_t
ath_calcrxfilter(struct ath_softc *sc, enum ieee80211_state state)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = &sc->sc_if;
	u_int32_t rfilt;

	rfilt = (ath_hal_getrxfilter(ah) & HAL_RX_FILTER_PHYERR)
	      | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
	if (ic->ic_opmode != IEEE80211_M_STA)
		rfilt |= HAL_RX_FILTER_PROBEREQ;
	if (ic->ic_opmode != IEEE80211_M_HOSTAP &&
	    (ifp->if_flags & IFF_PROMISC))
		rfilt |= HAL_RX_FILTER_PROM;
	if (ic->ic_opmode == IEEE80211_M_STA ||
	    ic->ic_opmode == IEEE80211_M_IBSS ||
	    state == IEEE80211_S_SCAN)
		rfilt |= HAL_RX_FILTER_BEACON;
	return rfilt;
}

/*
 * Fold one multicast link-level address into the 2x32-bit
 * hardware hash filter.
 */
static void
ath_mcastfilter_accum(caddr_t dl, u_int32_t *mfilt)
{
	u_int32_t val;
	u_int8_t pos;

	/* calculate XOR of eight 6bit values */
	val = LE_READ_4(dl + 0);
	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	val = LE_READ_4(dl + 3);
	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	pos &= 0x3f;
	mfilt[pos / 32] |= (1 << (pos % 32));
}

/*
 * Compute the hardware multicast hash filter from the
 * interface's multicast address list.
 */
static void
ath_mcastfilter_compute(struct ath_softc *sc, u_int32_t *mfilt)
{
	struct ifnet *ifp = &sc->sc_if;
	struct ether_multi *enm;
	struct ether_multistep estep;

	mfilt[0] = mfilt[1] = 0;
	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* XXX Punt on ranges.
 */
		if (!IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
			/* address range: fall back to accepting all multicast */
			mfilt[0] = mfilt[1] = ~((u_int32_t)0);
			ifp->if_flags |= IFF_ALLMULTI;
			return;
		}
		ath_mcastfilter_accum(enm->enm_addrlo, mfilt);
		ETHER_NEXT_MULTI(estep, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
}

/*
 * Program the receive filter, operating mode, MAC address
 * and multicast filter into the hardware.
 */
static void
ath_mode_init(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc, ic->ic_state);
	ath_hal_setrxfilter(ah, rfilt);

	/* configure operational mode */
	ath_hal_setopmode(ah);

	/*
	 * Handle any link-level address change.  Note that we only
	 * need to force ic_myaddr; any other addresses are handled
	 * as a byproduct of the ifnet code marking the interface
	 * down then up.
	 *
	 * XXX should get from lladdr instead of arpcom but that's more work
	 */
	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(sc->sc_if.if_sadl));
	ath_hal_setmac(ah, ic->ic_myaddr);

	/* calculate and install multicast filter */
#ifdef __FreeBSD__
	if ((sc->sc_if.if_flags & IFF_ALLMULTI) == 0)
		ath_mcastfilter_compute(sc, mfilt);
	else
		mfilt[0] = mfilt[1] = ~0;
#endif
#ifdef __NetBSD__
	ath_mcastfilter_compute(sc, mfilt);
#endif
	ath_hal_setmcastfilter(ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, MC filter %08x:%08x\n",
		__func__, rfilt, mfilt[0], mfilt[1]);
}

/*
 * Set the slot time based on the current setting.
 */
static void
ath_setslottime(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;

	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		ath_hal_setslottime(ah, HAL_SLOT_TIME_9);
	else
		ath_hal_setslottime(ah, HAL_SLOT_TIME_20);
	sc->sc_updateslot = OK;
}

/*
 * Callback from the 802.11 layer to update the
 * slot time based on the current setting.
 */
static void
ath_updateslot(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	/*
	 * When not coordinating the BSS, change the hardware
	 * immediately.  For other operation we defer the change
	 * until beacon updates have propagated to the stations.
	 */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP)
		sc->sc_updateslot = UPDATE;
	else
		ath_setslottime(sc);
}

/*
 * Setup a h/w transmit queue for beacons.
 */
static int
ath_beaconq_setup(struct ath_hal *ah)
{
	HAL_TXQ_INFO qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
	/* NB: don't enable any interrupts */
	return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
}

/*
 * Allocate and setup an initial beacon frame.
 */
static int
ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_buf *bf;
	struct mbuf *m;
	int error;

	bf = STAILQ_FIRST(&sc->sc_bbuf);
	if (bf == NULL) {
		DPRINTF(sc, ATH_DEBUG_BEACON, "%s: no dma buffers\n", __func__);
		sc->sc_stats.ast_be_nombuf++;	/* XXX */
		return ENOMEM;			/* XXX */
	}
	/*
	 * NB: the beacon data buffer must be 32-bit aligned;
	 * we assume the mbuf routines will return us something
	 * with this alignment (perhaps should assert).
	 */
	m = ieee80211_beacon_alloc(ic, ni, &sc->sc_boff);
	if (m == NULL) {
		DPRINTF(sc, ATH_DEBUG_BEACON, "%s: cannot get mbuf\n",
			__func__);
		sc->sc_stats.ast_be_nombuf++;
		return ENOMEM;
	}
	error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m,
			BUS_DMA_NOWAIT);
	if (error == 0) {
		/* beacon buffer keeps the mbuf and a node reference */
		bf->bf_m = m;
		bf->bf_node = ieee80211_ref_node(ni);
	} else {
		m_freem(m);
	}
	return error;
}

/*
 * Setup the beacon frame for transmit.
1906 */ 1907 static void 1908 ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf) 1909 { 1910 #define USE_SHPREAMBLE(_ic) \ 1911 (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\ 1912 == IEEE80211_F_SHPREAMBLE) 1913 struct ieee80211_node *ni = bf->bf_node; 1914 struct ieee80211com *ic = ni->ni_ic; 1915 struct mbuf *m = bf->bf_m; 1916 struct ath_hal *ah = sc->sc_ah; 1917 struct ath_node *an = ATH_NODE(ni); 1918 struct ath_desc *ds; 1919 int flags, antenna; 1920 u_int8_t rate; 1921 1922 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: m %p len %u\n", 1923 __func__, m, m->m_len); 1924 1925 /* setup descriptors */ 1926 ds = bf->bf_desc; 1927 1928 flags = HAL_TXDESC_NOACK; 1929 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) { 1930 ds->ds_link = bf->bf_daddr; /* self-linked */ 1931 flags |= HAL_TXDESC_VEOL; 1932 /* 1933 * Let hardware handle antenna switching. 1934 */ 1935 antenna = 0; 1936 } else { 1937 ds->ds_link = 0; 1938 /* 1939 * Switch antenna every 4 beacons. 1940 * XXX assumes two antenna 1941 */ 1942 antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1); 1943 } 1944 1945 KASSERT(bf->bf_nseg == 1, 1946 ("multi-segment beacon frame; nseg %u", bf->bf_nseg)); 1947 ds->ds_data = bf->bf_segs[0].ds_addr; 1948 /* 1949 * Calculate rate code. 
1950 * XXX everything at min xmit rate 1951 */ 1952 if (USE_SHPREAMBLE(ic)) 1953 rate = an->an_tx_mgtratesp; 1954 else 1955 rate = an->an_tx_mgtrate; 1956 ath_hal_setuptxdesc(ah, ds 1957 , m->m_len + IEEE80211_CRC_LEN /* frame length */ 1958 , sizeof(struct ieee80211_frame)/* header length */ 1959 , HAL_PKT_TYPE_BEACON /* Atheros packet type */ 1960 , ni->ni_txpower /* txpower XXX */ 1961 , rate, 1 /* series 0 rate/tries */ 1962 , HAL_TXKEYIX_INVALID /* no encryption */ 1963 , antenna /* antenna mode */ 1964 , flags /* no ack, veol for beacons */ 1965 , 0 /* rts/cts rate */ 1966 , 0 /* rts/cts duration */ 1967 ); 1968 /* NB: beacon's BufLen must be a multiple of 4 bytes */ 1969 ath_hal_filltxdesc(ah, ds 1970 , roundup(m->m_len, 4) /* buffer length */ 1971 , AH_TRUE /* first segment */ 1972 , AH_TRUE /* last segment */ 1973 , ds /* first descriptor */ 1974 ); 1975 /* XXX bus_dmamap_sync? -dcy */ 1976 #undef USE_SHPREAMBLE 1977 } 1978 1979 /* 1980 * Transmit a beacon frame at SWBA. Dynamic updates to the 1981 * frame contents are done as needed and the slot time is 1982 * also adjusted based on current state. 1983 */ 1984 static void 1985 ath_beacon_proc(void *arg, int pending) 1986 { 1987 struct ath_softc *sc = arg; 1988 struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf); 1989 struct ieee80211_node *ni = bf->bf_node; 1990 struct ieee80211com *ic = ni->ni_ic; 1991 struct ath_hal *ah = sc->sc_ah; 1992 struct mbuf *m; 1993 int ncabq, error, otherant; 1994 1995 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n", 1996 __func__, pending); 1997 1998 if (ic->ic_opmode == IEEE80211_M_STA || 1999 ic->ic_opmode == IEEE80211_M_MONITOR || 2000 bf == NULL || bf->bf_m == NULL) { 2001 DPRINTF(sc, ATH_DEBUG_ANY, "%s: ic_flags=%x bf=%p bf_m=%p\n", 2002 __func__, ic->ic_flags, bf, bf ? bf->bf_m : NULL); 2003 return; 2004 } 2005 /* 2006 * Check if the previous beacon has gone out. If 2007 * not don't don't try to post another, skip this 2008 * period and wait for the next. 
Missed beacons 2009 * indicate a problem and should not occur. If we 2010 * miss too many consecutive beacons reset the device. 2011 */ 2012 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { 2013 sc->sc_bmisscount++; 2014 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, 2015 "%s: missed %u consecutive beacons\n", 2016 __func__, sc->sc_bmisscount); 2017 if (sc->sc_bmisscount > 3) /* NB: 3 is a guess */ 2018 TASK_RUN_OR_ENQUEUE(&sc->sc_bstucktask); 2019 return; 2020 } 2021 if (sc->sc_bmisscount != 0) { 2022 DPRINTF(sc, ATH_DEBUG_BEACON, 2023 "%s: resume beacon xmit after %u misses\n", 2024 __func__, sc->sc_bmisscount); 2025 sc->sc_bmisscount = 0; 2026 } 2027 2028 /* 2029 * Update dynamic beacon contents. If this returns 2030 * non-zero then we need to remap the memory because 2031 * the beacon frame changed size (probably because 2032 * of the TIM bitmap). 2033 */ 2034 m = bf->bf_m; 2035 ncabq = ath_hal_numtxpending(ah, sc->sc_cabq->axq_qnum); 2036 if (ieee80211_beacon_update(ic, bf->bf_node, &sc->sc_boff, m, ncabq)) { 2037 /* XXX too conservative? */ 2038 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2039 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m, 2040 BUS_DMA_NOWAIT); 2041 if (error != 0) { 2042 if_printf(&sc->sc_if, 2043 "%s: bus_dmamap_load_mbuf failed, error %u\n", 2044 __func__, error); 2045 return; 2046 } 2047 } 2048 2049 /* 2050 * Handle slot time change when a non-ERP station joins/leaves 2051 * an 11g network. The 802.11 layer notifies us via callback, 2052 * we mark updateslot, then wait one beacon before effecting 2053 * the change. This gives associated stations at least one 2054 * beacon interval to note the state change. 
2055 */ 2056 /* XXX locking */ 2057 if (sc->sc_updateslot == UPDATE) 2058 sc->sc_updateslot = COMMIT; /* commit next beacon */ 2059 else if (sc->sc_updateslot == COMMIT) 2060 ath_setslottime(sc); /* commit change to h/w */ 2061 2062 /* 2063 * Check recent per-antenna transmit statistics and flip 2064 * the default antenna if noticeably more frames went out 2065 * on the non-default antenna. 2066 * XXX assumes 2 anntenae 2067 */ 2068 otherant = sc->sc_defant & 1 ? 2 : 1; 2069 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2) 2070 ath_setdefantenna(sc, otherant); 2071 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; 2072 2073 /* 2074 * Construct tx descriptor. 2075 */ 2076 ath_beacon_setup(sc, bf); 2077 2078 /* 2079 * Stop any current dma and put the new frame on the queue. 2080 * This should never fail since we check above that no frames 2081 * are still pending on the queue. 2082 */ 2083 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { 2084 DPRINTF(sc, ATH_DEBUG_ANY, 2085 "%s: beacon queue %u did not stop?\n", 2086 __func__, sc->sc_bhalq); 2087 } 2088 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 2089 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 2090 2091 /* 2092 * Enable the CAB queue before the beacon queue to 2093 * insure cab frames are triggered by this beacon. 2094 */ 2095 if (sc->sc_boff.bo_tim[4] & 1) /* NB: only at DTIM */ 2096 ath_hal_txstart(ah, sc->sc_cabq->axq_qnum); 2097 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); 2098 ath_hal_txstart(ah, sc->sc_bhalq); 2099 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, 2100 "%s: TXDP[%u] = %p (%p)\n", __func__, 2101 sc->sc_bhalq, (caddr_t)bf->bf_daddr, bf->bf_desc); 2102 2103 sc->sc_stats.ast_be_xmit++; 2104 } 2105 2106 /* 2107 * Reset the hardware after detecting beacons have stopped. 
 */
static void
ath_bstuck_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;

	if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
		sc->sc_bmisscount);
	ath_reset(ifp);
}

/*
 * Reclaim beacon resources.
 */
static void
ath_beacon_free(struct ath_softc *sc)
{
	struct ath_buf *bf;

	STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
		if (bf->bf_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
			m_freem(bf->bf_m);
			bf->bf_m = NULL;
		}
		if (bf->bf_node != NULL) {
			/* drop the node reference held by the beacon buf */
			ieee80211_free_node(bf->bf_node);
			bf->bf_node = NULL;
		}
	}
}

/*
 * Configure the beacon and sleep timers.
 *
 * When operating as an AP this resets the TSF and sets
 * up the hardware to notify us when we need to issue beacons.
 *
 * When operating in station mode this sets up the beacon
 * timers according to the timestamp of the last received
 * beacon and the current TSF, configures PCF and DTIM
 * handling, programs the sleep registers so the hardware
 * will wakeup in time to receive beacons, and configures
 * the beacon miss handling so we'll receive a BMISS
 * interrupt when we stop seeing beacons from the AP
 * we've associated with.
 */
static void
ath_beacon_config(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	u_int32_t nexttbtt, intval;

	/*
	 * Derive the next target beacon transmit time (in TU) from the
	 * bss node's last beacon timestamp (little-endian, usec >> 10).
	 */
	nexttbtt = (LE_READ_4(ni->ni_tstamp.data + 4) << 22) |
	    (LE_READ_4(ni->ni_tstamp.data) >> 10);
	intval = ni->ni_intval & HAL_BEACON_PERIOD;
	if (nexttbtt == 0)		/* e.g. for ap mode */
		nexttbtt = intval;
	else if (intval)		/* NB: can be 0 for monitor mode */
		nexttbtt = roundup(nexttbtt, intval);
	DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
		__func__, nexttbtt, intval, ni->ni_intval);
	if (ic->ic_opmode == IEEE80211_M_STA) {
		HAL_BEACON_STATE bs;

		/* NB: no PCF support right now */
		memset(&bs, 0, sizeof(bs));
		bs.bs_intval = intval;
		bs.bs_nexttbtt = nexttbtt;
		bs.bs_dtimperiod = bs.bs_intval;
		bs.bs_nextdtim = nexttbtt;
		/*
		 * The 802.11 layer records the offset to the DTIM
		 * bitmap while receiving beacons; use it here to
		 * enable h/w detection of our AID being marked in
		 * the bitmap vector (to indicate frames for us are
		 * pending at the AP).
		 */
		bs.bs_timoffset = ni->ni_timoff;
		/*
		 * Calculate the number of consecutive beacons to miss
		 * before taking a BMISS interrupt.  The configuration
		 * is specified in ms, so we need to convert that to
		 * TU's and then calculate based on the beacon interval.
		 * Note that we clamp the result to at most 10 beacons.
		 */
		bs.bs_bmissthreshold = howmany(ic->ic_bmisstimeout, intval);
		if (bs.bs_bmissthreshold > 10)
			bs.bs_bmissthreshold = 10;
		else if (bs.bs_bmissthreshold <= 0)
			bs.bs_bmissthreshold = 1;

		/*
		 * Calculate sleep duration.  The configuration is
		 * given in ms.  We insure a multiple of the beacon
		 * period is used.  Also, if the sleep duration is
		 * greater than the DTIM period then it makes sense
		 * to make it a multiple of that.
		 *
		 * XXX fixed at 100ms
		 */
		bs.bs_sleepduration =
			roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
		if (bs.bs_sleepduration > bs.bs_dtimperiod)
			bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);

		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
			, __func__
			, bs.bs_intval
			, bs.bs_nexttbtt
			, bs.bs_dtimperiod
			, bs.bs_nextdtim
			, bs.bs_bmissthreshold
			, bs.bs_sleepduration
			, bs.bs_cfpperiod
			, bs.bs_cfpmaxduration
			, bs.bs_cfpnext
			, bs.bs_timoffset
		);
		/* program timers with interrupts blocked */
		ath_hal_intrset(ah, 0);
		ath_hal_beacontimers(ah, &bs);
		sc->sc_imask |= HAL_INT_BMISS;
		ath_hal_intrset(ah, sc->sc_imask);
	} else {
		ath_hal_intrset(ah, 0);
		if (nexttbtt == intval)
			intval |= HAL_BEACON_RESET_TSF;
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			/*
			 * In IBSS mode enable the beacon timers but only
			 * enable SWBA interrupts if we need to manually
			 * prepare beacon frames.  Otherwise we use a
			 * self-linked tx descriptor and let the hardware
			 * deal with things.
			 */
			intval |= HAL_BEACON_ENA;
			if (!sc->sc_hasveol)
				sc->sc_imask |= HAL_INT_SWBA;
		} else if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
			/*
			 * In AP mode we enable the beacon timers and
			 * SWBA interrupts to prepare beacon frames.
			 */
			intval |= HAL_BEACON_ENA;
			sc->sc_imask |= HAL_INT_SWBA;	/* beacon prepare */
		}
		ath_hal_beaconinit(ah, nexttbtt, intval);
		sc->sc_bmisscount = 0;
		ath_hal_intrset(ah, sc->sc_imask);
		/*
		 * When using a self-linked beacon descriptor in
		 * ibss mode load it once here.
2264 */ 2265 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) 2266 ath_beacon_proc(sc, 0); 2267 } 2268 } 2269 2270 static int 2271 ath_descdma_setup(struct ath_softc *sc, 2272 struct ath_descdma *dd, ath_bufhead *head, 2273 const char *name, int nbuf, int ndesc) 2274 { 2275 #define DS2PHYS(_dd, _ds) \ 2276 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 2277 struct ifnet *ifp = &sc->sc_if; 2278 struct ath_desc *ds; 2279 struct ath_buf *bf; 2280 int i, bsize, error; 2281 2282 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n", 2283 __func__, name, nbuf, ndesc); 2284 2285 dd->dd_name = name; 2286 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc; 2287 2288 /* 2289 * Setup DMA descriptor area. 2290 */ 2291 dd->dd_dmat = sc->sc_dmat; 2292 2293 error = bus_dmamem_alloc(dd->dd_dmat, dd->dd_desc_len, PAGE_SIZE, 2294 0, &dd->dd_dseg, 1, &dd->dd_dnseg, 0); 2295 2296 if (error != 0) { 2297 if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 2298 "error %u\n", nbuf * ndesc, dd->dd_name, error); 2299 goto fail0; 2300 } 2301 2302 error = bus_dmamem_map(dd->dd_dmat, &dd->dd_dseg, dd->dd_dnseg, 2303 dd->dd_desc_len, (caddr_t *)&dd->dd_desc, BUS_DMA_COHERENT); 2304 if (error != 0) { 2305 if_printf(ifp, "unable to map %u %s descriptors, error = %u\n", 2306 nbuf * ndesc, dd->dd_name, error); 2307 goto fail1; 2308 } 2309 2310 /* allocate descriptors */ 2311 error = bus_dmamap_create(dd->dd_dmat, dd->dd_desc_len, 1, 2312 dd->dd_desc_len, 0, BUS_DMA_NOWAIT, &dd->dd_dmamap); 2313 if (error != 0) { 2314 if_printf(ifp, "unable to create dmamap for %s descriptors, " 2315 "error %u\n", dd->dd_name, error); 2316 goto fail2; 2317 } 2318 2319 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, dd->dd_desc, 2320 dd->dd_desc_len, NULL, BUS_DMA_NOWAIT); 2321 if (error != 0) { 2322 if_printf(ifp, "unable to map %s descriptors, error %u\n", 2323 dd->dd_name, error); 2324 goto fail3; 2325 } 2326 2327 ds = dd->dd_desc; 2328 
dd->dd_desc_paddr = dd->dd_dmamap->dm_segs[0].ds_addr; 2329 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 2330 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, 2331 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); 2332 2333 /* allocate rx buffers */ 2334 bsize = sizeof(struct ath_buf) * nbuf; 2335 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 2336 if (bf == NULL) { 2337 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 2338 dd->dd_name, bsize); 2339 goto fail4; 2340 } 2341 dd->dd_bufptr = bf; 2342 2343 STAILQ_INIT(head); 2344 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) { 2345 bf->bf_desc = ds; 2346 bf->bf_daddr = DS2PHYS(dd, ds); 2347 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, ndesc, 2348 MCLBYTES, 0, BUS_DMA_NOWAIT, &bf->bf_dmamap); 2349 if (error != 0) { 2350 if_printf(ifp, "unable to create dmamap for %s " 2351 "buffer %u, error %u\n", dd->dd_name, i, error); 2352 ath_descdma_cleanup(sc, dd, head); 2353 return error; 2354 } 2355 STAILQ_INSERT_TAIL(head, bf, bf_list); 2356 } 2357 return 0; 2358 fail4: 2359 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2360 fail3: 2361 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2362 fail2: 2363 bus_dmamem_unmap(dd->dd_dmat, (caddr_t)dd->dd_desc, dd->dd_desc_len); 2364 fail1: 2365 bus_dmamem_free(dd->dd_dmat, &dd->dd_dseg, dd->dd_dnseg); 2366 fail0: 2367 memset(dd, 0, sizeof(*dd)); 2368 return error; 2369 #undef DS2PHYS 2370 } 2371 2372 static void 2373 ath_descdma_cleanup(struct ath_softc *sc, 2374 struct ath_descdma *dd, ath_bufhead *head) 2375 { 2376 struct ath_buf *bf; 2377 struct ieee80211_node *ni; 2378 2379 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2380 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2381 bus_dmamem_unmap(dd->dd_dmat, (caddr_t)dd->dd_desc, dd->dd_desc_len); 2382 bus_dmamem_free(dd->dd_dmat, &dd->dd_dseg, dd->dd_dnseg); 2383 2384 STAILQ_FOREACH(bf, head, bf_list) { 2385 if (bf->bf_m) { 2386 m_freem(bf->bf_m); 2387 bf->bf_m = NULL; 2388 } 
2389 if (bf->bf_dmamap != NULL) { 2390 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 2391 bf->bf_dmamap = NULL; 2392 } 2393 ni = bf->bf_node; 2394 bf->bf_node = NULL; 2395 if (ni != NULL) { 2396 /* 2397 * Reclaim node reference. 2398 */ 2399 ieee80211_free_node(ni); 2400 } 2401 } 2402 2403 STAILQ_INIT(head); 2404 free(dd->dd_bufptr, M_ATHDEV); 2405 memset(dd, 0, sizeof(*dd)); 2406 } 2407 2408 static int 2409 ath_desc_alloc(struct ath_softc *sc) 2410 { 2411 int error; 2412 2413 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, 2414 "rx", ATH_RXBUF, 1); 2415 if (error != 0) 2416 return error; 2417 2418 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 2419 "tx", ATH_TXBUF, ATH_TXDESC); 2420 if (error != 0) { 2421 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 2422 return error; 2423 } 2424 2425 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 2426 "beacon", 1, 1); 2427 if (error != 0) { 2428 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 2429 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 2430 return error; 2431 } 2432 return 0; 2433 } 2434 2435 static void 2436 ath_desc_free(struct ath_softc *sc) 2437 { 2438 2439 if (sc->sc_bdma.dd_desc_len != 0) 2440 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 2441 if (sc->sc_txdma.dd_desc_len != 0) 2442 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 2443 if (sc->sc_rxdma.dd_desc_len != 0) 2444 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 2445 } 2446 2447 static struct ieee80211_node * 2448 ath_node_alloc(struct ieee80211_node_table *nt) 2449 { 2450 struct ieee80211com *ic = nt->nt_ic; 2451 struct ath_softc *sc = ic->ic_ifp->if_softc; 2452 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 2453 struct ath_node *an; 2454 2455 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 2456 if (an == NULL) { 2457 /* XXX stat+msg */ 2458 return NULL; 2459 } 2460 an->an_avgrssi = ATH_RSSI_DUMMY_MARKER; 2461 an->an_halstats.ns_avgbrssi = 
ATH_RSSI_DUMMY_MARKER;
	an->an_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
	an->an_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
	ath_rate_node_init(sc, an);

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
	return &an->an_node;
}

/*
 * 802.11 layer callback: release rate-control state then hand
 * the node back to the net80211 free routine saved at attach.
 */
static void
ath_node_free(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);

	ath_rate_node_cleanup(sc, ATH_NODE(ni));
	sc->sc_node_free(ni);
}

/*
 * 802.11 layer callback: report the filtered rssi for a node,
 * clamped to [0, 127].
 */
static u_int8_t
ath_node_getrssi(const struct ieee80211_node *ni)
{
#define	HAL_EP_RND(x, mul) \
	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
	u_int32_t avgrssi = ATH_NODE_CONST(ni)->an_avgrssi;
	int32_t rssi;

	/*
	 * When only one frame is received there will be no state in
	 * avgrssi so fallback on the value recorded by the 802.11 layer.
	 */
	if (avgrssi != ATH_RSSI_DUMMY_MARKER)
		rssi = HAL_EP_RND(avgrssi, HAL_RSSI_EP_MULTIPLIER);
	else
		rssi = ni->ni_rssi;
	/* NB: theoretically we shouldn't need this, but be paranoid */
	return rssi < 0 ? 0 : rssi > 127 ? 127 : rssi;
#undef HAL_EP_RND
}

/*
 * (Re)attach an mbuf cluster to an rx ath_buf and write its
 * self-linked rx descriptor, appending it to the h/w rx chain.
 * Returns 0 on success or ENOMEM/bus_dma error.
 */
static int
ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		bf->bf_m = m;
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf(sc->sc_dmat,
			bf->bf_dmamap, m,
			BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: bus_dmamap_load_mbuf failed; error %d\n",
			    __func__, error);
			sc->sc_stats.ast_rx_busdma++;
			return error;
		}
		KASSERT(bf->bf_nseg == 1,
			("multi-segment packet; nseg %u", bf->bf_nseg));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
	    bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To insure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This insures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->bf_desc;
	ds->ds_link = bf->bf_daddr;	/* link to self */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}

/*
 * Extend the 16-bit rx timestamp to a full 64-bit TSF using the
 * current h/w TSF, compensating for a rollover between the rx
 * event and the TSF read.
 * NOTE(review): this nearly duplicates ath_extend_tsf() below,
 * which handles a 15-bit stamp — presumably for different chips;
 * consider merging once the stamp widths are confirmed.
 */
static uint64_t
ath_tsf_extend(struct ath_hal *ah, uint32_t rstamp)
{
	uint64_t tsf;

	KASSERT((rstamp & 0xffff0000) == 0,
	    ("rx timestamp > 16 bits wide, %" PRIu32, rstamp));

	tsf = ath_hal_gettsf64(ah);

	/* Compensate for rollover. */
	if ((tsf & 0xffff) <= rstamp)
		tsf -= 0x10000;

	return (tsf & ~(uint64_t)0xffff) | rstamp;
}

/*
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */
static __inline u_int64_t
ath_extend_tsf(struct ath_hal *ah, u_int32_t rstamp)
{
	u_int64_t tsf;

	tsf = ath_hal_gettsf64(ah);
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;
	return ((tsf &~ 0x7fff) | rstamp);
}

/*
 * Intercept management frames to collect beacon rssi data
 * and to do ibss merges.
 */
static void
ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m,
	struct ieee80211_node *ni,
	int subtype, int rssi, u_int32_t rstamp)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	/*
	 * Call up first so subsequent work can use information
	 * potentially stored in the node (e.g. for ibss merge).
	 */
	sc->sc_recv_mgmt(ic, m, ni, subtype, rssi, rstamp);
	switch (subtype) {
	case IEEE80211_FC0_SUBTYPE_BEACON:
		/* update rssi statistics for use by the hal */
		ATH_RSSI_LPF(ATH_NODE(ni)->an_halstats.ns_avgbrssi, rssi);
		/* fall thru...
		 */
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
		if (ic->ic_opmode == IEEE80211_M_IBSS &&
		    ic->ic_state == IEEE80211_S_RUN) {
			u_int64_t tsf = ath_tsf_extend(sc->sc_ah, rstamp);

			/*
			 * Handle ibss merge as needed; check the tsf on the
			 * frame before attempting the merge.  The 802.11 spec
			 * says the station should change its bssid to match
			 * the oldest station with the same ssid, where oldest
			 * is determined by the tsf.  Note that hardware
			 * reconfiguration happens through callback to
			 * ath_newstate as the state machine will go from
			 * RUN -> RUN when this happens.
			 */
			if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
				DPRINTF(sc, ATH_DEBUG_STATE,
				    "ibss merge, rstamp %u tsf %ju "
				    "tstamp %ju\n", rstamp, (uintmax_t)tsf,
				    (uintmax_t)ni->ni_tstamp.tsf);
				(void) ieee80211_ibss_merge(ic, ni);
			}
		}
		break;
	}
}

/*
 * Set the default antenna.
 */
static void
ath_setdefantenna(struct ath_softc *sc, u_int antenna)
{
	struct ath_hal *ah = sc->sc_ah;

	/* XXX block beacon interrupts */
	ath_hal_setdefantenna(ah, antenna);
	if (sc->sc_defant != antenna)
		sc->sc_stats.ast_ant_defswitch++;
	sc->sc_defant = antenna;
	sc->sc_rxotherant = 0;
}

/*
 * Receive processing: walk the rx descriptor list reaping
 * completed frames, handle/classify errors, feed bpf taps,
 * and pass good frames to the 802.11 layer.  Reaped buffers
 * are re-armed via ath_rxbuf_init and re-queued.
 */
static void
ath_rx_proc(void *arg, int npending)
{
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_softc *sc = arg;
	struct ath_buf *bf;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_if;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct mbuf *m;
	struct ieee80211_node *ni;
	struct ath_node *an;
	int len, type;
	u_int phyerr;
	HAL_STATUS status;

	NET_LOCK_GIANT();		/* XXX */

	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
	do {
		bf = STAILQ_FIRST(&sc->sc_rxbuf);
		if (bf == NULL) {		/* NB: shouldn't happen */
			if_printf(ifp, "%s: no buffer!\n", __func__);
			break;
		}
		ds = bf->bf_desc;
		if (ds->ds_link == bf->bf_daddr) {
			/* NB: never process the self-linked entry at the end */
			break;
		}
		m = bf->bf_m;
		if (m == NULL) {		/* NB: shouldn't happen */
			if_printf(ifp, "%s: no mbuf!\n", __func__);
			continue;
		}
		/* XXX sync descriptor memory */
		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on.  All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		status = ath_hal_rxprocdesc(ah, ds,
				bf->bf_daddr, PA2DESC(sc, ds->ds_link));
#ifdef AR_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(bf, status == HAL_OK);
#endif
		if (status == HAL_EINPROGRESS)
			break;
		STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
		if (ds->ds_rxstat.rs_more) {
			/*
			 * Frame spans multiple descriptors; this
			 * cannot happen yet as we don't support
			 * jumbograms.  If not in monitor mode,
			 * discard the frame.
			 */
			if (ic->ic_opmode != IEEE80211_M_MONITOR) {
				sc->sc_stats.ast_rx_toobig++;
				goto rx_next;
			}
			/* fall thru for monitor mode handling... */
		} else if (ds->ds_rxstat.rs_status != 0) {
			if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC)
				sc->sc_stats.ast_rx_crcerr++;
			if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO)
				sc->sc_stats.ast_rx_fifoerr++;
			if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) {
				sc->sc_stats.ast_rx_phyerr++;
				phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
				sc->sc_stats.ast_rx_phy[phyerr]++;
				goto rx_next;
			}
			if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) {
				/*
				 * Decrypt error.  If the error occurred
				 * because there was no hardware key, then
				 * let the frame through so the upper layers
				 * can process it.  This is necessary for 5210
				 * parts which have no way to setup a ``clear''
				 * key cache entry.
				 *
				 * XXX do key cache faulting
				 */
				if (ds->ds_rxstat.rs_keyix == HAL_RXKEYIX_INVALID)
					goto rx_accept;
				sc->sc_stats.ast_rx_badcrypt++;
			}
			if (ds->ds_rxstat.rs_status & HAL_RXERR_MIC) {
				sc->sc_stats.ast_rx_badmic++;
				/*
				 * Do minimal work required to hand off
				 * the 802.11 header for notification.
				 */
				/* XXX frag's and qos frames */
				len = ds->ds_rxstat.rs_datalen;
				if (len >= sizeof (struct ieee80211_frame)) {
					bus_dmamap_sync(sc->sc_dmat,
					    bf->bf_dmamap,
					    0, bf->bf_dmamap->dm_mapsize,
					    BUS_DMASYNC_POSTREAD);
					ieee80211_notify_michael_failure(ic,
					    mtod(m, struct ieee80211_frame *),
					    sc->sc_splitmic ?
					        ds->ds_rxstat.rs_keyix-32 :
					        ds->ds_rxstat.rs_keyix
					);
				}
			}
			ifp->if_ierrors++;
			/*
			 * Reject error frames, we normally don't want
			 * to see them in monitor mode (in monitor mode
			 * allow through packets that have crypto problems).
			 */
			if ((ds->ds_rxstat.rs_status &~
				(HAL_RXERR_DECRYPT|HAL_RXERR_MIC)) ||
			    sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR)
				goto rx_next;
		}
rx_accept:
		/*
		 * Sync and unmap the frame.  At this point we're
		 * committed to passing the mbuf somewhere so clear
		 * bf_m; this means a new sk_buff must be allocated
		 * when the rx descriptor is setup again to receive
		 * another frame.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    0, bf->bf_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		bf->bf_m = NULL;

		m->m_pkthdr.rcvif = ifp;
		len = ds->ds_rxstat.rs_datalen;
		m->m_pkthdr.len = m->m_len = len;

		sc->sc_stats.ast_ant_rx[ds->ds_rxstat.rs_antenna]++;

#if NBPFILTER > 0
		if (sc->sc_drvbpf) {
			u_int8_t rix;

			/*
			 * Discard anything shorter than an ack or cts.
			 */
			if (len < IEEE80211_ACK_LEN) {
				DPRINTF(sc, ATH_DEBUG_RECV,
					"%s: runt packet %d\n",
					__func__, len);
				sc->sc_stats.ast_rx_tooshort++;
				m_freem(m);
				goto rx_next;
			}
			rix = ds->ds_rxstat.rs_rate;
			sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
			sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
			sc->sc_rx_th.wr_antsignal = ds->ds_rxstat.rs_rssi;
			sc->sc_rx_th.wr_antenna = ds->ds_rxstat.rs_antenna;
			/* XXX TSF */

			bpf_mtap2(sc->sc_drvbpf,
				&sc->sc_rx_th, sc->sc_rx_th_len, m);
		}
#endif

		/*
		 * From this point on we assume the frame is at least
		 * as large as ieee80211_frame_min; verify that.
		 */
		if (len < IEEE80211_MIN_LEN) {
			DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n",
				__func__, len);
			sc->sc_stats.ast_rx_tooshort++;
			m_freem(m);
			goto rx_next;
		}

		if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
			ieee80211_dump_pkt(mtod(m, caddr_t), len,
				   sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate,
				   ds->ds_rxstat.rs_rssi);
		}

		/* strip the FCS before handing the frame up */
		m_adj(m, -IEEE80211_CRC_LEN);

		/*
		 * Locate the node for sender, track state, and then
		 * pass the (referenced) node up to the 802.11 layer
		 * for its use.
		 */
		ni = ieee80211_find_rxnode(ic,
			mtod(m, const struct ieee80211_frame_min *));

		/*
		 * Track rx rssi and do any rx antenna management.
		 */
		an = ATH_NODE(ni);
		ATH_RSSI_LPF(an->an_avgrssi, ds->ds_rxstat.rs_rssi);
		if (sc->sc_diversity) {
			/*
			 * When using fast diversity, change the default rx
			 * antenna if diversity chooses the other antenna 3
			 * times in a row.
			 */
			if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
				if (++sc->sc_rxotherant >= 3)
					ath_setdefantenna(sc,
						ds->ds_rxstat.rs_antenna);
			} else
				sc->sc_rxotherant = 0;
		}

		/*
		 * Send frame up for processing.
		 */
		type = ieee80211_input(ic, m, ni,
			ds->ds_rxstat.rs_rssi, ds->ds_rxstat.rs_tstamp);

		if (sc->sc_softled) {
			/*
			 * Blink for any data frame.  Otherwise do a
			 * heartbeat-style blink when idle.  The latter
			 * is mainly for station mode where we depend on
			 * periodic beacon frames to trigger the poll event.
			 */
			if (type == IEEE80211_FC0_TYPE_DATA) {
				sc->sc_rxrate = ds->ds_rxstat.rs_rate;
				ath_led_event(sc, ATH_LED_RX);
			} else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
				ath_led_event(sc, ATH_LED_POLL);
		}

		/*
		 * Reclaim node reference.
		 */
		ieee80211_free_node(ni);
rx_next:
		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
	} while (ath_rxbuf_init(sc, bf) == 0);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &ATH_NODE(ic->ic_bss)->an_halstats);

#ifdef __NetBSD__
	/* XXX Why isn't this necessary in FreeBSD? */
	if ((ifp->if_flags & IFF_OACTIVE) == 0 && !IFQ_IS_EMPTY(&ifp->if_snd))
		ath_start(ifp);
#endif /* __NetBSD__ */

	NET_UNLOCK_GIANT();		/* XXX */
#undef PA2DESC
}

/*
 * Setup a h/w transmit queue.
 */
static struct ath_txq *
ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;
	int qnum;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= N(sc->sc_txq)) {
		device_printf(sc->sc_dev,
			"hal qnum %u out of range, max %zu!\n",
			qnum, N(sc->sc_txq));
		ath_hal_releasetxqueue(ah, qnum);
		return NULL;
	}
	/* first time this queue is returned: initialize s/w state */
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->sc_txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_depth = 0;
		txq->axq_intrcnt = 0;
		txq->axq_link = NULL;
		STAILQ_INIT(&txq->axq_q);
		ATH_TXQ_LOCK_INIT(sc, txq);
		sc->sc_txqsetup |= 1<<qnum;
	}
	return &sc->sc_txq[qnum];
#undef N
}

/*
 * Setup a hardware data transmit queue for the specified
 * access control.  The hal may not support all requested
 * queues in which case it will return a reference to a
 * previously setup queue.  We record the mapping from ac's
 * to h/w queues for use by ath_tx_start and also track
 * the set of h/w queues being used to optimize work in the
 * transmit interrupt handler and related routines.
 */
static int
ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	struct ath_txq *txq;

	if (ac >= N(sc->sc_ac2q)) {
		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
			ac, N(sc->sc_ac2q));
		return 0;
	}
	txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->sc_ac2q[ac] = txq;
		return 1;
	} else
		return 0;
#undef N
}

/*
 * Update WME parameters for a transmit queue.
 */
static int
ath_txq_update(struct ath_softc *sc, int ac)
{
	/* convert WME exponent form (log2) to a cw value */
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
	/* txop limit is in units of 32us */
#define	ATH_TXOP_TO_US(v)		(v<<5)
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_txq *txq = sc->sc_ac2q[ac];
	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;

	ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
	qi.tqi_aifs = wmep->wmep_aifsn;
	qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
	qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
	qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);

	if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
		device_printf(sc->sc_dev, "unable to update hardware queue "
			"parameters for %s traffic!\n",
			ieee80211_wme_acnames[ac]);
		return 0;
	} else {
		ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
		return 1;
	}
#undef ATH_TXOP_TO_US
#undef ATH_EXPONENT_TO_VALUE
}

/*
 * Callback from the 802.11
 * layer to update WME parameters.
 */
static int
ath_wme_update(struct ieee80211com *ic)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	/* EIO if any per-AC queue update fails */
	return !ath_txq_update(sc, WME_AC_BE) ||
	    !ath_txq_update(sc, WME_AC_BK) ||
	    !ath_txq_update(sc, WME_AC_VI) ||
	    !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
}

/*
 * Reclaim resources for a setup queue.
 */
static void
ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{

	ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	ATH_TXQ_LOCK_DESTROY(txq);
	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
}

/*
 * Reclaim all tx queue resources.
 */
static void
ath_tx_cleanup(struct ath_softc *sc)
{
	int i;

	ATH_TXBUF_LOCK_DESTROY(sc);
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
}

/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 */
static struct mbuf *
ath_defrag(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags++;
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		/* copy n's data into m's trailing space when writable */
		if ((m->m_flags & M_RDONLY) == 0 &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
				n->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m_free(n);
			if (--curfrags <= maxfrags)
				return m0;
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
		("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
				n2->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			m_free(n);
			m_free(n2);
			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return m0;
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return NULL;
}

static int
ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf,
    struct mbuf *m0)
{
#define	CTS_DURATION \
	ath_hal_computetxtime(ah, rt, IEEE80211_ACK_LEN, cix, AH_TRUE)
#define	updateCTSForBursting(_ah, _ds, _txq) \
	ath_hal_updateCTSForBursting(_ah, _ds, \
	    _txq->axq_linkbuf != NULL ?
_txq->axq_linkbuf->bf_desc : NULL, \ 3189 _txq->axq_lastdsWithCTS, _txq->axq_gatingds, \ 3190 txopLimit, CTS_DURATION) 3191 struct ieee80211com *ic = &sc->sc_ic; 3192 struct ath_hal *ah = sc->sc_ah; 3193 struct ifnet *ifp = &sc->sc_if; 3194 const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; 3195 int i, error, iswep, ismcast, keyix, hdrlen, pktlen, try0; 3196 u_int8_t rix, txrate, ctsrate; 3197 u_int8_t cix = 0xff; /* NB: silence compiler */ 3198 struct ath_desc *ds, *ds0; 3199 struct ath_txq *txq; 3200 struct ieee80211_frame *wh; 3201 u_int subtype, flags, ctsduration; 3202 HAL_PKT_TYPE atype; 3203 const HAL_RATE_TABLE *rt; 3204 HAL_BOOL shortPreamble; 3205 struct ath_node *an; 3206 struct mbuf *m; 3207 u_int pri; 3208 3209 wh = mtod(m0, struct ieee80211_frame *); 3210 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP; 3211 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 3212 hdrlen = ieee80211_anyhdrsize(wh); 3213 /* 3214 * Packet length must not include any 3215 * pad bytes; deduct them here. 3216 */ 3217 pktlen = m0->m_pkthdr.len - (hdrlen & 3); 3218 3219 if (iswep) { 3220 const struct ieee80211_cipher *cip; 3221 struct ieee80211_key *k; 3222 3223 /* 3224 * Construct the 802.11 header+trailer for an encrypted 3225 * frame. The only reason this can fail is because of an 3226 * unknown or unsupported cipher/key type. 3227 */ 3228 k = ieee80211_crypto_encap(ic, ni, m0); 3229 if (k == NULL) { 3230 /* 3231 * This can happen when the key is yanked after the 3232 * frame was queued. Just discard the frame; the 3233 * 802.11 layer counts failures and provides 3234 * debugging/diagnostics. 3235 */ 3236 m_freem(m0); 3237 return EIO; 3238 } 3239 /* 3240 * Adjust the packet + header lengths for the crypto 3241 * additions and calculate the h/w key index. When 3242 * a s/w mic is done the frame will have had any mic 3243 * added to it prior to entry so skb->len above will 3244 * account for it. Otherwise we need to add it to the 3245 * packet length. 
3246 */ 3247 cip = k->wk_cipher; 3248 hdrlen += cip->ic_header; 3249 pktlen += cip->ic_header + cip->ic_trailer; 3250 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0) 3251 pktlen += cip->ic_miclen; 3252 keyix = k->wk_keyix; 3253 3254 /* packet header may have moved, reset our local pointer */ 3255 wh = mtod(m0, struct ieee80211_frame *); 3256 } else 3257 keyix = HAL_TXKEYIX_INVALID; 3258 3259 pktlen += IEEE80211_CRC_LEN; 3260 3261 /* 3262 * Load the DMA map so any coalescing is done. This 3263 * also calculates the number of descriptors we need. 3264 */ 3265 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, 3266 BUS_DMA_NOWAIT); 3267 if (error == EFBIG) { 3268 /* XXX packet requires too many descriptors */ 3269 bf->bf_nseg = ATH_TXDESC+1; 3270 } else if (error != 0) { 3271 sc->sc_stats.ast_tx_busdma++; 3272 m_freem(m0); 3273 return error; 3274 } 3275 /* 3276 * Discard null packets and check for packets that 3277 * require too many TX descriptors. We try to convert 3278 * the latter to a cluster. 
3279 */ 3280 if (error == EFBIG) { /* too many desc's, linearize */ 3281 sc->sc_stats.ast_tx_linear++; 3282 m = ath_defrag(m0, M_DONTWAIT, ATH_TXDESC); 3283 if (m == NULL) { 3284 m_freem(m0); 3285 sc->sc_stats.ast_tx_nombuf++; 3286 return ENOMEM; 3287 } 3288 m0 = m; 3289 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, 3290 BUS_DMA_NOWAIT); 3291 if (error != 0) { 3292 sc->sc_stats.ast_tx_busdma++; 3293 m_freem(m0); 3294 return error; 3295 } 3296 KASSERT(bf->bf_nseg <= ATH_TXDESC, 3297 ("too many segments after defrag; nseg %u", bf->bf_nseg)); 3298 } else if (bf->bf_nseg == 0) { /* null packet, discard */ 3299 sc->sc_stats.ast_tx_nodata++; 3300 m_freem(m0); 3301 return EIO; 3302 } 3303 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n", __func__, m0, pktlen); 3304 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 3305 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 3306 bf->bf_m = m0; 3307 bf->bf_node = ni; /* NB: held reference */ 3308 3309 /* setup descriptors */ 3310 ds = bf->bf_desc; 3311 rt = sc->sc_currates; 3312 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 3313 3314 /* 3315 * NB: the 802.11 layer marks whether or not we should 3316 * use short preamble based on the current mode and 3317 * negotiated parameters. 3318 */ 3319 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 3320 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 3321 shortPreamble = AH_TRUE; 3322 sc->sc_stats.ast_tx_shortpre++; 3323 } else { 3324 shortPreamble = AH_FALSE; 3325 } 3326 3327 an = ATH_NODE(ni); 3328 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 3329 /* 3330 * Calculate Atheros packet type from IEEE80211 packet header, 3331 * setup for rate calculations, and select h/w transmit queue. 
3332 */ 3333 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 3334 case IEEE80211_FC0_TYPE_MGT: 3335 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3336 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) 3337 atype = HAL_PKT_TYPE_BEACON; 3338 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3339 atype = HAL_PKT_TYPE_PROBE_RESP; 3340 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) 3341 atype = HAL_PKT_TYPE_ATIM; 3342 else 3343 atype = HAL_PKT_TYPE_NORMAL; /* XXX */ 3344 rix = 0; /* XXX lowest rate */ 3345 try0 = ATH_TXMAXTRY; 3346 if (shortPreamble) 3347 txrate = an->an_tx_mgtratesp; 3348 else 3349 txrate = an->an_tx_mgtrate; 3350 /* NB: force all management frames to highest queue */ 3351 if (ni->ni_flags & IEEE80211_NODE_QOS) { 3352 /* NB: force all management frames to highest queue */ 3353 pri = WME_AC_VO; 3354 } else 3355 pri = WME_AC_BE; 3356 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 3357 break; 3358 case IEEE80211_FC0_TYPE_CTL: 3359 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ 3360 rix = 0; /* XXX lowest rate */ 3361 try0 = ATH_TXMAXTRY; 3362 if (shortPreamble) 3363 txrate = an->an_tx_mgtratesp; 3364 else 3365 txrate = an->an_tx_mgtrate; 3366 /* NB: force all ctl frames to highest queue */ 3367 if (ni->ni_flags & IEEE80211_NODE_QOS) { 3368 /* NB: force all ctl frames to highest queue */ 3369 pri = WME_AC_VO; 3370 } else 3371 pri = WME_AC_BE; 3372 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 3373 break; 3374 case IEEE80211_FC0_TYPE_DATA: 3375 atype = HAL_PKT_TYPE_NORMAL; /* default */ 3376 /* 3377 * Data frames; consult the rate control module. 3378 */ 3379 ath_rate_findrate(sc, an, shortPreamble, pktlen, 3380 &rix, &try0, &txrate); 3381 sc->sc_txrate = txrate; /* for LED blinking */ 3382 /* 3383 * Default all non-QoS traffic to the background queue. 
3384 */ 3385 if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) { 3386 pri = M_WME_GETAC(m0); 3387 if (cap->cap_wmeParams[pri].wmep_noackPolicy) { 3388 flags |= HAL_TXDESC_NOACK; 3389 sc->sc_stats.ast_tx_noack++; 3390 } 3391 } else 3392 pri = WME_AC_BE; 3393 break; 3394 default: 3395 if_printf(ifp, "bogus frame type 0x%x (%s)\n", 3396 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); 3397 /* XXX statistic */ 3398 m_freem(m0); 3399 return EIO; 3400 } 3401 txq = sc->sc_ac2q[pri]; 3402 3403 /* 3404 * When servicing one or more stations in power-save mode 3405 * multicast frames must be buffered until after the beacon. 3406 * We use the CAB queue for that. 3407 */ 3408 if (ismcast && ic->ic_ps_sta) { 3409 txq = sc->sc_cabq; 3410 /* XXX? more bit in 802.11 frame header */ 3411 } 3412 3413 /* 3414 * Calculate miscellaneous flags. 3415 */ 3416 if (ismcast) { 3417 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ 3418 sc->sc_stats.ast_tx_noack++; 3419 } else if (pktlen > ic->ic_rtsthreshold) { 3420 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ 3421 cix = rt->info[rix].controlRate; 3422 sc->sc_stats.ast_tx_rts++; 3423 } 3424 3425 /* 3426 * If 802.11g protection is enabled, determine whether 3427 * to use RTS/CTS or just CTS. Note that this is only 3428 * done for OFDM unicast frames. 3429 */ 3430 if ((ic->ic_flags & IEEE80211_F_USEPROT) && 3431 rt->info[rix].phy == IEEE80211_T_OFDM && 3432 (flags & HAL_TXDESC_NOACK) == 0) { 3433 /* XXX fragments must use CCK rates w/ protection */ 3434 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 3435 flags |= HAL_TXDESC_RTSENA; 3436 else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 3437 flags |= HAL_TXDESC_CTSENA; 3438 cix = rt->info[sc->sc_protrix].controlRate; 3439 sc->sc_stats.ast_tx_protect++; 3440 } 3441 3442 /* 3443 * Calculate duration. This logically belongs in the 802.11 3444 * layer but it lacks sufficient information to calculate it. 
3445 */ 3446 if ((flags & HAL_TXDESC_NOACK) == 0 && 3447 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 3448 u_int16_t dur; 3449 /* 3450 * XXX not right with fragmentation. 3451 */ 3452 if (shortPreamble) 3453 dur = rt->info[rix].spAckDuration; 3454 else 3455 dur = rt->info[rix].lpAckDuration; 3456 *(u_int16_t *)wh->i_dur = htole16(dur); 3457 } 3458 3459 /* 3460 * Calculate RTS/CTS rate and duration if needed. 3461 */ 3462 ctsduration = 0; 3463 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) { 3464 /* 3465 * CTS transmit rate is derived from the transmit rate 3466 * by looking in the h/w rate table. We must also factor 3467 * in whether or not a short preamble is to be used. 3468 */ 3469 /* NB: cix is set above where RTS/CTS is enabled */ 3470 KASSERT(cix != 0xff, ("cix not setup")); 3471 ctsrate = rt->info[cix].rateCode; 3472 /* 3473 * Compute the transmit duration based on the frame 3474 * size and the size of an ACK frame. We call into the 3475 * HAL to do the computation since it depends on the 3476 * characteristics of the actual PHY being used. 3477 * 3478 * NB: CTS is assumed the same size as an ACK so we can 3479 * use the precalculated ACK durations. 3480 */ 3481 if (shortPreamble) { 3482 ctsrate |= rt->info[cix].shortPreamble; 3483 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 3484 ctsduration += rt->info[cix].spAckDuration; 3485 ctsduration += ath_hal_computetxtime(ah, 3486 rt, pktlen, rix, AH_TRUE); 3487 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 3488 ctsduration += rt->info[cix].spAckDuration; 3489 } else { 3490 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 3491 ctsduration += rt->info[cix].lpAckDuration; 3492 ctsduration += ath_hal_computetxtime(ah, 3493 rt, pktlen, rix, AH_FALSE); 3494 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 3495 ctsduration += rt->info[cix].lpAckDuration; 3496 } 3497 /* 3498 * Must disable multi-rate retry when using RTS/CTS. 
3499 */ 3500 try0 = ATH_TXMAXTRY; 3501 } else 3502 ctsrate = 0; 3503 3504 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 3505 ieee80211_dump_pkt(mtod(m0, caddr_t), m0->m_len, 3506 sc->sc_hwmap[txrate].ieeerate, -1); 3507 3508 if (ic->ic_rawbpf) 3509 bpf_mtap(ic->ic_rawbpf, m0); 3510 if (sc->sc_drvbpf) { 3511 sc->sc_tx_th.wt_flags = sc->sc_hwmap[txrate].txflags; 3512 if (iswep) 3513 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 3514 sc->sc_tx_th.wt_rate = sc->sc_hwmap[txrate].ieeerate; 3515 sc->sc_tx_th.wt_txpower = ni->ni_txpower; 3516 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 3517 3518 bpf_mtap2(sc->sc_drvbpf, 3519 &sc->sc_tx_th, sc->sc_tx_th_len, m0); 3520 } 3521 3522 /* 3523 * Determine if a tx interrupt should be generated for 3524 * this descriptor. We take a tx interrupt to reap 3525 * descriptors when the h/w hits an EOL condition or 3526 * when the descriptor is specifically marked to generate 3527 * an interrupt. We periodically mark descriptors in this 3528 * way to insure timely replenishing of the supply needed 3529 * for sending frames. Defering interrupts reduces system 3530 * load and potentially allows more concurrent work to be 3531 * done but if done to aggressively can cause senders to 3532 * backup. 3533 * 3534 * NB: use >= to deal with sc_txintrperiod changing 3535 * dynamically through sysctl. 3536 */ 3537 if (flags & HAL_TXDESC_INTREQ) { 3538 txq->axq_intrcnt = 0; 3539 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { 3540 flags |= HAL_TXDESC_INTREQ; 3541 txq->axq_intrcnt = 0; 3542 } 3543 3544 /* 3545 * Formulate first tx descriptor with tx controls. 3546 */ 3547 /* XXX check return value? 
*/ 3548 ath_hal_setuptxdesc(ah, ds 3549 , pktlen /* packet length */ 3550 , hdrlen /* header length */ 3551 , atype /* Atheros packet type */ 3552 , ni->ni_txpower /* txpower */ 3553 , txrate, try0 /* series 0 rate/tries */ 3554 , keyix /* key cache index */ 3555 , sc->sc_txantenna /* antenna mode */ 3556 , flags /* flags */ 3557 , ctsrate /* rts/cts rate */ 3558 , ctsduration /* rts/cts duration */ 3559 ); 3560 /* 3561 * Setup the multi-rate retry state only when we're 3562 * going to use it. This assumes ath_hal_setuptxdesc 3563 * initializes the descriptors (so we don't have to) 3564 * when the hardware supports multi-rate retry and 3565 * we don't use it. 3566 */ 3567 if (try0 != ATH_TXMAXTRY) 3568 ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix); 3569 3570 /* 3571 * Fillin the remainder of the descriptor info. 3572 */ 3573 ds0 = ds; 3574 for (i = 0; i < bf->bf_nseg; i++, ds++) { 3575 ds->ds_data = bf->bf_segs[i].ds_addr; 3576 if (i == bf->bf_nseg - 1) 3577 ds->ds_link = 0; 3578 else 3579 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1); 3580 ath_hal_filltxdesc(ah, ds 3581 , bf->bf_segs[i].ds_len /* segment length */ 3582 , i == 0 /* first segment */ 3583 , i == bf->bf_nseg - 1 /* last segment */ 3584 , ds0 /* first descriptor */ 3585 ); 3586 DPRINTF(sc, ATH_DEBUG_XMIT, 3587 "%s: %d: %08x %08x %08x %08x %08x %08x\n", 3588 __func__, i, ds->ds_link, ds->ds_data, 3589 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]); 3590 } 3591 /* 3592 * Insert the frame on the outbound list and 3593 * pass it on to the hardware. 3594 */ 3595 ATH_TXQ_LOCK(txq); 3596 if (flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) { 3597 u_int32_t txopLimit = IEEE80211_TXOP_TO_US( 3598 cap->cap_wmeParams[pri].wmep_txopLimit); 3599 /* 3600 * When bursting, potentially extend the CTS duration 3601 * of a previously queued frame to cover this frame 3602 * and not exceed the txopLimit. 
If that can be done 3603 * then disable RTS/CTS on this frame since it's now 3604 * covered (burst extension). Otherwise we must terminate 3605 * the burst before this frame goes out so as not to 3606 * violate the WME parameters. All this is complicated 3607 * as we need to update the state of packets on the 3608 * (live) hardware queue. The logic is buried in the hal 3609 * because it's highly chip-specific. 3610 */ 3611 if (txopLimit != 0) { 3612 sc->sc_stats.ast_tx_ctsburst++; 3613 if (updateCTSForBursting(ah, ds0, txq) == 0) { 3614 /* 3615 * This frame was not covered by RTS/CTS from 3616 * the previous frame in the burst; update the 3617 * descriptor pointers so this frame is now 3618 * treated as the last frame for extending a 3619 * burst. 3620 */ 3621 txq->axq_lastdsWithCTS = ds0; 3622 /* set gating Desc to final desc */ 3623 txq->axq_gatingds = 3624 (struct ath_desc *)txq->axq_link; 3625 } else 3626 sc->sc_stats.ast_tx_ctsext++; 3627 } 3628 } 3629 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 3630 if (txq->axq_link == NULL) { 3631 ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 3632 DPRINTF(sc, ATH_DEBUG_XMIT, 3633 "%s: TXDP[%u] = %p (%p) depth %d\n", __func__, 3634 txq->axq_qnum, (caddr_t)bf->bf_daddr, bf->bf_desc, 3635 txq->axq_depth); 3636 } else { 3637 *txq->axq_link = bf->bf_daddr; 3638 DPRINTF(sc, ATH_DEBUG_XMIT, 3639 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, 3640 txq->axq_qnum, txq->axq_link, 3641 (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth); 3642 } 3643 txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link; 3644 /* 3645 * The CAB queue is started from the SWBA handler since 3646 * frames only go out on DTIM and to avoid possible races. 3647 */ 3648 if (txq != sc->sc_cabq) 3649 ath_hal_txstart(ah, txq->axq_qnum); 3650 ATH_TXQ_UNLOCK(txq); 3651 3652 return 0; 3653 #undef updateCTSForBursting 3654 #undef CTS_DURATION 3655 } 3656 3657 /* 3658 * Process completed xmit descriptors from the specified queue. 
 */
static void
ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_buf *bf;
	struct ath_desc *ds, *ds0;
	struct ieee80211_node *ni;
	struct ath_node *an;
	int sr, lr, pri;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
		__func__, txq->axq_qnum,
		(caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);
	for (;;) {
		ATH_TXQ_LOCK(txq);
		txq->axq_intrcnt = 0;	/* reset periodic desc intr count */
		bf = STAILQ_FIRST(&txq->axq_q);
		if (bf == NULL) {
			txq->axq_link = NULL;
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		/* NB: last descriptor of the chain carries the tx status */
		ds0 = &bf->bf_desc[0];
		ds = &bf->bf_desc[bf->bf_nseg - 1];
		status = ath_hal_txprocdesc(ah, ds);
#ifdef AR_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(bf, status == HAL_OK);
#endif
		/* Stop at the first descriptor the h/w hasn't finished. */
		if (status == HAL_EINPROGRESS) {
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		/* Clear CTS-bursting bookkeeping that points at this buf. */
		if (ds0 == txq->axq_lastdsWithCTS)
			txq->axq_lastdsWithCTS = NULL;
		if (ds == txq->axq_gatingds)
			txq->axq_gatingds = NULL;
		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
		ATH_TXQ_UNLOCK(txq);

		ni = bf->bf_node;
		if (ni != NULL) {
			an = ATH_NODE(ni);
			if (ds->ds_txstat.ts_status == 0) {
				/* success: update antenna/rssi statistics */
				u_int8_t txant = ds->ds_txstat.ts_antenna;
				sc->sc_stats.ast_ant_tx[txant]++;
				sc->sc_ant_tx[txant]++;
				if (ds->ds_txstat.ts_rate & HAL_TXSTAT_ALTRATE)
					sc->sc_stats.ast_tx_altrate++;
				sc->sc_stats.ast_tx_rssi =
					ds->ds_txstat.ts_rssi;
				ATH_RSSI_LPF(an->an_halstats.ns_avgtxrssi,
					ds->ds_txstat.ts_rssi);
				pri = M_WME_GETAC(bf->bf_m);
				if (pri >= WME_AC_VO)
					ic->ic_wme.wme_hipri_traffic++;
				ni->ni_inact = ni->ni_inact_reload;
			} else {
				/* failure: classify by error bits */
				if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY)
					sc->sc_stats.ast_tx_xretries++;
				if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO)
					sc->sc_stats.ast_tx_fifoerr++;
				if (ds->ds_txstat.ts_status & HAL_TXERR_FILT)
					sc->sc_stats.ast_tx_filtered++;
			}
			sr = ds->ds_txstat.ts_shortretry;
			lr = ds->ds_txstat.ts_longretry;
			sc->sc_stats.ast_tx_shortretry += sr;
			sc->sc_stats.ast_tx_longretry += lr;
			/*
			 * Hand the descriptor to the rate control algorithm.
			 */
			ath_rate_tx_complete(sc, an, ds, ds0);
			/*
			 * Reclaim reference to node.
			 *
			 * NB: the node may be reclaimed here if, for example
			 *   this is a DEAUTH message that was sent and the
			 *   node was timed out due to inactivity.
			 */
			ieee80211_free_node(ni);
		}
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
		    bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		bf->bf_node = NULL;

		/* Return the buffer to the free list. */
		ATH_TXBUF_LOCK(sc);
		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
	}
}

/*
 * Deferred processing of transmit interrupt; special-cased
 * for a single hardware transmit queue (e.g. 5210 and 5211).
 */
static void
ath_tx_proc_q0(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;

	ath_tx_processq(sc, &sc->sc_txq[0]);
	ath_tx_processq(sc, sc->sc_cabq);
	ifp->if_flags &= ~IFF_OACTIVE;
	sc->sc_tx_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, ATH_LED_TX);

	/* kick the output path now that buffers are available */
	ath_start(ifp);
}

/*
 * Deferred processing of transmit interrupt; special-cased
 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
 */
static void
ath_tx_proc_q0123(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;

	/*
	 * Process each active queue.
	 */
	ath_tx_processq(sc, &sc->sc_txq[0]);
	ath_tx_processq(sc, &sc->sc_txq[1]);
	ath_tx_processq(sc, &sc->sc_txq[2]);
	ath_tx_processq(sc, &sc->sc_txq[3]);
	ath_tx_processq(sc, sc->sc_cabq);

	ifp->if_flags &= ~IFF_OACTIVE;
	sc->sc_tx_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, ATH_LED_TX);

	ath_start(ifp);
}

/*
 * Deferred processing of transmit interrupt.
 */
static void
ath_tx_proc(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;
	int i;

	/*
	 * Process each active queue.
	 */
	/* XXX faster to read ISR_S0_S and ISR_S1_S to determine q's? */
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_processq(sc, &sc->sc_txq[i]);

	ifp->if_flags &= ~IFF_OACTIVE;
	sc->sc_tx_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, ATH_LED_TX);

	ath_start(ifp);
}

/*
 * Reclaim all buffers queued to the specified tx queue,
 * releasing node references and returning the buffers to
 * the free list.
 */
static void
ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *ni;
	struct ath_buf *bf;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block ath_tx_tasklet
	 */
	for (;;) {
		ATH_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->axq_q);
		if (bf == NULL) {
			txq->axq_link = NULL;
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
		ATH_TXQ_UNLOCK(txq);
#ifdef AR_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RESET)
			ath_printtxbuf(bf,
				ath_hal_txprocdesc(ah, bf->bf_desc) == HAL_OK);
#endif /* AR_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		ni = bf->bf_node;
		bf->bf_node = NULL;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		ATH_TXBUF_LOCK(sc);
		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
	}
}

/*
 * Stop DMA on the specified tx queue (debug-logs the queue state).
 */
static void
ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	(void) ath_hal_stoptxdma(ah, txq->axq_qnum);
	DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
	    __func__, txq->axq_qnum,
	    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
	    txq->axq_link);
}

/*
 * Drain the transmit queues and reclaim resources.
 */
static void
ath_draintxq(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = &sc->sc_if;
	int i;

	/* XXX return value */
	if (!sc->sc_invalid) {
		/* don't touch the hardware if marked invalid */
		(void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
		DPRINTF(sc, ATH_DEBUG_RESET,
		    "%s: beacon queue %p\n", __func__,
		    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq));
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
			if (ATH_TXQ_SETUP(sc, i))
				ath_tx_stopdma(sc, &sc->sc_txq[i]);
	}
	/* NB: buffers are reclaimed even when sc_invalid is set */
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_draintxq(sc, &sc->sc_txq[i]);
	ifp->if_flags &= ~IFF_OACTIVE;
	sc->sc_tx_timer = 0;
}

/*
 * Disable the receive h/w in preparation for a reset.
 */
static void
ath_stoprecv(struct ath_softc *sc)
{
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_hal *ah = sc->sc_ah;

	/* NB: ordering matters: PCU first, then filter, then DMA engine */
	ath_hal_stoppcurecv(ah);	/* disable PCU */
	ath_hal_setrxfilter(ah, 0);	/* clear recv filter */
	ath_hal_stopdmarecv(ah);	/* disable DMA engine */
	DELAY(3000);			/* 3ms is long enough for 1 frame */
#ifdef AR_DEBUG
	if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
		struct ath_buf *bf;

		printf("%s: rx queue %p, link %p\n", __func__,
		    (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink);
		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			struct ath_desc *ds = bf->bf_desc;
			HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
				bf->bf_daddr, PA2DESC(sc, ds->ds_link));
			if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
				ath_printrxbuf(bf, status == HAL_OK);
		}
	}
#endif
	sc->sc_rxlink = NULL;		/* just in case */
#undef PA2DESC
}

/*
 * Enable the receive h/w following a reset.
 */
static int
ath_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;

	sc->sc_rxlink = NULL;
	/* (Re)initialize every rx buffer and link it into the rx chain. */
	STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
		int error = ath_rxbuf_init(sc, bf);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_RECV,
				"%s: ath_rxbuf_init failed %d\n",
				__func__, error);
			return error;
		}
	}

	bf = STAILQ_FIRST(&sc->sc_rxbuf);
	ath_hal_putrxbuf(ah, bf->bf_daddr);
	ath_hal_rxena(ah);		/* enable recv descriptors */
	ath_mode_init(sc);		/* set filters, etc. */
	ath_hal_startpcurecv(ah);	/* re-enable PCU/DMA engine */
	return 0;
}

/*
 * Update internal state after a channel change.
3981 */ 3982 static void 3983 ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 3984 { 3985 struct ieee80211com *ic = &sc->sc_ic; 3986 enum ieee80211_phymode mode; 3987 u_int16_t flags; 3988 3989 /* 3990 * Change channels and update the h/w rate map 3991 * if we're switching; e.g. 11a to 11b/g. 3992 */ 3993 mode = ieee80211_chan2mode(ic, chan); 3994 if (mode != sc->sc_curmode) 3995 ath_setcurmode(sc, mode); 3996 /* 3997 * Update BPF state. NB: ethereal et. al. don't handle 3998 * merged flags well so pick a unique mode for their use. 3999 */ 4000 if (IEEE80211_IS_CHAN_A(chan)) 4001 flags = IEEE80211_CHAN_A; 4002 /* XXX 11g schizophrenia */ 4003 else if (IEEE80211_IS_CHAN_G(chan) || 4004 IEEE80211_IS_CHAN_PUREG(chan)) 4005 flags = IEEE80211_CHAN_G; 4006 else 4007 flags = IEEE80211_CHAN_B; 4008 if (IEEE80211_IS_CHAN_T(chan)) 4009 flags |= IEEE80211_CHAN_TURBO; 4010 sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq = 4011 htole16(chan->ic_freq); 4012 sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags = 4013 htole16(flags); 4014 } 4015 4016 /* 4017 * Set/change channels. If the channel is really being changed, 4018 * it's done by reseting the chip. To accomplish this we must 4019 * first cleanup any pending DMA, then restart stuff after a la 4020 * ath_init. 4021 */ 4022 static int 4023 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 4024 { 4025 struct ath_hal *ah = sc->sc_ah; 4026 struct ieee80211com *ic = &sc->sc_ic; 4027 HAL_CHANNEL hchan; 4028 4029 /* 4030 * Convert to a HAL channel description with 4031 * the flags constrained to reflect the current 4032 * operating mode. 
4033 */ 4034 hchan.channel = chan->ic_freq; 4035 hchan.channelFlags = ath_chan2flags(ic, chan); 4036 4037 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz) -> %u (%u MHz)\n", 4038 __func__, 4039 ath_hal_mhz2ieee(sc->sc_curchan.channel, 4040 sc->sc_curchan.channelFlags), 4041 sc->sc_curchan.channel, 4042 ath_hal_mhz2ieee(hchan.channel, hchan.channelFlags), hchan.channel); 4043 if (hchan.channel != sc->sc_curchan.channel || 4044 hchan.channelFlags != sc->sc_curchan.channelFlags) { 4045 HAL_STATUS status; 4046 4047 /* 4048 * To switch channels clear any pending DMA operations; 4049 * wait long enough for the RX fifo to drain, reset the 4050 * hardware at the new frequency, and then re-enable 4051 * the relevant bits of the h/w. 4052 */ 4053 ath_hal_intrset(ah, 0); /* disable interrupts */ 4054 ath_draintxq(sc); /* clear pending tx frames */ 4055 ath_stoprecv(sc); /* turn off frame recv */ 4056 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, &status)) { 4057 if_printf(&sc->sc_if, "ath_chan_set: unable to reset " 4058 "channel %u (%u Mhz)\n", 4059 ieee80211_chan2ieee(ic, chan), chan->ic_freq); 4060 return EIO; 4061 } 4062 sc->sc_curchan = hchan; 4063 ath_update_txpow(sc); /* update tx power state */ 4064 4065 /* 4066 * Re-enable rx framework. 4067 */ 4068 if (ath_startrecv(sc) != 0) { 4069 if_printf(&sc->sc_if, 4070 "ath_chan_set: unable to restart recv logic\n"); 4071 return EIO; 4072 } 4073 4074 /* 4075 * Change channels and update the h/w rate map 4076 * if we're switching; e.g. 11a to 11b/g. 4077 */ 4078 ic->ic_ibss_chan = chan; 4079 ath_chan_change(sc, chan); 4080 4081 /* 4082 * Re-enable interrupts. 
4083 */ 4084 ath_hal_intrset(ah, sc->sc_imask); 4085 } 4086 return 0; 4087 } 4088 4089 static void 4090 ath_next_scan(void *arg) 4091 { 4092 struct ath_softc *sc = arg; 4093 struct ieee80211com *ic = &sc->sc_ic; 4094 int s; 4095 4096 /* don't call ath_start w/o network interrupts blocked */ 4097 s = splnet(); 4098 4099 if (ic->ic_state == IEEE80211_S_SCAN) 4100 ieee80211_next_scan(ic); 4101 splx(s); 4102 } 4103 4104 /* 4105 * Periodically recalibrate the PHY to account 4106 * for temperature/environment changes. 4107 */ 4108 static void 4109 ath_calibrate(void *arg) 4110 { 4111 struct ath_softc *sc = arg; 4112 struct ath_hal *ah = sc->sc_ah; 4113 4114 sc->sc_stats.ast_per_cal++; 4115 4116 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: channel %u/%x\n", 4117 __func__, sc->sc_curchan.channel, sc->sc_curchan.channelFlags); 4118 4119 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 4120 /* 4121 * Rfgain is out of bounds, reset the chip 4122 * to load new gain values. 4123 */ 4124 sc->sc_stats.ast_per_rfgain++; 4125 ath_reset(&sc->sc_if); 4126 } 4127 if (!ath_hal_calibrate(ah, &sc->sc_curchan)) { 4128 DPRINTF(sc, ATH_DEBUG_ANY, 4129 "%s: calibration of channel %u failed\n", 4130 __func__, sc->sc_curchan.channel); 4131 sc->sc_stats.ast_per_calfail++; 4132 } 4133 callout_reset(&sc->sc_cal_ch, ath_calinterval * hz, ath_calibrate, sc); 4134 } 4135 4136 static int 4137 ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 4138 { 4139 struct ifnet *ifp = ic->ic_ifp; 4140 struct ath_softc *sc = ifp->if_softc; 4141 struct ath_hal *ah = sc->sc_ah; 4142 struct ieee80211_node *ni; 4143 int i, error; 4144 const u_int8_t *bssid; 4145 u_int32_t rfilt; 4146 static const HAL_LED_STATE leds[] = { 4147 HAL_LED_INIT, /* IEEE80211_S_INIT */ 4148 HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 4149 HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 4150 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 4151 HAL_LED_RUN, /* IEEE80211_S_RUN */ 4152 }; 4153 4154 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", 
__func__, 4155 ieee80211_state_name[ic->ic_state], 4156 ieee80211_state_name[nstate]); 4157 4158 callout_stop(&sc->sc_scan_ch); 4159 callout_stop(&sc->sc_cal_ch); 4160 ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 4161 4162 if (nstate == IEEE80211_S_INIT) { 4163 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 4164 /* 4165 * NB: disable interrupts so we don't rx frames. 4166 */ 4167 ath_hal_intrset(ah, sc->sc_imask &~ ~HAL_INT_GLOBAL); 4168 /* 4169 * Notify the rate control algorithm. 4170 */ 4171 ath_rate_newstate(sc, nstate); 4172 goto done; 4173 } 4174 ni = ic->ic_bss; 4175 error = ath_chan_set(sc, ni->ni_chan); 4176 if (error != 0) 4177 goto bad; 4178 rfilt = ath_calcrxfilter(sc, nstate); 4179 if (nstate == IEEE80211_S_SCAN) 4180 bssid = ifp->if_broadcastaddr; 4181 else 4182 bssid = ni->ni_bssid; 4183 ath_hal_setrxfilter(ah, rfilt); 4184 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s\n", 4185 __func__, rfilt, ether_sprintf(bssid)); 4186 4187 if (nstate == IEEE80211_S_RUN && ic->ic_opmode == IEEE80211_M_STA) 4188 ath_hal_setassocid(ah, bssid, ni->ni_associd); 4189 else 4190 ath_hal_setassocid(ah, bssid, 0); 4191 if (ic->ic_flags & IEEE80211_F_PRIVACY) { 4192 for (i = 0; i < IEEE80211_WEP_NKID; i++) 4193 if (ath_hal_keyisvalid(ah, i)) 4194 ath_hal_keysetmac(ah, i, bssid); 4195 } 4196 4197 /* 4198 * Notify the rate control algorithm so rates 4199 * are setup should ath_beacon_alloc be called. 4200 */ 4201 ath_rate_newstate(sc, nstate); 4202 4203 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 4204 /* nothing to do */; 4205 } else if (nstate == IEEE80211_S_RUN) { 4206 DPRINTF(sc, ATH_DEBUG_STATE, 4207 "%s(RUN): ic_flags=0x%08x iv=%d bssid=%s " 4208 "capinfo=0x%04x chan=%d\n" 4209 , __func__ 4210 , ic->ic_flags 4211 , ni->ni_intval 4212 , ether_sprintf(ni->ni_bssid) 4213 , ni->ni_capinfo 4214 , ieee80211_chan2ieee(ic, ni->ni_chan)); 4215 4216 /* 4217 * Allocate and setup the beacon frame for AP or adhoc mode. 
4218 */ 4219 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 4220 ic->ic_opmode == IEEE80211_M_IBSS) { 4221 /* 4222 * Stop any previous beacon DMA. This may be 4223 * necessary, for example, when an ibss merge 4224 * causes reconfiguration; there will be a state 4225 * transition from RUN->RUN that means we may 4226 * be called with beacon transmission active. 4227 */ 4228 ath_hal_stoptxdma(ah, sc->sc_bhalq); 4229 ath_beacon_free(sc); 4230 error = ath_beacon_alloc(sc, ni); 4231 if (error != 0) 4232 goto bad; 4233 } 4234 4235 /* 4236 * Configure the beacon and sleep timers. 4237 */ 4238 ath_beacon_config(sc); 4239 } else { 4240 ath_hal_intrset(ah, 4241 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 4242 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 4243 } 4244 done: 4245 /* 4246 * Invoke the parent method to complete the work. 4247 */ 4248 error = sc->sc_newstate(ic, nstate, arg); 4249 /* 4250 * Finally, start any timers. 4251 */ 4252 if (nstate == IEEE80211_S_RUN) { 4253 /* start periodic recalibration timer */ 4254 callout_reset(&sc->sc_cal_ch, ath_calinterval * hz, 4255 ath_calibrate, sc); 4256 } else if (nstate == IEEE80211_S_SCAN) { 4257 /* start ap/neighbor scan timer */ 4258 callout_reset(&sc->sc_scan_ch, (ath_dwelltime * hz) / 1000, 4259 ath_next_scan, sc); 4260 } 4261 bad: 4262 return error; 4263 } 4264 4265 /* 4266 * Setup driver-specific state for a newly associated node. 4267 * Note that we're called also on a re-associate, the isnew 4268 * param tells us if this is the first time or not. 
 */
static void
ath_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	/* hand (re)association off to the rate control module */
	ath_rate_newassoc(sc, ATH_NODE(ni), isnew);
}

/*
 * Collect the channel list from the HAL for the given country code
 * and regulatory settings and merge it into the net80211 channel
 * table, keyed by IEEE channel number.
 * Returns 0 on success or an errno on failure.
 */
static int
ath_getchannels(struct ath_softc *sc, u_int cc,
	HAL_BOOL outdoor, HAL_BOOL xchanmode)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_if;
	struct ath_hal *ah = sc->sc_ah;
	HAL_CHANNEL *chans;
	int i, ix, nchan;

	/* temporary table; freed before return on every path */
	chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL),
			M_TEMP, M_NOWAIT);
	if (chans == NULL) {
		if_printf(ifp, "unable to allocate channel table\n");
		return ENOMEM;
	}
	if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan,
	    cc, HAL_MODE_ALL, outdoor, xchanmode)) {
		u_int32_t rd;

		ath_hal_getregdomain(ah, &rd);
		if_printf(ifp, "unable to collect channel list from hal; "
			"regdomain likely %u country code %u\n", rd, cc);
		free(chans, M_TEMP);
		return EINVAL;
	}

	/*
	 * Convert HAL channels to ieee80211 ones and insert
	 * them in the table according to their channel number.
	 */
	for (i = 0; i < nchan; i++) {
		HAL_CHANNEL *c = &chans[i];
		ix = ath_hal_mhz2ieee(c->channel, c->channelFlags);
		if (ix > IEEE80211_CHAN_MAX) {
			if_printf(ifp, "bad hal channel %u (%u/%x) ignored\n",
				ix, c->channel, c->channelFlags);
			continue;
		}
		DPRINTF(sc, ATH_DEBUG_ANY,
		    "%s: HAL channel %d/%d freq %d flags %#04x idx %d\n",
		    sc->sc_dev.dv_xname, i, nchan, c->channel, c->channelFlags,
		    ix);
		/* NB: flags are known to be compatible */
		if (ic->ic_channels[ix].ic_freq == 0) {
			ic->ic_channels[ix].ic_freq = c->channel;
			ic->ic_channels[ix].ic_flags = c->channelFlags;
		} else {
			/* channels overlap; e.g. 11g and 11b */
			ic->ic_channels[ix].ic_flags |= c->channelFlags;
		}
	}
	free(chans, M_TEMP);
	return 0;
}

/*
 * Callout that marks the end of an LED blink cycle so a new
 * blink may be started by ath_led_event.
 */
static void
ath_led_done(void *arg)
{
	struct ath_softc *sc = arg;

	sc->sc_blinking = 0;
}

/*
 * Turn the LED off: flip the pin and then set a timer so no
 * update will happen for the specified duration.
 */
static void
ath_led_off(void *arg)
{
	struct ath_softc *sc = arg;

	/* !sc_ledon is the "off" polarity of the GPIO pin */
	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon);
	callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc);
}

/*
 * Blink the LED according to the specified on/off times
 * (both given in ticks).
 */
static void
ath_led_blink(struct ath_softc *sc, int on, int off)
{
	DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off);
	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon);
	sc->sc_blinking = 1;
	sc->sc_ledoff = off;
	callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc);
}

/*
 * Start an LED blink for the given event (ATH_LED_TX/RX/POLL),
 * using the blink cadence precomputed per hardware rate in
 * sc_hwmap.  An in-progress blink is never interrupted.
 */
static void
ath_led_event(struct ath_softc *sc, int event)
{

	sc->sc_ledevent = ticks;	/* time of last event */
	if (sc->sc_blinking)		/* don't interrupt active blink */
		return;
	switch (event) {
	case ATH_LED_POLL:
		ath_led_blink(sc, sc->sc_hwmap[0].ledon,
			sc->sc_hwmap[0].ledoff);
		break;
	case ATH_LED_TX:
		ath_led_blink(sc, sc->sc_hwmap[sc->sc_txrate].ledon,
			sc->sc_hwmap[sc->sc_txrate].ledoff);
		break;
	case ATH_LED_RX:
		ath_led_blink(sc, sc->sc_hwmap[sc->sc_rxrate].ledon,
			sc->sc_hwmap[sc->sc_rxrate].ledoff);
		break;
	}
}

/*
 * Push the 802.11-layer tx power limit to the hardware when it
 * changed, and refresh the max tx power reported for status requests.
 */
static void
ath_update_txpow(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t txpow;

	if (sc->sc_curtxpow != ic->ic_txpowlimit) {
		ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
		/* read back in case value is
		   clamped */
		ath_hal_gettxpowlimit(ah, &txpow);
		ic->ic_txpowlimit = sc->sc_curtxpow = txpow;
	}
	/*
	 * Fetch max tx power level for status requests.
	 */
	ath_hal_getmaxtxpow(sc->sc_ah, &txpow);
	ic->ic_bss->ni_txpower = txpow;
}

/*
 * Fetch the HAL rate table for the given phy mode and install it
 * as the supported rate set for that mode.
 * Returns 1 if the mode is usable, 0 otherwise.
 */
static int
ath_rate_setup(struct ath_softc *sc, u_int mode)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	const HAL_RATE_TABLE *rt;
	struct ieee80211_rateset *rs;
	int i, maxrates;

	switch (mode) {
	case IEEE80211_MODE_11A:
		sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11A);
		break;
	case IEEE80211_MODE_11B:
		sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11B);
		break;
	case IEEE80211_MODE_11G:
		sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11G);
		break;
	case IEEE80211_MODE_TURBO_A:
		sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_TURBO);
		break;
	case IEEE80211_MODE_TURBO_G:
		sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_108G);
		break;
	default:
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
			__func__, mode);
		return 0;
	}
	rt = sc->sc_rates[mode];
	if (rt == NULL)
		return 0;
	if (rt->rateCount > IEEE80211_RATE_MAXSIZE) {
		/* clamp to what net80211 can represent */
		DPRINTF(sc, ATH_DEBUG_ANY,
			"%s: rate table too small (%u > %u)\n",
			__func__, rt->rateCount, IEEE80211_RATE_MAXSIZE);
		maxrates = IEEE80211_RATE_MAXSIZE;
	} else
		maxrates = rt->rateCount;
	rs = &ic->ic_sup_rates[mode];
	for (i = 0; i < maxrates; i++)
		rs->rs_rates[i] = rt->info[i].dot11Rate;
	rs->rs_nrates = maxrates;
	return 1;
}

/*
 * Switch the driver's current phy mode: rebuild the 802.11-rate ->
 * hw-rate-index map, the per-hw-rate radiotap flags and LED blink
 * cadences, and select the protection rate index.
 */
static void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	/* NB: on/off times from the Atheros NDIS driver, w/ permission */
	static const struct {
		u_int		rate;		/* tx/rx 802.11 rate */
		u_int16_t	timeOn;		/* LED on time (ms) */
		u_int16_t	timeOff;	/* LED off time (ms) */
	} blinkrates[] = {
		{ 108,  40,  10 },
		{  96,  44,  11 },
		{  72,  50,  13 },
		{  48,  57,  14 },
		{  36,  67,  16 },
		{  24,  80,  20 },
		{  22, 100,  25 },
		{  18, 133,  34 },
		{  12, 160,  40 },
		{  10, 200,  50 },
		{   6, 240,  58 },
		{   4, 267,  66 },
		{   2, 400, 100 },
		{   0, 500, 130 },
	};
	const HAL_RATE_TABLE *rt;
	int i, j;

	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = sc->sc_rates[mode];
	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
	for (i = 0; i < rt->rateCount; i++)
		sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i;
	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < 32; i++) {
		u_int8_t ix = rt->rateCodeToIndex[i];
		if (ix == 0xff) {
			/* unused hw rate code: slowest blink cadence */
			sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
			sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
			continue;
		}
		sc->sc_hwmap[i].ieeerate =
			rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
		sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
		if (rt->info[ix].shortPreamble ||
		    rt->info[ix].phy == IEEE80211_T_OFDM)
			sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		/* NB: receive frames include FCS */
		sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags |
			IEEE80211_RADIOTAP_F_FCS;
		/* setup blink rate table to avoid per-packet lookup */
		for (j = 0; j < N(blinkrates)-1; j++)
			if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
				break;
		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
		sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
		sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 * XXX select protection rate index from rate table.
	 */
	sc->sc_protrix = (mode == IEEE80211_MODE_11G ? 1 : 0);
	/* NB: caller is responsible for resetting rate control state */
#undef N
}

#ifdef AR_DEBUG
/*
 * Debug helper: dump the hardware state of each rx descriptor of bf.
 * 'done' selects whether the completion status column is meaningful.
 */
static void
ath_printrxbuf(struct ath_buf *bf, int done)
{
	struct ath_desc *ds;
	int i;

	for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
		printf("R%d (%p %p) %08x %08x %08x %08x %08x %08x %c\n",
		    i, ds, (struct ath_desc *)bf->bf_daddr + i,
		    ds->ds_link, ds->ds_data,
		    ds->ds_ctl0, ds->ds_ctl1,
		    ds->ds_hw[0], ds->ds_hw[1],
		    !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!');
	}
}

/*
 * Debug helper: dump the hardware state of each tx descriptor of bf.
 */
static void
ath_printtxbuf(struct ath_buf *bf, int done)
{
	struct ath_desc *ds;
	int i;

	for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
		printf("T%d (%p %p) %08x %08x %08x %08x %08x %08x %08x %08x %c\n",
		    i, ds, (struct ath_desc *)bf->bf_daddr + i,
		    ds->ds_link, ds->ds_data,
		    ds->ds_ctl0, ds->ds_ctl1,
		    ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
		    !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!');
	}
}
#endif /* AR_DEBUG */

/*
 * Interface watchdog: reset the device when the tx timer expires
 * and chain to the net80211 watchdog processing.
 */
static void
ath_watchdog(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	ifp->if_timer = 0;
	if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid)
		return;
	if (sc->sc_tx_timer) {
		if (--sc->sc_tx_timer == 0) {
			if_printf(ifp, "device timeout\n");
			ath_reset(ifp);
			ifp->if_oerrors++;
			sc->sc_stats.ast_watchdog++;
		} else
			ifp->if_timer = 1;	/* keep counting down */
	}
	ieee80211_watchdog(ic);
}

/*
 * Diagnostic interface to the HAL.  This is used by various
 * tools to do things like retrieve register contents for
 * debugging.
 * The mechanism is intentionally opaque so that
 * it can change frequently w/o concern for compatibility.
 */
static int
ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
{
	struct ath_hal *ah = sc->sc_ah;
	u_int id = ad->ad_id & ATH_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = ad->ad_in_size;
	u_int32_t outsize = ad->ad_out_size;
	int error = 0;

	if (ad->ad_id & ATH_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(ad->ad_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (ad->ad_id & ATH_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
		if (outsize < ad->ad_out_size)
			ad->ad_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, ad->ad_out_data,
					ad->ad_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* goto-cleanup: release whichever buffers were allocated */
	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}

/*
 * Network interface ioctl entry point: interface flags, multicast
 * list maintenance, driver statistics/diagnostics, and fallthrough
 * to the net80211 ioctl handler.
 */
static int
ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == (IFF_RUNNING|IFF_UP))
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	ATH_LOCK(sc);
	switch (cmd) {
	case SIOCSIFFLAGS:
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call ath_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			ath_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid && ic->ic_bss != NULL)
				ath_init(ifp);	/* XXX lose error */
		} else
			ath_stop_locked(ifp, 1);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ec) :
		    ether_delmulti(ifr, &sc->sc_ec);
		if (error == ENETRESET) {
			/* reprogram the multicast filter in hardware */
			if (ifp->if_flags & IFF_RUNNING)
				ath_mode_init(sc);
			error = 0;
		}
		break;
	case SIOCGATHSTATS:
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.ast_tx_packets = ifp->if_opackets;
		sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
		sc->sc_stats.ast_rx_rssi = ieee80211_getrssi(ic);
		ATH_UNLOCK(sc);
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsistency in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return copyout(&sc->sc_stats,
				ifr->ifr_data, sizeof (sc->sc_stats));
	case SIOCGATHDIAG:
		error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
		break;
	default:
		error = ieee80211_ioctl(ic, cmd, data);
		if (error == ENETRESET) {
			if (IS_RUNNING(ifp) &&
			    ic->ic_roaming != IEEE80211_ROAMING_MANUAL)
				ath_init(ifp);	/* XXX lose error */
			error = 0;
		}
		if (error == ERESTART)
			error = IS_RUNNING(ifp) ? ath_reset(ifp) : 0;
		break;
	}
	ATH_UNLOCK(sc);
	return error;
#undef IS_RUNNING
}

/*
 * Attach the radiotap BPF tap point and initialize the constant
 * fields of the tx/rx radiotap headers.
 */
static void
ath_bpfattach(struct ath_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;

	bpfattach2(ifp, DLT_IEEE802_11_RADIO,
		sizeof(struct ieee80211_frame) + sizeof(sc->sc_tx_th),
		&sc->sc_drvbpf);
	/*
	 * Initialize constant fields.
	 * XXX make header lengths a multiple of 32-bits so subsequent
	 *     headers are properly aligned; this is a kludge to keep
	 *     certain applications happy.
	 *
	 * NB: the channel is setup each time we transition to the
	 *     RUN state to avoid filling it in for each frame.
	 */
	sc->sc_tx_th_len = roundup(sizeof(sc->sc_tx_th), sizeof(u_int32_t));
	sc->sc_tx_th.wt_ihdr.it_len = htole16(sc->sc_tx_th_len);
	sc->sc_tx_th.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT);

	sc->sc_rx_th_len = roundup(sizeof(sc->sc_rx_th), sizeof(u_int32_t));
	sc->sc_rx_th.wr_ihdr.it_len = htole16(sc->sc_rx_th_len);
	sc->sc_rx_th.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT);
}

/*
 * Announce various information on device/driver attach.
 */
static void
ath_announce(struct ath_softc *sc)
{
#define	HAL_MODE_DUALBAND	(HAL_MODE_11A|HAL_MODE_11B)
	struct ifnet *ifp = &sc->sc_if;
	struct ath_hal *ah = sc->sc_ah;
	u_int modes, cc;

	if_printf(ifp, "mac %d.%d phy %d.%d",
		ah->ah_macVersion, ah->ah_macRev,
		ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
	/*
	 * Print radio revision(s).  We check the wireless modes
	 * to avoid falsely printing revs for inoperable parts.
	 * Dual-band radio revs are returned in the 5GHz rev number.
	 */
	ath_hal_getcountrycode(ah, &cc);
	modes = ath_hal_getwirelessmodes(ah, cc);
	if ((modes & HAL_MODE_DUALBAND) == HAL_MODE_DUALBAND) {
		if (ah->ah_analog5GhzRev && ah->ah_analog2GhzRev)
			printf(" 5ghz radio %d.%d 2ghz radio %d.%d",
				ah->ah_analog5GhzRev >> 4,
				ah->ah_analog5GhzRev & 0xf,
				ah->ah_analog2GhzRev >> 4,
				ah->ah_analog2GhzRev & 0xf);
		else
			printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4,
				ah->ah_analog5GhzRev & 0xf);
	} else
		printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4,
			ah->ah_analog5GhzRev & 0xf);
	printf("\n");
	if (bootverbose) {
		int i;
		/* report the hw queue assigned to each WME access category */
		for (i = 0; i <= WME_AC_VO; i++) {
			struct ath_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
				txq->axq_qnum, ieee80211_wme_acnames[i]);
		}
		if_printf(ifp, "Use hw queue %u for CAB traffic\n",
			sc->sc_cabq->axq_qnum);
		if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
	}
#undef HAL_MODE_DUALBAND
}