/*	$OpenBSD: ath.c,v 1.80 2009/01/21 21:53:59 grange Exp $	*/
/*	$NetBSD: ath.c,v 1.37 2004/08/18 21:59:39 dyoung Exp $	*/

/*-
 * Copyright (c) 2002-2004 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated. It has been modified for OpenBSD to use an
 * open source HAL instead of the original binary-only HAL.
42 */ 43 44 #include "bpfilter.h" 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/sysctl.h> 49 #include <sys/mbuf.h> 50 #include <sys/malloc.h> 51 #include <sys/lock.h> 52 #include <sys/kernel.h> 53 #include <sys/socket.h> 54 #include <sys/sockio.h> 55 #include <sys/device.h> 56 #include <sys/errno.h> 57 #include <sys/timeout.h> 58 #include <sys/gpio.h> 59 60 #include <machine/endian.h> 61 #include <machine/bus.h> 62 63 #include <net/if.h> 64 #include <net/if_dl.h> 65 #include <net/if_media.h> 66 #include <net/if_arp.h> 67 #include <net/if_llc.h> 68 #if NBPFILTER > 0 69 #include <net/bpf.h> 70 #endif 71 #ifdef INET 72 #include <netinet/in.h> 73 #include <netinet/if_ether.h> 74 #endif 75 76 #include <net80211/ieee80211_var.h> 77 #include <net80211/ieee80211_rssadapt.h> 78 79 #include <dev/pci/pcidevs.h> 80 #include <dev/gpio/gpiovar.h> 81 82 #include <dev/ic/athvar.h> 83 84 int ath_init(struct ifnet *); 85 int ath_init1(struct ath_softc *); 86 int ath_intr1(struct ath_softc *); 87 void ath_stop(struct ifnet *); 88 void ath_start(struct ifnet *); 89 void ath_reset(struct ath_softc *, int); 90 int ath_media_change(struct ifnet *); 91 void ath_watchdog(struct ifnet *); 92 int ath_ioctl(struct ifnet *, u_long, caddr_t); 93 void ath_fatal_proc(void *, int); 94 void ath_rxorn_proc(void *, int); 95 void ath_bmiss_proc(void *, int); 96 u_int ath_chan2flags(struct ieee80211com *, struct ieee80211_channel *); 97 int ath_initkeytable(struct ath_softc *); 98 void ath_mcastfilter_accum(caddr_t, u_int32_t (*)[2]); 99 void ath_mcastfilter_compute(struct ath_softc *, u_int32_t (*)[2]); 100 u_int32_t ath_calcrxfilter(struct ath_softc *); 101 void ath_mode_init(struct ath_softc *); 102 #ifndef IEEE80211_STA_ONLY 103 int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); 104 void ath_beacon_proc(void *, int); 105 void ath_beacon_free(struct ath_softc *); 106 #endif 107 void ath_beacon_config(struct ath_softc *); 108 int ath_desc_alloc(struct ath_softc *); 109 void ath_desc_free(struct ath_softc *); 110 struct ieee80211_node *ath_node_alloc(struct ieee80211com *); 111 struct mbuf *ath_getmbuf(int, int, u_int); 112 void ath_node_free(struct ieee80211com *, struct ieee80211_node *); 113 void ath_node_copy(struct ieee80211com *, 114 struct ieee80211_node *, const struct ieee80211_node *); 115 u_int8_t ath_node_getrssi(struct ieee80211com *, 116 const struct ieee80211_node *); 117 int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); 118 void ath_rx_proc(void *, int); 119 int ath_tx_start(struct ath_softc *, struct ieee80211_node *, 120 struct ath_buf *, struct mbuf *); 121 void ath_tx_proc(void *, int); 122 int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 123 void ath_draintxq(struct ath_softc *); 124 void ath_stoprecv(struct ath_softc *); 125 int ath_startrecv(struct ath_softc *); 126 void ath_next_scan(void *); 127 int ath_set_slot_time(struct ath_softc *); 128 void ath_calibrate(void *); 129 void ath_ledstate(struct ath_softc *, enum ieee80211_state); 130 int ath_newstate(struct ieee80211com *, enum ieee80211_state, int); 131 void ath_newassoc(struct ieee80211com *, 132 struct ieee80211_node *, int); 133 int ath_getchannels(struct ath_softc *, HAL_BOOL outdoor, 134 HAL_BOOL xchanmode); 135 int ath_rate_setup(struct ath_softc *sc, u_int mode); 136 void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 137 void ath_rssadapt_updatenode(void *, struct ieee80211_node *); 138 void ath_rssadapt_updatestats(void *); 139 #ifndef IEEE80211_STA_ONLY 140 
void ath_recv_mgmt(struct ieee80211com *, struct mbuf *, 141 struct ieee80211_node *, struct ieee80211_rxinfo *, int); 142 #endif 143 void ath_disable(struct ath_softc *); 144 void ath_power(int, void *); 145 146 int ath_gpio_attach(struct ath_softc *, u_int16_t); 147 int ath_gpio_pin_read(void *, int); 148 void ath_gpio_pin_write(void *, int, int); 149 void ath_gpio_pin_ctl(void *, int, int); 150 151 #ifdef AR_DEBUG 152 void ath_printrxbuf(struct ath_buf *, int); 153 void ath_printtxbuf(struct ath_buf *, int); 154 int ath_debug = 0; 155 #endif 156 157 int ath_dwelltime = 200; /* 5 channels/second */ 158 int ath_calinterval = 30; /* calibrate every 30 secs */ 159 int ath_outdoor = AH_TRUE; /* outdoor operation */ 160 int ath_xchanmode = AH_TRUE; /* enable extended channels */ 161 int ath_softcrypto = 1; /* 1=enable software crypto */ 162 163 struct cfdriver ath_cd = { 164 NULL, "ath", DV_IFNET 165 }; 166 167 #if 0 168 int 169 ath_activate(struct device *self, enum devact act) 170 { 171 struct ath_softc *sc = (struct ath_softc *)self; 172 int rv = 0, s; 173 174 s = splnet(); 175 switch (act) { 176 case DVACT_ACTIVATE: 177 break; 178 case DVACT_DEACTIVATE: 179 if_deactivate(&sc->sc_ic.ic_if); 180 break; 181 } 182 splx(s); 183 return rv; 184 } 185 #endif 186 187 int 188 ath_enable(struct ath_softc *sc) 189 { 190 if (ATH_IS_ENABLED(sc) == 0) { 191 if (sc->sc_enable != NULL && (*sc->sc_enable)(sc) != 0) { 192 printf("%s: device enable failed\n", 193 sc->sc_dev.dv_xname); 194 return (EIO); 195 } 196 sc->sc_flags |= ATH_ENABLED; 197 } 198 return (0); 199 } 200 201 void 202 ath_disable(struct ath_softc *sc) 203 { 204 if (!ATH_IS_ENABLED(sc)) 205 return; 206 if (sc->sc_disable != NULL) 207 (*sc->sc_disable)(sc); 208 sc->sc_flags &= ~ATH_ENABLED; 209 } 210 211 int 212 ath_attach(u_int16_t devid, struct ath_softc *sc) 213 { 214 struct ieee80211com *ic = &sc->sc_ic; 215 struct ifnet *ifp = &ic->ic_if; 216 struct ath_hal *ah; 217 HAL_STATUS status; 218 HAL_TXQ_INFO qinfo; 219 int error = 0, i; 220 221 DPRINTF(ATH_DEBUG_ANY, ("%s: devid 0x%x\n", __func__, devid)); 222 223 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 224 sc->sc_flags &= ~ATH_ATTACHED; /* make sure that it's not attached */ 225 226 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, 227 sc->sc_pcie, &status); 228 if (ah == NULL) { 229 printf("%s: unable to attach hardware; HAL status %d\n", 230 ifp->if_xname, status); 231 error = ENXIO; 232 goto bad; 233 } 234 if (ah->ah_abi != HAL_ABI_VERSION) { 235 printf("%s: HAL ABI mismatch detected (0x%x != 0x%x)\n", 236 ifp->if_xname, ah->ah_abi, HAL_ABI_VERSION); 237 error = ENXIO; 238 goto bad; 239 } 240 241 if (ah->ah_single_chip == AH_TRUE) { 242 printf("%s: AR%s %u.%u phy %u.%u rf %u.%u", ifp->if_xname, 243 ar5k_printver(AR5K_VERSION_DEV, devid), 244 ah->ah_mac_version, ah->ah_mac_revision, 245 ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf, 246 ah->ah_radio_5ghz_revision >> 4, 247 ah->ah_radio_5ghz_revision & 0xf); 248 } else { 249 printf("%s: AR%s %u.%u phy %u.%u", ifp->if_xname, 250 ar5k_printver(AR5K_VERSION_VER, ah->ah_mac_srev), 251 ah->ah_mac_version, ah->ah_mac_revision, 252 ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf); 253 printf(" rf%s %u.%u", 254 ar5k_printver(AR5K_VERSION_RAD, ah->ah_radio_5ghz_revision), 255 ah->ah_radio_5ghz_revision >> 4, 256 ah->ah_radio_5ghz_revision & 0xf); 257 if (ah->ah_radio_2ghz_revision != 0) { 258 printf(" rf%s %u.%u", 259 ar5k_printver(AR5K_VERSION_RAD, 260 ah->ah_radio_2ghz_revision), 261 ah->ah_radio_2ghz_revision >> 4, 262 
ah->ah_radio_2ghz_revision & 0xf); 263 } 264 } 265 266 #if 0 267 if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_UNSUPP || 268 ah->ah_radio_2ghz_revision >= AR5K_SREV_RAD_UNSUPP) { 269 printf(": RF radio not supported\n"); 270 error = EOPNOTSUPP; 271 goto bad; 272 } 273 #endif 274 275 sc->sc_ah = ah; 276 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ 277 278 /* 279 * Get regulation domain either stored in the EEPROM or defined 280 * as the default value. Some devices are known to have broken 281 * regulation domain values in their EEPROM. 282 */ 283 ath_hal_get_regdomain(ah, &ah->ah_regdomain); 284 285 /* 286 * Construct channel list based on the current regulation domain. 287 */ 288 error = ath_getchannels(sc, ath_outdoor, ath_xchanmode); 289 if (error != 0) 290 goto bad; 291 292 /* 293 * Setup rate tables for all potential media types. 294 */ 295 ath_rate_setup(sc, IEEE80211_MODE_11A); 296 ath_rate_setup(sc, IEEE80211_MODE_11B); 297 ath_rate_setup(sc, IEEE80211_MODE_11G); 298 ath_rate_setup(sc, IEEE80211_MODE_TURBO); 299 300 error = ath_desc_alloc(sc); 301 if (error != 0) { 302 printf(": failed to allocate descriptors: %d\n", error); 303 goto bad; 304 } 305 timeout_set(&sc->sc_scan_to, ath_next_scan, sc); 306 timeout_set(&sc->sc_cal_to, ath_calibrate, sc); 307 timeout_set(&sc->sc_rssadapt_to, ath_rssadapt_updatestats, sc); 308 309 #ifdef __FreeBSD__ 310 ATH_TXBUF_LOCK_INIT(sc); 311 ATH_TXQ_LOCK_INIT(sc); 312 #endif 313 314 ATH_TASK_INIT(&sc->sc_txtask, ath_tx_proc, sc); 315 ATH_TASK_INIT(&sc->sc_rxtask, ath_rx_proc, sc); 316 ATH_TASK_INIT(&sc->sc_rxorntask, ath_rxorn_proc, sc); 317 ATH_TASK_INIT(&sc->sc_fataltask, ath_fatal_proc, sc); 318 ATH_TASK_INIT(&sc->sc_bmisstask, ath_bmiss_proc, sc); 319 #ifndef IEEE80211_STA_ONLY 320 ATH_TASK_INIT(&sc->sc_swbatask, ath_beacon_proc, sc); 321 #endif 322 323 /* 324 * For now just pre-allocate one data queue and one 325 * beacon queue. Note that the HAL handles resetting 326 * them at the needed time. Eventually we'll want to 327 * allocate more tx queues for splitting management 328 * frames and for QOS support. 
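	 * (For QoS that would presumably mean one data queue per EDCA
	 * access category -- background, best effort, video and voice --
	 * selected via the tqi_subtype field that is filled in below.)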
329 */ 330 sc->sc_bhalq = ath_hal_setup_tx_queue(ah, HAL_TX_QUEUE_BEACON, NULL); 331 if (sc->sc_bhalq == (u_int) -1) { 332 printf(": unable to setup a beacon xmit queue!\n"); 333 goto bad2; 334 } 335 336 for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) { 337 bzero(&qinfo, sizeof(qinfo)); 338 qinfo.tqi_type = HAL_TX_QUEUE_DATA; 339 qinfo.tqi_subtype = i; /* should be mapped to WME types */ 340 sc->sc_txhalq[i] = ath_hal_setup_tx_queue(ah, 341 HAL_TX_QUEUE_DATA, &qinfo); 342 if (sc->sc_txhalq[i] == (u_int) -1) { 343 printf(": unable to setup a data xmit queue %u!\n", i); 344 goto bad2; 345 } 346 } 347 348 ifp->if_softc = sc; 349 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST 350 | IFF_NOTRAILERS; 351 ifp->if_start = ath_start; 352 ifp->if_watchdog = ath_watchdog; 353 ifp->if_ioctl = ath_ioctl; 354 #ifndef __OpenBSD__ 355 ifp->if_init = ath_init; 356 ifp->if_stop = ath_stop; /* XXX */ 357 #endif 358 IFQ_SET_MAXLEN(&ifp->if_snd, ATH_TXBUF * ATH_TXDESC); 359 IFQ_SET_READY(&ifp->if_snd); 360 361 ic->ic_softc = sc; 362 ic->ic_newassoc = ath_newassoc; 363 /* XXX not right but it's not used anywhere important */ 364 ic->ic_phytype = IEEE80211_T_OFDM; 365 ic->ic_opmode = IEEE80211_M_STA; 366 ic->ic_caps = IEEE80211_C_WEP /* wep supported */ 367 | IEEE80211_C_PMGT /* power management */ 368 #ifndef IEEE80211_STA_ONLY 369 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ 370 | IEEE80211_C_HOSTAP /* hostap mode */ 371 #endif 372 | IEEE80211_C_MONITOR /* monitor mode */ 373 | IEEE80211_C_SHSLOT /* short slot time supported */ 374 | IEEE80211_C_SHPREAMBLE; /* short preamble supported */ 375 if (ath_softcrypto) 376 ic->ic_caps |= IEEE80211_C_RSN; /* wpa/rsn supported */ 377 378 /* 379 * Not all chips have the VEOL support we want to use with 380 * IBSS beacon; check here for it. 381 */ 382 sc->sc_veol = ath_hal_has_veol(ah); 383 384 /* get mac address from hardware */ 385 ath_hal_get_lladdr(ah, ic->ic_myaddr); 386 387 if_attach(ifp); 388 389 /* call MI attach routine. */ 390 ieee80211_ifattach(ifp); 391 392 /* override default methods */ 393 ic->ic_node_alloc = ath_node_alloc; 394 sc->sc_node_free = ic->ic_node_free; 395 ic->ic_node_free = ath_node_free; 396 sc->sc_node_copy = ic->ic_node_copy; 397 ic->ic_node_copy = ath_node_copy; 398 ic->ic_node_getrssi = ath_node_getrssi; 399 sc->sc_newstate = ic->ic_newstate; 400 ic->ic_newstate = ath_newstate; 401 #ifndef IEEE80211_STA_ONLY 402 sc->sc_recv_mgmt = ic->ic_recv_mgmt; 403 ic->ic_recv_mgmt = ath_recv_mgmt; 404 #endif 405 ic->ic_max_rssi = AR5K_MAX_RSSI; 406 bcopy(etherbroadcastaddr, sc->sc_broadcast_addr, IEEE80211_ADDR_LEN); 407 408 /* complete initialization */ 409 ieee80211_media_init(ifp, ath_media_change, ieee80211_media_status); 410 411 #if NBPFILTER > 0 412 bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO, 413 sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN); 414 415 sc->sc_rxtap_len = sizeof(sc->sc_rxtapu); 416 bzero(&sc->sc_rxtapu, sc->sc_rxtap_len); 417 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); 418 sc->sc_rxtap.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT); 419 420 sc->sc_txtap_len = sizeof(sc->sc_txtapu); 421 bzero(&sc->sc_txtapu, sc->sc_txtap_len); 422 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); 423 sc->sc_txtap.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT); 424 #endif 425 426 sc->sc_flags |= ATH_ATTACHED; 427 /* 428 * Make sure the interface is shutdown during reboot. 
429 */ 430 sc->sc_sdhook = shutdownhook_establish(ath_shutdown, sc); 431 if (sc->sc_sdhook == NULL) 432 printf(": WARNING: unable to establish shutdown hook\n"); 433 sc->sc_powerhook = powerhook_establish(ath_power, sc); 434 if (sc->sc_powerhook == NULL) 435 printf(": WARNING: unable to establish power hook\n"); 436 437 /* 438 * Print regulation domain and the mac address. The regulation domain 439 * will be marked with a * if the EEPROM value has been overwritten. 440 */ 441 printf(", %s%s, address %s\n", 442 ieee80211_regdomain2name(ah->ah_regdomain), 443 ah->ah_regdomain != ah->ah_regdomain_hw ? "*" : "", 444 ether_sprintf(ic->ic_myaddr)); 445 446 if (ath_gpio_attach(sc, devid) == 0) 447 sc->sc_flags |= ATH_GPIO; 448 449 return 0; 450 bad2: 451 ath_desc_free(sc); 452 bad: 453 if (ah) 454 ath_hal_detach(ah); 455 sc->sc_invalid = 1; 456 return error; 457 } 458 459 int 460 ath_detach(struct ath_softc *sc, int flags) 461 { 462 struct ifnet *ifp = &sc->sc_ic.ic_if; 463 int s; 464 465 if ((sc->sc_flags & ATH_ATTACHED) == 0) 466 return (0); 467 468 config_detach_children(&sc->sc_dev, flags); 469 470 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags %x\n", __func__, ifp->if_flags)); 471 472 timeout_del(&sc->sc_scan_to); 473 timeout_del(&sc->sc_cal_to); 474 timeout_del(&sc->sc_rssadapt_to); 475 476 s = splnet(); 477 ath_stop(ifp); 478 ath_desc_free(sc); 479 ath_hal_detach(sc->sc_ah); 480 481 ieee80211_ifdetach(ifp); 482 if_detach(ifp); 483 484 splx(s); 485 if (sc->sc_powerhook != NULL) 486 powerhook_disestablish(sc->sc_powerhook); 487 if (sc->sc_sdhook != NULL) 488 shutdownhook_disestablish(sc->sc_sdhook); 489 #ifdef __FreeBSD__ 490 ATH_TXBUF_LOCK_DESTROY(sc); 491 ATH_TXQ_LOCK_DESTROY(sc); 492 #endif 493 494 return 0; 495 } 496 497 void 498 ath_power(int why, void *arg) 499 { 500 struct ath_softc *sc = arg; 501 int s; 502 503 DPRINTF(ATH_DEBUG_ANY, ("ath_power(%d)\n", why)); 504 505 s = splnet(); 506 switch (why) { 507 case PWR_SUSPEND: 508 case PWR_STANDBY: 509 ath_suspend(sc, why); 510 break; 511 case PWR_RESUME: 512 ath_resume(sc, why); 513 break; 514 #if !defined(__OpenBSD__) 515 case PWR_SOFTSUSPEND: 516 case PWR_SOFTSTANDBY: 517 case PWR_SOFTRESUME: 518 break; 519 #endif 520 } 521 splx(s); 522 } 523 524 void 525 ath_suspend(struct ath_softc *sc, int why) 526 { 527 struct ifnet *ifp = &sc->sc_ic.ic_if; 528 529 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags %x\n", __func__, ifp->if_flags)); 530 531 ath_stop(ifp); 532 if (sc->sc_power != NULL) 533 (*sc->sc_power)(sc, why); 534 } 535 536 void 537 ath_resume(struct ath_softc *sc, int why) 538 { 539 struct ifnet *ifp = &sc->sc_ic.ic_if; 540 541 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags %x\n", __func__, ifp->if_flags)); 542 543 if (ifp->if_flags & IFF_UP) { 544 ath_init(ifp); 545 #if 0 546 (void)ath_intr(sc); 547 #endif 548 if (sc->sc_power != NULL) 549 (*sc->sc_power)(sc, why); 550 if (ifp->if_flags & IFF_RUNNING) 551 ath_start(ifp); 552 } 553 } 554 555 void 556 ath_shutdown(void *arg) 557 { 558 struct ath_softc *sc = arg; 559 560 ath_stop(&sc->sc_ic.ic_if); 561 } 562 563 int 564 ath_intr(void *arg) 565 { 566 return ath_intr1((struct ath_softc *)arg); 567 } 568 569 int 570 ath_intr1(struct ath_softc *sc) 571 { 572 struct ieee80211com *ic = &sc->sc_ic; 573 struct ifnet *ifp = &ic->ic_if; 574 struct ath_hal *ah = sc->sc_ah; 575 HAL_INT status; 576 577 if (sc->sc_invalid) { 578 /* 579 * The hardware is not ready/present, don't touch anything. 580 * Note this can happen early on if the IRQ is shared. 
581 */ 582 DPRINTF(ATH_DEBUG_ANY, ("%s: invalid; ignored\n", __func__)); 583 return 0; 584 } 585 if (!ath_hal_is_intr_pending(ah)) /* shared irq, not for us */ 586 return 0; 587 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) != (IFF_RUNNING|IFF_UP)) { 588 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n", 589 __func__, ifp->if_flags)); 590 ath_hal_get_isr(ah, &status); /* clear ISR */ 591 ath_hal_set_intr(ah, 0); /* disable further intr's */ 592 return 1; /* XXX */ 593 } 594 ath_hal_get_isr(ah, &status); /* NB: clears ISR too */ 595 DPRINTF(ATH_DEBUG_INTR, ("%s: status 0x%x\n", __func__, status)); 596 status &= sc->sc_imask; /* discard unasked for bits */ 597 if (status & HAL_INT_FATAL) { 598 sc->sc_stats.ast_hardware++; 599 ath_hal_set_intr(ah, 0); /* disable intr's until reset */ 600 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_fataltask); 601 } else if (status & HAL_INT_RXORN) { 602 sc->sc_stats.ast_rxorn++; 603 ath_hal_set_intr(ah, 0); /* disable intr's until reset */ 604 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxorntask); 605 } else if (status & HAL_INT_MIB) { 606 DPRINTF(ATH_DEBUG_INTR, 607 ("%s: resetting MIB counters\n", __func__)); 608 sc->sc_stats.ast_mib++; 609 ath_hal_update_mib_counters(ah, &sc->sc_mib_stats); 610 } else { 611 if (status & HAL_INT_RXEOL) { 612 /* 613 * NB: the hardware should re-read the link when 614 * RXE bit is written, but it doesn't work at 615 * least on older hardware revs. 616 */ 617 sc->sc_stats.ast_rxeol++; 618 sc->sc_rxlink = NULL; 619 } 620 if (status & HAL_INT_TXURN) { 621 sc->sc_stats.ast_txurn++; 622 /* bump tx trigger level */ 623 ath_hal_update_tx_triglevel(ah, AH_TRUE); 624 } 625 if (status & HAL_INT_RX) 626 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxtask); 627 if (status & HAL_INT_TX) 628 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_txtask); 629 if (status & HAL_INT_SWBA) 630 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_swbatask); 631 if (status & HAL_INT_BMISS) { 632 sc->sc_stats.ast_bmiss++; 633 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_bmisstask); 634 } 635 } 636 return 1; 637 } 638 639 void 640 ath_fatal_proc(void *arg, int pending) 641 { 642 struct ath_softc *sc = arg; 643 struct ieee80211com *ic = &sc->sc_ic; 644 struct ifnet *ifp = &ic->ic_if; 645 646 if (ifp->if_flags & IFF_DEBUG) 647 printf("%s: hardware error; resetting\n", ifp->if_xname); 648 ath_reset(sc, 1); 649 } 650 651 void 652 ath_rxorn_proc(void *arg, int pending) 653 { 654 struct ath_softc *sc = arg; 655 struct ieee80211com *ic = &sc->sc_ic; 656 struct ifnet *ifp = &ic->ic_if; 657 658 if (ifp->if_flags & IFF_DEBUG) 659 printf("%s: rx FIFO overrun; resetting\n", ifp->if_xname); 660 ath_reset(sc, 1); 661 } 662 663 void 664 ath_bmiss_proc(void *arg, int pending) 665 { 666 struct ath_softc *sc = arg; 667 struct ieee80211com *ic = &sc->sc_ic; 668 669 DPRINTF(ATH_DEBUG_ANY, ("%s: pending %u\n", __func__, pending)); 670 if (ic->ic_opmode != IEEE80211_M_STA) 671 return; 672 if (ic->ic_state == IEEE80211_S_RUN) { 673 /* 674 * Rather than go directly to scan state, try to 675 * reassociate first. If that fails then the state 676 * machine will drop us into scanning after timing 677 * out waiting for a probe response. 
678 */ 679 ieee80211_new_state(ic, IEEE80211_S_ASSOC, -1); 680 } 681 } 682 683 u_int 684 ath_chan2flags(struct ieee80211com *ic, struct ieee80211_channel *chan) 685 { 686 enum ieee80211_phymode mode = ieee80211_chan2mode(ic, chan); 687 688 switch (mode) { 689 case IEEE80211_MODE_AUTO: 690 return 0; 691 case IEEE80211_MODE_11A: 692 return CHANNEL_A; 693 case IEEE80211_MODE_11B: 694 return CHANNEL_B; 695 case IEEE80211_MODE_11G: 696 return CHANNEL_G; 697 case IEEE80211_MODE_TURBO: 698 return CHANNEL_T; 699 default: 700 panic("%s: unsupported mode %d", __func__, mode); 701 return 0; 702 } 703 } 704 705 int 706 ath_init(struct ifnet *ifp) 707 { 708 return ath_init1((struct ath_softc *)ifp->if_softc); 709 } 710 711 int 712 ath_init1(struct ath_softc *sc) 713 { 714 struct ieee80211com *ic = &sc->sc_ic; 715 struct ifnet *ifp = &ic->ic_if; 716 struct ieee80211_node *ni; 717 enum ieee80211_phymode mode; 718 struct ath_hal *ah = sc->sc_ah; 719 HAL_STATUS status; 720 HAL_CHANNEL hchan; 721 int error = 0, s; 722 723 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n", 724 __func__, ifp->if_flags)); 725 726 if ((error = ath_enable(sc)) != 0) 727 return error; 728 729 s = splnet(); 730 /* 731 * Stop anything previously setup. This is safe 732 * whether this is the first time through or not. 733 */ 734 ath_stop(ifp); 735 736 /* 737 * Reset the link layer address to the latest value. 738 */ 739 IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl)); 740 ath_hal_set_lladdr(ah, ic->ic_myaddr); 741 742 /* 743 * The basic interface to setting the hardware in a good 744 * state is ``reset''. On return the hardware is known to 745 * be powered up and with interrupts disabled. This must 746 * be followed by initialization of the appropriate bits 747 * and then setup of the interrupt mask. 748 */ 749 hchan.channel = ic->ic_ibss_chan->ic_freq; 750 hchan.channelFlags = ath_chan2flags(ic, ic->ic_ibss_chan); 751 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, &status)) { 752 printf("%s: unable to reset hardware; hal status %u\n", 753 ifp->if_xname, status); 754 error = EIO; 755 goto done; 756 } 757 ath_set_slot_time(sc); 758 759 if ((error = ath_initkeytable(sc)) != 0) { 760 printf("%s: unable to reset the key cache\n", 761 ifp->if_xname); 762 goto done; 763 } 764 765 if ((error = ath_startrecv(sc)) != 0) { 766 printf("%s: unable to start recv logic\n", ifp->if_xname); 767 goto done; 768 } 769 770 /* 771 * Enable interrupts. 772 */ 773 sc->sc_imask = HAL_INT_RX | HAL_INT_TX 774 | HAL_INT_RXEOL | HAL_INT_RXORN 775 | HAL_INT_FATAL | HAL_INT_GLOBAL; 776 #ifndef IEEE80211_STA_ONLY 777 if (ic->ic_opmode == IEEE80211_M_HOSTAP) 778 sc->sc_imask |= HAL_INT_MIB; 779 #endif 780 ath_hal_set_intr(ah, sc->sc_imask); 781 782 ifp->if_flags |= IFF_RUNNING; 783 ic->ic_state = IEEE80211_S_INIT; 784 785 /* 786 * The hardware should be ready to go now so it's safe 787 * to kick the 802.11 state machine as it's likely to 788 * immediately call back to us to send mgmt frames. 
789 */ 790 ni = ic->ic_bss; 791 ni->ni_chan = ic->ic_ibss_chan; 792 mode = ieee80211_chan2mode(ic, ni->ni_chan); 793 if (mode != sc->sc_curmode) 794 ath_setcurmode(sc, mode); 795 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 796 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1); 797 } else { 798 ieee80211_new_state(ic, IEEE80211_S_RUN, -1); 799 } 800 done: 801 splx(s); 802 return error; 803 } 804 805 void 806 ath_stop(struct ifnet *ifp) 807 { 808 struct ieee80211com *ic = (struct ieee80211com *) ifp; 809 struct ath_softc *sc = ifp->if_softc; 810 struct ath_hal *ah = sc->sc_ah; 811 int s; 812 813 DPRINTF(ATH_DEBUG_ANY, ("%s: invalid %u if_flags 0x%x\n", 814 __func__, sc->sc_invalid, ifp->if_flags)); 815 816 s = splnet(); 817 if (ifp->if_flags & IFF_RUNNING) { 818 /* 819 * Shutdown the hardware and driver: 820 * disable interrupts 821 * turn off timers 822 * clear transmit machinery 823 * clear receive machinery 824 * drain and release tx queues 825 * reclaim beacon resources 826 * reset 802.11 state machine 827 * power down hardware 828 * 829 * Note that some of this work is not possible if the 830 * hardware is gone (invalid). 831 */ 832 ifp->if_flags &= ~IFF_RUNNING; 833 ifp->if_timer = 0; 834 if (!sc->sc_invalid) 835 ath_hal_set_intr(ah, 0); 836 ath_draintxq(sc); 837 if (!sc->sc_invalid) { 838 ath_stoprecv(sc); 839 } else { 840 sc->sc_rxlink = NULL; 841 } 842 IFQ_PURGE(&ifp->if_snd); 843 #ifndef IEEE80211_STA_ONLY 844 ath_beacon_free(sc); 845 #endif 846 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 847 if (!sc->sc_invalid) { 848 ath_hal_set_power(ah, HAL_PM_FULL_SLEEP, 0); 849 } 850 ath_disable(sc); 851 } 852 splx(s); 853 } 854 855 /* 856 * Reset the hardware w/o losing operational state. This is 857 * basically a more efficient way of doing ath_stop, ath_init, 858 * followed by state transitions to the current 802.11 859 * operational state. Used to recover from errors rx overrun 860 * and to reset the hardware when rf gain settings must be reset. 861 */ 862 void 863 ath_reset(struct ath_softc *sc, int full) 864 { 865 struct ieee80211com *ic = &sc->sc_ic; 866 struct ifnet *ifp = &ic->ic_if; 867 struct ath_hal *ah = sc->sc_ah; 868 struct ieee80211_channel *c; 869 HAL_STATUS status; 870 HAL_CHANNEL hchan; 871 872 /* 873 * Convert to a HAL channel description with the flags 874 * constrained to reflect the current operating mode. 875 */ 876 c = ic->ic_ibss_chan; 877 hchan.channel = c->ic_freq; 878 hchan.channelFlags = ath_chan2flags(ic, c); 879 880 ath_hal_set_intr(ah, 0); /* disable interrupts */ 881 ath_draintxq(sc); /* stop xmit side */ 882 ath_stoprecv(sc); /* stop recv side */ 883 /* NB: indicate channel change so we do a full reset */ 884 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, 885 full ? 
AH_TRUE : AH_FALSE, &status)) { 886 printf("%s: %s: unable to reset hardware; hal status %u\n", 887 ifp->if_xname, __func__, status); 888 } 889 ath_set_slot_time(sc); 890 /* In case channel changed, save as a node channel */ 891 ic->ic_bss->ni_chan = ic->ic_ibss_chan; 892 ath_hal_set_intr(ah, sc->sc_imask); 893 if (ath_startrecv(sc) != 0) /* restart recv */ 894 printf("%s: %s: unable to start recv logic\n", ifp->if_xname, 895 __func__); 896 ath_start(ifp); /* restart xmit */ 897 if (ic->ic_state == IEEE80211_S_RUN) 898 ath_beacon_config(sc); /* restart beacons */ 899 } 900 901 void 902 ath_start(struct ifnet *ifp) 903 { 904 struct ath_softc *sc = ifp->if_softc; 905 struct ath_hal *ah = sc->sc_ah; 906 struct ieee80211com *ic = &sc->sc_ic; 907 struct ieee80211_node *ni; 908 struct ath_buf *bf; 909 struct mbuf *m; 910 struct ieee80211_frame *wh; 911 int s; 912 913 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING || 914 sc->sc_invalid) 915 return; 916 for (;;) { 917 /* 918 * Grab a TX buffer and associated resources. 919 */ 920 s = splnet(); 921 bf = TAILQ_FIRST(&sc->sc_txbuf); 922 if (bf != NULL) 923 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); 924 splx(s); 925 if (bf == NULL) { 926 DPRINTF(ATH_DEBUG_ANY, ("%s: out of xmit buffers\n", 927 __func__)); 928 sc->sc_stats.ast_tx_qstop++; 929 ifp->if_flags |= IFF_OACTIVE; 930 break; 931 } 932 /* 933 * Poll the management queue for frames; they 934 * have priority over normal data frames. 935 */ 936 IF_DEQUEUE(&ic->ic_mgtq, m); 937 if (m == NULL) { 938 /* 939 * No data frames go out unless we're associated. 940 */ 941 if (ic->ic_state != IEEE80211_S_RUN) { 942 DPRINTF(ATH_DEBUG_ANY, 943 ("%s: ignore data packet, state %u\n", 944 __func__, ic->ic_state)); 945 sc->sc_stats.ast_tx_discard++; 946 s = splnet(); 947 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 948 splx(s); 949 break; 950 } 951 IFQ_DEQUEUE(&ifp->if_snd, m); 952 if (m == NULL) { 953 s = splnet(); 954 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 955 splx(s); 956 break; 957 } 958 ifp->if_opackets++; 959 960 #if NBPFILTER > 0 961 if (ifp->if_bpf) 962 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 963 #endif 964 965 /* 966 * Encapsulate the packet in prep for transmission. 967 */ 968 m = ieee80211_encap(ifp, m, &ni); 969 if (m == NULL) { 970 DPRINTF(ATH_DEBUG_ANY, 971 ("%s: encapsulation failure\n", 972 __func__)); 973 sc->sc_stats.ast_tx_encap++; 974 goto bad; 975 } 976 wh = mtod(m, struct ieee80211_frame *); 977 } else { 978 /* 979 * Hack! The referenced node pointer is in the 980 * rcvif field of the packet header. This is 981 * placed there by ieee80211_mgmt_output because 982 * we need to hold the reference with the frame 983 * and there's no other way (other than packet 984 * tags which we consider too expensive to use) 985 * to pass it along. 
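			 * (The reference travels with the frame; it is dropped
			 * again in the error path below or, for frames that
			 * are queued successfully, once transmit processing
			 * completes.)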
986 */ 987 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 988 m->m_pkthdr.rcvif = NULL; 989 990 wh = mtod(m, struct ieee80211_frame *); 991 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 992 IEEE80211_FC0_SUBTYPE_PROBE_RESP) { 993 /* fill time stamp */ 994 u_int64_t tsf; 995 u_int32_t *tstamp; 996 997 tsf = ath_hal_get_tsf64(ah); 998 /* XXX: adjust 100us delay to xmit */ 999 tsf += 100; 1000 tstamp = (u_int32_t *)&wh[1]; 1001 tstamp[0] = htole32(tsf & 0xffffffff); 1002 tstamp[1] = htole32(tsf >> 32); 1003 } 1004 sc->sc_stats.ast_tx_mgmt++; 1005 } 1006 1007 if (ath_tx_start(sc, ni, bf, m)) { 1008 bad: 1009 s = splnet(); 1010 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 1011 splx(s); 1012 ifp->if_oerrors++; 1013 if (ni != NULL) 1014 ieee80211_release_node(ic, ni); 1015 continue; 1016 } 1017 1018 sc->sc_tx_timer = 5; 1019 ifp->if_timer = 1; 1020 } 1021 } 1022 1023 int 1024 ath_media_change(struct ifnet *ifp) 1025 { 1026 int error; 1027 1028 error = ieee80211_media_change(ifp); 1029 if (error == ENETRESET) { 1030 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == 1031 (IFF_RUNNING|IFF_UP)) 1032 ath_init(ifp); /* XXX lose error */ 1033 error = 0; 1034 } 1035 return error; 1036 } 1037 1038 void 1039 ath_watchdog(struct ifnet *ifp) 1040 { 1041 struct ath_softc *sc = ifp->if_softc; 1042 1043 ifp->if_timer = 0; 1044 if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) 1045 return; 1046 if (sc->sc_tx_timer) { 1047 if (--sc->sc_tx_timer == 0) { 1048 printf("%s: device timeout\n", ifp->if_xname); 1049 ath_reset(sc, 1); 1050 ifp->if_oerrors++; 1051 sc->sc_stats.ast_watchdog++; 1052 return; 1053 } 1054 ifp->if_timer = 1; 1055 } 1056 1057 ieee80211_watchdog(ifp); 1058 } 1059 1060 int 1061 ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1062 { 1063 struct ath_softc *sc = ifp->if_softc; 1064 struct ieee80211com *ic = &sc->sc_ic; 1065 struct ifreq *ifr = (struct ifreq *)data; 1066 struct ifaddr *ifa = (struct ifaddr *)data; 1067 int error = 0, s; 1068 1069 s = splnet(); 1070 switch (cmd) { 1071 case SIOCSIFADDR: 1072 ifp->if_flags |= IFF_UP; 1073 #ifdef INET 1074 if (ifa->ifa_addr->sa_family == AF_INET) { 1075 arp_ifinit(&ic->ic_ac, ifa); 1076 } 1077 #endif /* INET */ 1078 /* FALLTHROUGH */ 1079 case SIOCSIFFLAGS: 1080 if (ifp->if_flags & IFF_UP) { 1081 if (ifp->if_flags & IFF_RUNNING) { 1082 /* 1083 * To avoid rescanning another access point, 1084 * do not call ath_init() here. Instead, 1085 * only reflect promisc mode settings. 1086 */ 1087 ath_mode_init(sc); 1088 } else { 1089 /* 1090 * Beware of being called during detach to 1091 * reset promiscuous mode. In that case we 1092 * will still be marked UP but not RUNNING. 1093 * However trying to re-init the interface 1094 * is the wrong thing to do as we've already 1095 * torn down much of our state. There's 1096 * probably a better way to deal with this. 1097 */ 1098 if (!sc->sc_invalid) 1099 ath_init(ifp); /* XXX lose error */ 1100 } 1101 } else 1102 ath_stop(ifp); 1103 break; 1104 case SIOCADDMULTI: 1105 case SIOCDELMULTI: 1106 #ifdef __FreeBSD__ 1107 /* 1108 * The upper layer has already installed/removed 1109 * the multicast address(es), just recalculate the 1110 * multicast filter for the card. 1111 */ 1112 if (ifp->if_flags & IFF_RUNNING) 1113 ath_mode_init(sc); 1114 #endif 1115 error = (cmd == SIOCADDMULTI) ? 
1116 ether_addmulti(ifr, &sc->sc_ic.ic_ac) : 1117 ether_delmulti(ifr, &sc->sc_ic.ic_ac); 1118 if (error == ENETRESET) { 1119 if (ifp->if_flags & IFF_RUNNING) 1120 ath_mode_init(sc); 1121 error = 0; 1122 } 1123 break; 1124 case SIOCGATHSTATS: 1125 error = copyout(&sc->sc_stats, 1126 ifr->ifr_data, sizeof (sc->sc_stats)); 1127 break; 1128 default: 1129 error = ieee80211_ioctl(ifp, cmd, data); 1130 if (error == ENETRESET) { 1131 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == 1132 (IFF_RUNNING|IFF_UP)) { 1133 if (ic->ic_opmode != IEEE80211_M_MONITOR) 1134 ath_init(ifp); /* XXX lose error */ 1135 else 1136 ath_reset(sc, 1); 1137 } 1138 error = 0; 1139 } 1140 break; 1141 } 1142 splx(s); 1143 return error; 1144 } 1145 1146 /* 1147 * Fill the hardware key cache with key entries. 1148 */ 1149 int 1150 ath_initkeytable(struct ath_softc *sc) 1151 { 1152 struct ieee80211com *ic = &sc->sc_ic; 1153 struct ath_hal *ah = sc->sc_ah; 1154 int i; 1155 1156 if (ath_softcrypto) { 1157 /* 1158 * Disable the hardware crypto engine and reset the key cache 1159 * to allow software crypto operation for WEP/RSN/WPA2 1160 */ 1161 if (ic->ic_flags & (IEEE80211_F_WEPON|IEEE80211_F_RSNON)) 1162 (void)ath_hal_softcrypto(ah, AH_TRUE); 1163 else 1164 (void)ath_hal_softcrypto(ah, AH_FALSE); 1165 return (0); 1166 } 1167 1168 /* WEP is disabled, we only support WEP in hardware yet */ 1169 if ((ic->ic_flags & IEEE80211_F_WEPON) == 0) 1170 return (0); 1171 1172 /* 1173 * Setup the hardware after reset: the key cache is filled as 1174 * needed and the receive engine is set going. Frame transmit 1175 * is handled entirely in the frame output path; there's nothing 1176 * to do here except setup the interrupt mask. 1177 */ 1178 1179 /* XXX maybe should reset all keys when !WEPON */ 1180 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 1181 struct ieee80211_key *k = &ic->ic_nw_keys[i]; 1182 if (k->k_len == 0) 1183 ath_hal_reset_key(ah, i); 1184 else { 1185 HAL_KEYVAL hk; 1186 1187 bzero(&hk, sizeof(hk)); 1188 /* 1189 * Pad the key to a supported key length. It 1190 * is always a good idea to use full-length 1191 * keys without padded zeros but this seems 1192 * to be the default behaviour used by many 1193 * implementations. 1194 */ 1195 if (k->k_cipher == IEEE80211_CIPHER_WEP40) 1196 hk.wk_len = AR5K_KEYVAL_LENGTH_40; 1197 else if (k->k_cipher == IEEE80211_CIPHER_WEP104) 1198 hk.wk_len = AR5K_KEYVAL_LENGTH_104; 1199 else 1200 return (EINVAL); 1201 bcopy(k->k_key, hk.wk_key, hk.wk_len); 1202 1203 if (ath_hal_set_key(ah, i, &hk) != AH_TRUE) 1204 return (EINVAL); 1205 } 1206 } 1207 1208 return (0); 1209 } 1210 1211 void 1212 ath_mcastfilter_accum(caddr_t dl, u_int32_t (*mfilt)[2]) 1213 { 1214 u_int32_t val; 1215 u_int8_t pos; 1216 1217 val = LE_READ_4(dl + 0); 1218 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 1219 val = LE_READ_4(dl + 3); 1220 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 1221 pos &= 0x3f; 1222 (*mfilt)[pos / 32] |= (1 << (pos % 32)); 1223 } 1224 1225 void 1226 ath_mcastfilter_compute(struct ath_softc *sc, u_int32_t (*mfilt)[2]) 1227 { 1228 struct ifnet *ifp = &sc->sc_ic.ic_if; 1229 struct ether_multi *enm; 1230 struct ether_multistep estep; 1231 1232 ETHER_FIRST_MULTI(estep, &sc->sc_ic.ic_ac, enm); 1233 while (enm != NULL) { 1234 /* XXX Punt on ranges. 
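		 * A range (enm_addrlo != enm_addrhi) cannot be folded
		 * into the 64-bit hash filter, so we simply open the
		 * filter completely and mark the interface IFF_ALLMULTI
		 * instead.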
*/ 1235 if (!IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) { 1236 (*mfilt)[0] = (*mfilt)[1] = ~((u_int32_t)0); 1237 ifp->if_flags |= IFF_ALLMULTI; 1238 return; 1239 } 1240 ath_mcastfilter_accum(enm->enm_addrlo, mfilt); 1241 ETHER_NEXT_MULTI(estep, enm); 1242 } 1243 ifp->if_flags &= ~IFF_ALLMULTI; 1244 } 1245 1246 /* 1247 * Calculate the receive filter according to the 1248 * operating mode and state: 1249 * 1250 * o always accept unicast, broadcast, and multicast traffic 1251 * o maintain current state of phy error reception 1252 * o probe request frames are accepted only when operating in 1253 * hostap, adhoc, or monitor modes 1254 * o enable promiscuous mode according to the interface state 1255 * o accept beacons: 1256 * - when operating in adhoc mode so the 802.11 layer creates 1257 * node table entries for peers, 1258 * - when operating in station mode for collecting rssi data when 1259 * the station is otherwise quiet, or 1260 * - when scanning 1261 */ 1262 u_int32_t 1263 ath_calcrxfilter(struct ath_softc *sc) 1264 { 1265 struct ieee80211com *ic = &sc->sc_ic; 1266 struct ath_hal *ah = sc->sc_ah; 1267 struct ifnet *ifp = &ic->ic_if; 1268 u_int32_t rfilt; 1269 1270 rfilt = (ath_hal_get_rx_filter(ah) & HAL_RX_FILTER_PHYERR) 1271 | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST; 1272 if (ic->ic_opmode != IEEE80211_M_STA) 1273 rfilt |= HAL_RX_FILTER_PROBEREQ; 1274 #ifndef IEEE80211_STA_ONLY 1275 if (ic->ic_opmode != IEEE80211_M_AHDEMO) 1276 #endif 1277 rfilt |= HAL_RX_FILTER_BEACON; 1278 if (ifp->if_flags & IFF_PROMISC) 1279 rfilt |= HAL_RX_FILTER_PROM; 1280 return rfilt; 1281 } 1282 1283 void 1284 ath_mode_init(struct ath_softc *sc) 1285 { 1286 struct ath_hal *ah = sc->sc_ah; 1287 u_int32_t rfilt, mfilt[2]; 1288 1289 /* configure rx filter */ 1290 rfilt = ath_calcrxfilter(sc); 1291 ath_hal_set_rx_filter(ah, rfilt); 1292 1293 /* configure operational mode */ 1294 ath_hal_set_opmode(ah); 1295 1296 /* calculate and install multicast filter */ 1297 mfilt[0] = mfilt[1] = 0; 1298 ath_mcastfilter_compute(sc, &mfilt); 1299 ath_hal_set_mcast_filter(ah, mfilt[0], mfilt[1]); 1300 DPRINTF(ATH_DEBUG_MODE, ("%s: RX filter 0x%x, MC filter %08x:%08x\n", 1301 __func__, rfilt, mfilt[0], mfilt[1])); 1302 } 1303 1304 struct mbuf * 1305 ath_getmbuf(int flags, int type, u_int pktlen) 1306 { 1307 struct mbuf *m; 1308 1309 KASSERT(pktlen <= MCLBYTES, ("802.11 packet too large: %u", pktlen)); 1310 #ifdef __FreeBSD__ 1311 if (pktlen <= MHLEN) { 1312 MGETHDR(m, flags, type); 1313 } else { 1314 m = m_getcl(flags, type, M_PKTHDR); 1315 } 1316 #else 1317 MGETHDR(m, flags, type); 1318 if (m != NULL && pktlen > MHLEN) { 1319 MCLGET(m, flags); 1320 if ((m->m_flags & M_EXT) == 0) { 1321 m_free(m); 1322 m = NULL; 1323 } 1324 } 1325 #endif 1326 return m; 1327 } 1328 1329 #ifndef IEEE80211_STA_ONLY 1330 int 1331 ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni) 1332 { 1333 struct ieee80211com *ic = &sc->sc_ic; 1334 struct ath_hal *ah = sc->sc_ah; 1335 struct ath_buf *bf; 1336 struct ath_desc *ds; 1337 struct mbuf *m; 1338 int error; 1339 u_int8_t rate; 1340 const HAL_RATE_TABLE *rt; 1341 u_int flags = 0; 1342 1343 bf = sc->sc_bcbuf; 1344 if (bf->bf_m != NULL) { 1345 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1346 m_freem(bf->bf_m); 1347 bf->bf_m = NULL; 1348 bf->bf_node = NULL; 1349 } 1350 /* 1351 * NB: the beacon data buffer must be 32-bit aligned; 1352 * we assume the mbuf routines will return us something 1353 * with this alignment (perhaps should assert). 
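	 * Once the mbuf has been obtained below, such an assertion could
	 * look like (sketch only, not enabled here):
	 *
	 *	KASSERT(((unsigned long)mtod(m, caddr_t) & 3) == 0,
	 *	    ("beacon buffer not 32-bit aligned"));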
1354 */ 1355 m = ieee80211_beacon_alloc(ic, ni); 1356 if (m == NULL) { 1357 DPRINTF(ATH_DEBUG_BEACON, ("%s: cannot get mbuf/cluster\n", 1358 __func__)); 1359 sc->sc_stats.ast_be_nombuf++; 1360 return ENOMEM; 1361 } 1362 1363 DPRINTF(ATH_DEBUG_BEACON, ("%s: m %p len %u\n", __func__, m, m->m_len)); 1364 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m, 1365 BUS_DMA_NOWAIT); 1366 if (error != 0) { 1367 m_freem(m); 1368 return error; 1369 } 1370 KASSERT(bf->bf_nseg == 1, 1371 ("%s: multi-segment packet; nseg %u", __func__, bf->bf_nseg)); 1372 bf->bf_m = m; 1373 1374 /* setup descriptors */ 1375 ds = bf->bf_desc; 1376 bzero(ds, sizeof(struct ath_desc)); 1377 1378 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol) { 1379 ds->ds_link = bf->bf_daddr; /* link to self */ 1380 flags |= HAL_TXDESC_VEOL; 1381 } else { 1382 ds->ds_link = 0; 1383 } 1384 ds->ds_data = bf->bf_segs[0].ds_addr; 1385 1386 DPRINTF(ATH_DEBUG_ANY, ("%s: segaddr %p seglen %u\n", __func__, 1387 (caddr_t)bf->bf_segs[0].ds_addr, (u_int)bf->bf_segs[0].ds_len)); 1388 1389 /* 1390 * Calculate rate code. 1391 * XXX everything at min xmit rate 1392 */ 1393 rt = sc->sc_currates; 1394 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 1395 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) { 1396 rate = rt->info[0].rateCode | rt->info[0].shortPreamble; 1397 } else { 1398 rate = rt->info[0].rateCode; 1399 } 1400 1401 flags = HAL_TXDESC_NOACK; 1402 if (ic->ic_opmode == IEEE80211_M_IBSS) 1403 flags |= HAL_TXDESC_VEOL; 1404 1405 if (!ath_hal_setup_tx_desc(ah, ds 1406 , m->m_pkthdr.len + IEEE80211_CRC_LEN /* packet length */ 1407 , sizeof(struct ieee80211_frame) /* header length */ 1408 , HAL_PKT_TYPE_BEACON /* Atheros packet type */ 1409 , 60 /* txpower XXX */ 1410 , rate, 1 /* series 0 rate/tries */ 1411 , HAL_TXKEYIX_INVALID /* no encryption */ 1412 , 0 /* antenna mode */ 1413 , flags /* no ack for beacons */ 1414 , 0 /* rts/cts rate */ 1415 , 0 /* rts/cts duration */ 1416 )) { 1417 printf("%s: ath_hal_setup_tx_desc failed\n", __func__); 1418 return -1; 1419 } 1420 /* NB: beacon's BufLen must be a multiple of 4 bytes */ 1421 /* XXX verify mbuf data area covers this roundup */ 1422 if (!ath_hal_fill_tx_desc(ah, ds 1423 , roundup(bf->bf_segs[0].ds_len, 4) /* buffer length */ 1424 , AH_TRUE /* first segment */ 1425 , AH_TRUE /* last segment */ 1426 )) { 1427 printf("%s: ath_hal_fill_tx_desc failed\n", __func__); 1428 return -1; 1429 } 1430 1431 /* XXX it is not appropriate to bus_dmamap_sync? -dcy */ 1432 1433 return 0; 1434 } 1435 1436 void 1437 ath_beacon_proc(void *arg, int pending) 1438 { 1439 struct ath_softc *sc = arg; 1440 struct ieee80211com *ic = &sc->sc_ic; 1441 struct ath_buf *bf = sc->sc_bcbuf; 1442 struct ath_hal *ah = sc->sc_ah; 1443 1444 DPRINTF(ATH_DEBUG_BEACON_PROC, ("%s: pending %u\n", __func__, pending)); 1445 if (ic->ic_opmode == IEEE80211_M_STA || 1446 bf == NULL || bf->bf_m == NULL) { 1447 DPRINTF(ATH_DEBUG_ANY, ("%s: ic_flags=%x bf=%p bf_m=%p\n", 1448 __func__, ic->ic_flags, bf, bf ? 
bf->bf_m : NULL));
		return;
	}
	/* TODO: update beacon to reflect PS poll state */
	if (!ath_hal_stop_tx_dma(ah, sc->sc_bhalq)) {
		DPRINTF(ATH_DEBUG_ANY, ("%s: beacon queue %u did not stop?\n",
		    __func__, sc->sc_bhalq));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
	    bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	ath_hal_put_tx_buf(ah, sc->sc_bhalq, bf->bf_daddr);
	ath_hal_tx_start(ah, sc->sc_bhalq);
	DPRINTF(ATH_DEBUG_BEACON_PROC,
	    ("%s: TXDP%u = %p (%p)\n", __func__,
	    sc->sc_bhalq, (caddr_t)bf->bf_daddr, bf->bf_desc));
}

void
ath_beacon_free(struct ath_softc *sc)
{
	struct ath_buf *bf = sc->sc_bcbuf;

	if (bf->bf_m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		bf->bf_node = NULL;
	}
}
#endif /* IEEE80211_STA_ONLY */

/*
 * Configure the beacon and sleep timers.
 *
 * When operating as an AP this resets the TSF and sets
 * up the hardware to notify us when we need to issue beacons.
 *
 * When operating in station mode this sets up the beacon
 * timers according to the timestamp of the last received
 * beacon and the current TSF, configures PCF and DTIM
 * handling, programs the sleep registers so the hardware
 * will wake up in time to receive beacons, and configures
 * the beacon miss handling so we'll receive a BMISS
 * interrupt when we stop seeing beacons from the AP
 * we've associated with.
 */
void
ath_beacon_config(struct ath_softc *sc)
{
#define MS_TO_TU(x)	(((x) * 1000) / 1024)
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	u_int32_t nexttbtt, intval;

	nexttbtt = (LE_READ_4(ni->ni_tstamp + 4) << 22) |
	    (LE_READ_4(ni->ni_tstamp) >> 10);
	intval = MAX(1, ni->ni_intval) & HAL_BEACON_PERIOD;
	if (nexttbtt == 0) {	/* e.g. for ap mode */
		nexttbtt = intval;
	} else if (intval) {
		nexttbtt = roundup(nexttbtt, intval);
	}
	DPRINTF(ATH_DEBUG_BEACON, ("%s: intval %u nexttbtt %u\n",
	    __func__, ni->ni_intval, nexttbtt));
	if (ic->ic_opmode == IEEE80211_M_STA) {
		HAL_BEACON_STATE bs;
		u_int32_t bmisstime;

		/* NB: no PCF support right now */
		bzero(&bs, sizeof(bs));
		bs.bs_intval = intval;
		bs.bs_nexttbtt = nexttbtt;
		bs.bs_dtimperiod = bs.bs_intval;
		bs.bs_nextdtim = nexttbtt;
		/*
		 * Calculate the number of consecutive beacons to miss
		 * before taking a BMISS interrupt.  The configuration
		 * is specified in ms, so we need to convert that to
		 * TU's and then calculate based on the beacon interval.
		 * Note that we clamp the result to at most 7 beacons.
		 */
		bmisstime = MAX(7, ic->ic_bmisstimeout);
		bs.bs_bmissthreshold = howmany(bmisstime, intval);
		if (bs.bs_bmissthreshold > 7) {
			bs.bs_bmissthreshold = 7;
		} else if (bs.bs_bmissthreshold <= 0) {
			bs.bs_bmissthreshold = 1;
		}

		/*
		 * Calculate sleep duration.  The configuration is
		 * given in ms.  We ensure a multiple of the beacon
		 * period is used.  Also, if the sleep duration is
		 * greater than the DTIM period then it makes sense
		 * to make it a multiple of that.
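		 * As a worked example: MS_TO_TU(100) = (100 * 1000) / 1024
		 * = 97 TU, so with a typical 100 TU beacon interval the
		 * roundup() below yields a sleep duration of 100 TU.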
1545 * 1546 * XXX fixed at 100ms 1547 */ 1548 bs.bs_sleepduration = 1549 roundup(MS_TO_TU(100), bs.bs_intval); 1550 if (bs.bs_sleepduration > bs.bs_dtimperiod) { 1551 bs.bs_sleepduration = 1552 roundup(bs.bs_sleepduration, bs.bs_dtimperiod); 1553 } 1554 1555 DPRINTF(ATH_DEBUG_BEACON, 1556 ("%s: intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u" 1557 " sleep %u\n" 1558 , __func__ 1559 , bs.bs_intval 1560 , bs.bs_nexttbtt 1561 , bs.bs_dtimperiod 1562 , bs.bs_nextdtim 1563 , bs.bs_bmissthreshold 1564 , bs.bs_sleepduration 1565 )); 1566 ath_hal_set_intr(ah, 0); 1567 ath_hal_set_beacon_timers(ah, &bs, 0/*XXX*/, 0, 0); 1568 sc->sc_imask |= HAL_INT_BMISS; 1569 ath_hal_set_intr(ah, sc->sc_imask); 1570 } 1571 #ifndef IEEE80211_STA_ONLY 1572 else { 1573 ath_hal_set_intr(ah, 0); 1574 if (nexttbtt == intval) 1575 intval |= HAL_BEACON_RESET_TSF; 1576 if (ic->ic_opmode == IEEE80211_M_IBSS) { 1577 /* 1578 * In IBSS mode enable the beacon timers but only 1579 * enable SWBA interrupts if we need to manually 1580 * prepare beacon frames. Otherwise we use a 1581 * self-linked tx descriptor and let the hardware 1582 * deal with things. 1583 */ 1584 intval |= HAL_BEACON_ENA; 1585 if (!sc->sc_veol) 1586 sc->sc_imask |= HAL_INT_SWBA; 1587 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) { 1588 /* 1589 * In AP mode we enable the beacon timers and 1590 * SWBA interrupts to prepare beacon frames. 1591 */ 1592 intval |= HAL_BEACON_ENA; 1593 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ 1594 } 1595 ath_hal_init_beacon(ah, nexttbtt, intval); 1596 ath_hal_set_intr(ah, sc->sc_imask); 1597 /* 1598 * When using a self-linked beacon descriptor in IBBS 1599 * mode load it once here. 1600 */ 1601 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol) 1602 ath_beacon_proc(sc, 0); 1603 } 1604 #endif 1605 } 1606 1607 int 1608 ath_desc_alloc(struct ath_softc *sc) 1609 { 1610 int i, bsize, error = -1; 1611 struct ath_desc *ds; 1612 struct ath_buf *bf; 1613 1614 /* allocate descriptors */ 1615 sc->sc_desc_len = sizeof(struct ath_desc) * 1616 (ATH_TXBUF * ATH_TXDESC + ATH_RXBUF + 1); 1617 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_desc_len, PAGE_SIZE, 1618 0, &sc->sc_dseg, 1, &sc->sc_dnseg, 0)) != 0) { 1619 printf("%s: unable to allocate control data, error = %d\n", 1620 sc->sc_dev.dv_xname, error); 1621 goto fail0; 1622 } 1623 1624 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg, 1625 sc->sc_desc_len, (caddr_t *)&sc->sc_desc, BUS_DMA_COHERENT)) != 0) { 1626 printf("%s: unable to map control data, error = %d\n", 1627 sc->sc_dev.dv_xname, error); 1628 goto fail1; 1629 } 1630 1631 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_desc_len, 1, 1632 sc->sc_desc_len, 0, 0, &sc->sc_ddmamap)) != 0) { 1633 printf("%s: unable to create control data DMA map, " 1634 "error = %d\n", sc->sc_dev.dv_xname, error); 1635 goto fail2; 1636 } 1637 1638 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_ddmamap, sc->sc_desc, 1639 sc->sc_desc_len, NULL, 0)) != 0) { 1640 printf("%s: unable to load control data DMA map, error = %d\n", 1641 sc->sc_dev.dv_xname, error); 1642 goto fail3; 1643 } 1644 1645 ds = sc->sc_desc; 1646 sc->sc_desc_paddr = sc->sc_ddmamap->dm_segs[0].ds_addr; 1647 1648 DPRINTF(ATH_DEBUG_XMIT_DESC|ATH_DEBUG_RECV_DESC, 1649 ("ath_desc_alloc: DMA map: %p (%lu) -> %p (%lu)\n", 1650 ds, (u_long)sc->sc_desc_len, 1651 (caddr_t) sc->sc_desc_paddr, /*XXX*/ (u_long) sc->sc_desc_len)); 1652 1653 /* allocate buffers */ 1654 bsize = sizeof(struct ath_buf) * (ATH_TXBUF + ATH_RXBUF + 1); 1655 bf = malloc(bsize, M_DEVBUF, 
M_NOWAIT | M_ZERO); 1656 if (bf == NULL) { 1657 printf("%s: unable to allocate Tx/Rx buffers\n", 1658 sc->sc_dev.dv_xname); 1659 error = ENOMEM; 1660 goto fail3; 1661 } 1662 sc->sc_bufptr = bf; 1663 1664 TAILQ_INIT(&sc->sc_rxbuf); 1665 for (i = 0; i < ATH_RXBUF; i++, bf++, ds++) { 1666 bf->bf_desc = ds; 1667 bf->bf_daddr = sc->sc_desc_paddr + 1668 ((caddr_t)ds - (caddr_t)sc->sc_desc); 1669 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 1670 MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) { 1671 printf("%s: unable to create Rx dmamap, error = %d\n", 1672 sc->sc_dev.dv_xname, error); 1673 goto fail4; 1674 } 1675 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 1676 } 1677 1678 TAILQ_INIT(&sc->sc_txbuf); 1679 for (i = 0; i < ATH_TXBUF; i++, bf++, ds += ATH_TXDESC) { 1680 bf->bf_desc = ds; 1681 bf->bf_daddr = sc->sc_desc_paddr + 1682 ((caddr_t)ds - (caddr_t)sc->sc_desc); 1683 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1684 ATH_TXDESC, MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) { 1685 printf("%s: unable to create Tx dmamap, error = %d\n", 1686 sc->sc_dev.dv_xname, error); 1687 goto fail5; 1688 } 1689 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 1690 } 1691 TAILQ_INIT(&sc->sc_txq); 1692 1693 /* beacon buffer */ 1694 bf->bf_desc = ds; 1695 bf->bf_daddr = sc->sc_desc_paddr + ((caddr_t)ds - (caddr_t)sc->sc_desc); 1696 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 0, 1697 &bf->bf_dmamap)) != 0) { 1698 printf("%s: unable to create beacon dmamap, error = %d\n", 1699 sc->sc_dev.dv_xname, error); 1700 goto fail5; 1701 } 1702 sc->sc_bcbuf = bf; 1703 return 0; 1704 1705 fail5: 1706 for (i = ATH_RXBUF; i < ATH_RXBUF + ATH_TXBUF; i++) { 1707 if (sc->sc_bufptr[i].bf_dmamap == NULL) 1708 continue; 1709 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap); 1710 } 1711 fail4: 1712 for (i = 0; i < ATH_RXBUF; i++) { 1713 if (sc->sc_bufptr[i].bf_dmamap == NULL) 1714 continue; 1715 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap); 1716 } 1717 fail3: 1718 bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap); 1719 fail2: 1720 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap); 1721 sc->sc_ddmamap = NULL; 1722 fail1: 1723 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_desc, sc->sc_desc_len); 1724 fail0: 1725 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg); 1726 return error; 1727 } 1728 1729 void 1730 ath_desc_free(struct ath_softc *sc) 1731 { 1732 struct ath_buf *bf; 1733 1734 bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap); 1735 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap); 1736 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg); 1737 1738 TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) { 1739 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1740 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 1741 m_freem(bf->bf_m); 1742 } 1743 TAILQ_FOREACH(bf, &sc->sc_txbuf, bf_list) 1744 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 1745 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 1746 if (bf->bf_m) { 1747 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1748 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 1749 m_freem(bf->bf_m); 1750 bf->bf_m = NULL; 1751 } 1752 } 1753 if (sc->sc_bcbuf != NULL) { 1754 bus_dmamap_unload(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap); 1755 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap); 1756 sc->sc_bcbuf = NULL; 1757 } 1758 1759 TAILQ_INIT(&sc->sc_rxbuf); 1760 TAILQ_INIT(&sc->sc_txbuf); 1761 TAILQ_INIT(&sc->sc_txq); 1762 free(sc->sc_bufptr, M_DEVBUF); 1763 sc->sc_bufptr = NULL; 1764 } 1765 1766 struct ieee80211_node * 1767 ath_node_alloc(struct 
ieee80211com *ic) 1768 { 1769 struct ath_node *an; 1770 1771 an = malloc(sizeof(*an), M_DEVBUF, M_NOWAIT | M_ZERO); 1772 if (an) { 1773 int i; 1774 for (i = 0; i < ATH_RHIST_SIZE; i++) 1775 an->an_rx_hist[i].arh_ticks = ATH_RHIST_NOTIME; 1776 an->an_rx_hist_next = ATH_RHIST_SIZE-1; 1777 return &an->an_node; 1778 } else 1779 return NULL; 1780 } 1781 1782 void 1783 ath_node_free(struct ieee80211com *ic, struct ieee80211_node *ni) 1784 { 1785 struct ath_softc *sc = ic->ic_if.if_softc; 1786 struct ath_buf *bf; 1787 1788 TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) { 1789 if (bf->bf_node == ni) 1790 bf->bf_node = NULL; 1791 } 1792 (*sc->sc_node_free)(ic, ni); 1793 } 1794 1795 void 1796 ath_node_copy(struct ieee80211com *ic, 1797 struct ieee80211_node *dst, const struct ieee80211_node *src) 1798 { 1799 struct ath_softc *sc = ic->ic_if.if_softc; 1800 1801 bcopy(&src[1], &dst[1], 1802 sizeof(struct ath_node) - sizeof(struct ieee80211_node)); 1803 (*sc->sc_node_copy)(ic, dst, src); 1804 } 1805 1806 u_int8_t 1807 ath_node_getrssi(struct ieee80211com *ic, const struct ieee80211_node *ni) 1808 { 1809 const struct ath_node *an = ATH_NODE(ni); 1810 int i, now, nsamples, rssi; 1811 1812 /* 1813 * Calculate the average over the last second of sampled data. 1814 */ 1815 now = ATH_TICKS(); 1816 nsamples = 0; 1817 rssi = 0; 1818 i = an->an_rx_hist_next; 1819 do { 1820 const struct ath_recv_hist *rh = &an->an_rx_hist[i]; 1821 if (rh->arh_ticks == ATH_RHIST_NOTIME) 1822 goto done; 1823 if (now - rh->arh_ticks > hz) 1824 goto done; 1825 rssi += rh->arh_rssi; 1826 nsamples++; 1827 if (i == 0) { 1828 i = ATH_RHIST_SIZE-1; 1829 } else { 1830 i--; 1831 } 1832 } while (i != an->an_rx_hist_next); 1833 done: 1834 /* 1835 * Return either the average or the last known 1836 * value if there is no recent data. 1837 */ 1838 return (nsamples ? rssi / nsamples : an->an_rx_hist[i].arh_rssi); 1839 } 1840 1841 int 1842 ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) 1843 { 1844 struct ath_hal *ah = sc->sc_ah; 1845 int error; 1846 struct mbuf *m; 1847 struct ath_desc *ds; 1848 1849 m = bf->bf_m; 1850 if (m == NULL) { 1851 /* 1852 * NB: by assigning a page to the rx dma buffer we 1853 * implicitly satisfy the Atheros requirement that 1854 * this buffer be cache-line-aligned and sized to be 1855 * multiple of the cache line size. Not doing this 1856 * causes weird stuff to happen (for the 5210 at least). 1857 */ 1858 m = ath_getmbuf(M_DONTWAIT, MT_DATA, MCLBYTES); 1859 if (m == NULL) { 1860 DPRINTF(ATH_DEBUG_ANY, 1861 ("%s: no mbuf/cluster\n", __func__)); 1862 sc->sc_stats.ast_rx_nombuf++; 1863 return ENOMEM; 1864 } 1865 bf->bf_m = m; 1866 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 1867 1868 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m, 1869 BUS_DMA_NOWAIT); 1870 if (error != 0) { 1871 DPRINTF(ATH_DEBUG_ANY, 1872 ("%s: ath_bus_dmamap_load_mbuf failed;" 1873 " error %d\n", __func__, error)); 1874 sc->sc_stats.ast_rx_busdma++; 1875 return error; 1876 } 1877 KASSERT(bf->bf_nseg == 1, 1878 ("ath_rxbuf_init: multi-segment packet; nseg %u", 1879 bf->bf_nseg)); 1880 } 1881 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 1882 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1883 1884 /* 1885 * Setup descriptors. For receive we always terminate 1886 * the descriptor list with a self-linked entry so we'll 1887 * not get overrun under high load (as can happen with a 1888 * 5212 when ANI processing enables PHY errors). 
1889 * 1890 * To insure the last descriptor is self-linked we create 1891 * each descriptor as self-linked and add it to the end. As 1892 * each additional descriptor is added the previous self-linked 1893 * entry is ``fixed'' naturally. This should be safe even 1894 * if DMA is happening. When processing RX interrupts we 1895 * never remove/process the last, self-linked, entry on the 1896 * descriptor list. This insures the hardware always has 1897 * someplace to write a new frame. 1898 */ 1899 ds = bf->bf_desc; 1900 bzero(ds, sizeof(struct ath_desc)); 1901 #ifndef IEEE80211_STA_ONLY 1902 if (sc->sc_ic.ic_opmode != IEEE80211_M_HOSTAP) 1903 ds->ds_link = bf->bf_daddr; /* link to self */ 1904 #endif 1905 ds->ds_data = bf->bf_segs[0].ds_addr; 1906 ath_hal_setup_rx_desc(ah, ds 1907 , m->m_len /* buffer size */ 1908 , 0 1909 ); 1910 1911 if (sc->sc_rxlink != NULL) 1912 *sc->sc_rxlink = bf->bf_daddr; 1913 sc->sc_rxlink = &ds->ds_link; 1914 return 0; 1915 } 1916 1917 void 1918 ath_rx_proc(void *arg, int npending) 1919 { 1920 #define PA2DESC(_sc, _pa) \ 1921 ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \ 1922 ((_pa) - (_sc)->sc_desc_paddr))) 1923 struct ath_softc *sc = arg; 1924 struct ath_buf *bf; 1925 struct ieee80211com *ic = &sc->sc_ic; 1926 struct ifnet *ifp = &ic->ic_if; 1927 struct ath_hal *ah = sc->sc_ah; 1928 struct ath_desc *ds; 1929 struct mbuf *m; 1930 struct ieee80211_frame *wh; 1931 struct ieee80211_frame whbuf; 1932 struct ieee80211_rxinfo rxi; 1933 struct ieee80211_node *ni; 1934 struct ath_node *an; 1935 struct ath_recv_hist *rh; 1936 int len; 1937 u_int phyerr; 1938 HAL_STATUS status; 1939 1940 DPRINTF(ATH_DEBUG_RX_PROC, ("%s: pending %u\n", __func__, npending)); 1941 do { 1942 bf = TAILQ_FIRST(&sc->sc_rxbuf); 1943 if (bf == NULL) { /* NB: shouldn't happen */ 1944 printf("%s: ath_rx_proc: no buffer!\n", ifp->if_xname); 1945 break; 1946 } 1947 ds = bf->bf_desc; 1948 if (ds->ds_link == bf->bf_daddr) { 1949 /* NB: never process the self-linked entry at the end */ 1950 break; 1951 } 1952 m = bf->bf_m; 1953 if (m == NULL) { /* NB: shouldn't happen */ 1954 printf("%s: ath_rx_proc: no mbuf!\n", ifp->if_xname); 1955 continue; 1956 } 1957 /* XXX sync descriptor memory */ 1958 /* 1959 * Must provide the virtual address of the current 1960 * descriptor, the physical address, and the virtual 1961 * address of the next descriptor in the h/w chain. 1962 * This allows the HAL to look ahead to see if the 1963 * hardware is done with a descriptor by checking the 1964 * done bit in the following descriptor and the address 1965 * of the current descriptor the DMA engine is working 1966 * on. All this is necessary because of our use of 1967 * a self-linked list to avoid rx overruns. 1968 */ 1969 status = ath_hal_proc_rx_desc(ah, ds, 1970 bf->bf_daddr, PA2DESC(sc, ds->ds_link)); 1971 #ifdef AR_DEBUG 1972 if (ath_debug & ATH_DEBUG_RECV_DESC) 1973 ath_printrxbuf(bf, status == HAL_OK); 1974 #endif 1975 if (status == HAL_EINPROGRESS) 1976 break; 1977 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 1978 1979 if (ds->ds_rxstat.rs_more) { 1980 /* 1981 * Frame spans multiple descriptors; this 1982 * cannot happen yet as we don't support 1983 * jumbograms. If not in monitor mode, 1984 * discard the frame. 1985 */ 1986 1987 /* 1988 * Enable this if you want to see error 1989 * frames in Monitor mode. 1990 */ 1991 #ifdef ERROR_FRAMES 1992 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 1993 /* XXX statistic */ 1994 goto rx_next; 1995 } 1996 #endif 1997 /* fall thru for monitor mode handling... 
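 * (with ERROR_FRAMES left undefined the check above is compiled out,
 * so the partial frame is passed up regardless of operating mode)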
*/ 1998 1999 } else if (ds->ds_rxstat.rs_status != 0) { 2000 if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC) 2001 sc->sc_stats.ast_rx_crcerr++; 2002 if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO) 2003 sc->sc_stats.ast_rx_fifoerr++; 2004 if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) 2005 sc->sc_stats.ast_rx_badcrypt++; 2006 if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) { 2007 sc->sc_stats.ast_rx_phyerr++; 2008 phyerr = ds->ds_rxstat.rs_phyerr & 0x1f; 2009 sc->sc_stats.ast_rx_phy[phyerr]++; 2010 } 2011 2012 /* 2013 * reject error frames, we normally don't want 2014 * to see them in monitor mode. 2015 */ 2016 if ((ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT ) || 2017 (ds->ds_rxstat.rs_status & HAL_RXERR_PHY)) 2018 goto rx_next; 2019 2020 /* 2021 * In monitor mode, allow through packets that 2022 * cannot be decrypted 2023 */ 2024 if ((ds->ds_rxstat.rs_status & ~HAL_RXERR_DECRYPT) || 2025 sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR) 2026 goto rx_next; 2027 } 2028 2029 len = ds->ds_rxstat.rs_datalen; 2030 if (len < IEEE80211_MIN_LEN) { 2031 DPRINTF(ATH_DEBUG_RECV, ("%s: short packet %d\n", 2032 __func__, len)); 2033 sc->sc_stats.ast_rx_tooshort++; 2034 goto rx_next; 2035 } 2036 2037 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 2038 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 2039 2040 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2041 bf->bf_m = NULL; 2042 m->m_pkthdr.rcvif = ifp; 2043 m->m_pkthdr.len = m->m_len = len; 2044 2045 #if NBPFILTER > 0 2046 if (sc->sc_drvbpf) { 2047 struct mbuf mb; 2048 2049 sc->sc_rxtap.wr_flags = IEEE80211_RADIOTAP_F_FCS; 2050 sc->sc_rxtap.wr_rate = 2051 sc->sc_hwmap[ds->ds_rxstat.rs_rate] & 2052 IEEE80211_RATE_VAL; 2053 sc->sc_rxtap.wr_antenna = ds->ds_rxstat.rs_antenna; 2054 sc->sc_rxtap.wr_rssi = ds->ds_rxstat.rs_rssi; 2055 sc->sc_rxtap.wr_max_rssi = ic->ic_max_rssi; 2056 2057 mb.m_data = (caddr_t)&sc->sc_rxtap; 2058 mb.m_len = sc->sc_rxtap_len; 2059 mb.m_next = m; 2060 mb.m_nextpkt = NULL; 2061 mb.m_type = 0; 2062 mb.m_flags = 0; 2063 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN); 2064 } 2065 #endif 2066 m_adj(m, -IEEE80211_CRC_LEN); 2067 wh = mtod(m, struct ieee80211_frame *); 2068 rxi.rxi_flags = 0; 2069 if (!ath_softcrypto && (wh->i_fc[1] & IEEE80211_FC1_WEP)) { 2070 /* 2071 * WEP is decrypted by hardware. Clear WEP bit 2072 * and trim WEP header for ieee80211_input(). 2073 */ 2074 wh->i_fc[1] &= ~IEEE80211_FC1_WEP; 2075 bcopy(wh, &whbuf, sizeof(whbuf)); 2076 m_adj(m, IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN); 2077 wh = mtod(m, struct ieee80211_frame *); 2078 bcopy(&whbuf, wh, sizeof(whbuf)); 2079 /* 2080 * Also trim WEP ICV from the tail. 2081 */ 2082 m_adj(m, -IEEE80211_WEP_CRCLEN); 2083 /* 2084 * The header has probably moved. 2085 */ 2086 wh = mtod(m, struct ieee80211_frame *); 2087 2088 rxi.rxi_flags |= IEEE80211_RXI_HWDEC; 2089 } 2090 2091 /* 2092 * Locate the node for sender, track state, and 2093 * then pass this node (referenced) up to the 802.11 2094 * layer for its use. 2095 */ 2096 ni = ieee80211_find_rxnode(ic, wh); 2097 2098 /* 2099 * Record driver-specific state. 2100 */ 2101 an = ATH_NODE(ni); 2102 if (++(an->an_rx_hist_next) == ATH_RHIST_SIZE) 2103 an->an_rx_hist_next = 0; 2104 rh = &an->an_rx_hist[an->an_rx_hist_next]; 2105 rh->arh_ticks = ATH_TICKS(); 2106 rh->arh_rssi = ds->ds_rxstat.rs_rssi; 2107 rh->arh_antenna = ds->ds_rxstat.rs_antenna; 2108 2109 /* 2110 * Send frame up for processing. 
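 * ieee80211_input() takes ownership of the mbuf; the node reference
 * obtained from ieee80211_find_rxnode() above is released just below,
 * once the rssadapt statistics have been updated.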
2111 */ 2112 rxi.rxi_rssi = ds->ds_rxstat.rs_rssi; 2113 rxi.rxi_tstamp = ds->ds_rxstat.rs_tstamp; 2114 ieee80211_input(ifp, m, ni, &rxi); 2115 2116 /* Handle the rate adaption */ 2117 ieee80211_rssadapt_input(ic, ni, &an->an_rssadapt, 2118 ds->ds_rxstat.rs_rssi); 2119 2120 /* 2121 * The frame may have caused the node to be marked for 2122 * reclamation (e.g. in response to a DEAUTH message) 2123 * so use release_node here instead of unref_node. 2124 */ 2125 ieee80211_release_node(ic, ni); 2126 2127 rx_next: 2128 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 2129 } while (ath_rxbuf_init(sc, bf) == 0); 2130 2131 ath_hal_set_rx_signal(ah); /* rx signal state monitoring */ 2132 ath_hal_start_rx(ah); /* in case of RXEOL */ 2133 #undef PA2DESC 2134 } 2135 2136 /* 2137 * XXX Size of an ACK control frame in bytes. 2138 */ 2139 #define IEEE80211_ACK_SIZE (2+2+IEEE80211_ADDR_LEN+4) 2140 2141 int 2142 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, 2143 struct ath_buf *bf, struct mbuf *m0) 2144 { 2145 struct ieee80211com *ic = &sc->sc_ic; 2146 struct ath_hal *ah = sc->sc_ah; 2147 struct ifnet *ifp = &sc->sc_ic.ic_if; 2148 int i, error, iswep, hdrlen, pktlen, len, s; 2149 u_int8_t rix, cix, txrate, ctsrate; 2150 struct ath_desc *ds; 2151 struct mbuf *m; 2152 struct ieee80211_frame *wh; 2153 struct ieee80211_key *k; 2154 u_int32_t iv; 2155 u_int8_t *ivp; 2156 u_int8_t hdrbuf[sizeof(struct ieee80211_frame) + 2157 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN]; 2158 u_int subtype, flags, ctsduration, antenna; 2159 HAL_PKT_TYPE atype; 2160 const HAL_RATE_TABLE *rt; 2161 HAL_BOOL shortPreamble; 2162 struct ath_node *an; 2163 u_int8_t hwqueue = HAL_TX_QUEUE_ID_DATA_MIN; 2164 2165 wh = mtod(m0, struct ieee80211_frame *); 2166 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; 2167 hdrlen = sizeof(struct ieee80211_frame); 2168 pktlen = m0->m_pkthdr.len; 2169 2170 if (ath_softcrypto && iswep) { 2171 k = ieee80211_get_txkey(ic, wh, ni); 2172 if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL) 2173 return ENOMEM; 2174 wh = mtod(m0, struct ieee80211_frame *); 2175 2176 /* reset len in case we got a new mbuf */ 2177 pktlen = m0->m_pkthdr.len; 2178 } else if (!ath_softcrypto && iswep) { 2179 bcopy(mtod(m0, caddr_t), hdrbuf, hdrlen); 2180 m_adj(m0, hdrlen); 2181 M_PREPEND(m0, sizeof(hdrbuf), M_DONTWAIT); 2182 if (m0 == NULL) { 2183 sc->sc_stats.ast_tx_nombuf++; 2184 return ENOMEM; 2185 } 2186 ivp = hdrbuf + hdrlen; 2187 wh = mtod(m0, struct ieee80211_frame *); 2188 /* 2189 * XXX 2190 * IV must not duplicate during the lifetime of the key. 2191 * But no mechanism to renew keys is defined in IEEE 802.11 2192 * for WEP. And the IV may be duplicated at other stations 2193 * because the session key itself is shared. So we use a 2194 * pseudo random IV for now, though it is not the right way. 2195 * 2196 * NB: Rather than use a strictly random IV we select a 2197 * random one to start and then increment the value for 2198 * each frame. This is an explicit tradeoff between 2199 * overhead and security. Given the basic insecurity of 2200 * WEP this seems worthwhile. 2201 */ 2202 2203 /* 2204 * Skip 'bad' IVs from Fluhrer/Mantin/Shamir: 2205 * (B, 255, N) with 3 <= B < 16 and 0 <= N <= 255 2206 */ 2207 iv = ic->ic_iv; 2208 if ((iv & 0xff00) == 0xff00) { 2209 int B = (iv & 0xff0000) >> 16; 2210 if (3 <= B && B < 16) 2211 iv = (B+1) << 16; 2212 } 2213 ic->ic_iv = iv + 1; 2214 2215 /* 2216 * NB: Preserve byte order of IV for packet 2217 * sniffers; it doesn't matter otherwise. 
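 * (The fourth IV octet written below carries the WEP key index in its
 * two high bits.  As an example of the weak-IV test above, an IV of
 * 0x05ff7b is replaced by 0x060000 before being used.)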
2218 */ 2219 #if AH_BYTE_ORDER == AH_BIG_ENDIAN 2220 ivp[0] = iv >> 0; 2221 ivp[1] = iv >> 8; 2222 ivp[2] = iv >> 16; 2223 #else 2224 ivp[2] = iv >> 0; 2225 ivp[1] = iv >> 8; 2226 ivp[0] = iv >> 16; 2227 #endif 2228 ivp[3] = ic->ic_wep_txkey << 6; /* Key ID and pad */ 2229 bcopy(hdrbuf, mtod(m0, caddr_t), sizeof(hdrbuf)); 2230 /* 2231 * The length of hdrlen and pktlen must be increased for WEP 2232 */ 2233 len = IEEE80211_WEP_IVLEN + 2234 IEEE80211_WEP_KIDLEN + 2235 IEEE80211_WEP_CRCLEN; 2236 hdrlen += len; 2237 pktlen += len; 2238 } 2239 pktlen += IEEE80211_CRC_LEN; 2240 2241 /* 2242 * Load the DMA map so any coalescing is done. This 2243 * also calculates the number of descriptors we need. 2244 */ 2245 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, 2246 BUS_DMA_NOWAIT); 2247 /* 2248 * Discard null packets and check for packets that 2249 * require too many TX descriptors. We try to convert 2250 * the latter to a cluster. 2251 */ 2252 if (error == EFBIG) { /* too many desc's, linearize */ 2253 sc->sc_stats.ast_tx_linear++; 2254 MGETHDR(m, M_DONTWAIT, MT_DATA); 2255 if (m == NULL) { 2256 sc->sc_stats.ast_tx_nombuf++; 2257 m_freem(m0); 2258 return ENOMEM; 2259 } 2260 2261 M_DUP_PKTHDR(m, m0); 2262 MCLGET(m, M_DONTWAIT); 2263 if ((m->m_flags & M_EXT) == 0) { 2264 sc->sc_stats.ast_tx_nomcl++; 2265 m_freem(m0); 2266 m_free(m); 2267 return ENOMEM; 2268 } 2269 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t)); 2270 m_freem(m0); 2271 m->m_len = m->m_pkthdr.len; 2272 m0 = m; 2273 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, 2274 BUS_DMA_NOWAIT); 2275 if (error != 0) { 2276 sc->sc_stats.ast_tx_busdma++; 2277 m_freem(m0); 2278 return error; 2279 } 2280 KASSERT(bf->bf_nseg == 1, 2281 ("ath_tx_start: packet not one segment; nseg %u", 2282 bf->bf_nseg)); 2283 } else if (error != 0) { 2284 sc->sc_stats.ast_tx_busdma++; 2285 m_freem(m0); 2286 return error; 2287 } else if (bf->bf_nseg == 0) { /* null packet, discard */ 2288 sc->sc_stats.ast_tx_nodata++; 2289 m_freem(m0); 2290 return EIO; 2291 } 2292 DPRINTF(ATH_DEBUG_XMIT, ("%s: m %p len %u\n", __func__, m0, pktlen)); 2293 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 2294 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 2295 bf->bf_m = m0; 2296 bf->bf_node = ni; /* NB: held reference */ 2297 an = ATH_NODE(ni); 2298 2299 /* setup descriptors */ 2300 ds = bf->bf_desc; 2301 rt = sc->sc_currates; 2302 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 2303 2304 /* 2305 * Calculate Atheros packet type from IEEE80211 packet header 2306 * and setup for rate calculations. 
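 * Management and control frames always go out at the lowest rate
 * (rix 0); beacons, probe responses, ATIMs and PS-POLLs additionally
 * get dedicated HAL packet types.  Everything else stays
 * HAL_PKT_TYPE_NORMAL and has its rate chosen by rssadapt below.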
2307 */ 2308 bf->bf_id.id_node = NULL; 2309 atype = HAL_PKT_TYPE_NORMAL; /* default */ 2310 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 2311 case IEEE80211_FC0_TYPE_MGT: 2312 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2313 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) { 2314 atype = HAL_PKT_TYPE_BEACON; 2315 } else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) { 2316 atype = HAL_PKT_TYPE_PROBE_RESP; 2317 } else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) { 2318 atype = HAL_PKT_TYPE_ATIM; 2319 } 2320 rix = 0; /* XXX lowest rate */ 2321 break; 2322 case IEEE80211_FC0_TYPE_CTL: 2323 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2324 if (subtype == IEEE80211_FC0_SUBTYPE_PS_POLL) 2325 atype = HAL_PKT_TYPE_PSPOLL; 2326 rix = 0; /* XXX lowest rate */ 2327 break; 2328 default: 2329 /* remember link conditions for rate adaptation algorithm */ 2330 if (ic->ic_fixed_rate == -1) { 2331 bf->bf_id.id_len = m0->m_pkthdr.len; 2332 bf->bf_id.id_rateidx = ni->ni_txrate; 2333 bf->bf_id.id_node = ni; 2334 bf->bf_id.id_rssi = ath_node_getrssi(ic, ni); 2335 } 2336 ni->ni_txrate = ieee80211_rssadapt_choose(&an->an_rssadapt, 2337 &ni->ni_rates, wh, m0->m_pkthdr.len, ic->ic_fixed_rate, 2338 ifp->if_xname, 0); 2339 rix = sc->sc_rixmap[ni->ni_rates.rs_rates[ni->ni_txrate] & 2340 IEEE80211_RATE_VAL]; 2341 if (rix == 0xff) { 2342 printf("%s: bogus xmit rate 0x%x (idx 0x%x)\n", 2343 ifp->if_xname, ni->ni_rates.rs_rates[ni->ni_txrate], 2344 ni->ni_txrate); 2345 sc->sc_stats.ast_tx_badrate++; 2346 m_freem(m0); 2347 return EIO; 2348 } 2349 break; 2350 } 2351 2352 /* 2353 * NB: the 802.11 layer marks whether or not we should 2354 * use short preamble based on the current mode and 2355 * negotiated parameters. 2356 */ 2357 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 2358 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 2359 txrate = rt->info[rix].rateCode | rt->info[rix].shortPreamble; 2360 shortPreamble = AH_TRUE; 2361 sc->sc_stats.ast_tx_shortpre++; 2362 } else { 2363 txrate = rt->info[rix].rateCode; 2364 shortPreamble = AH_FALSE; 2365 } 2366 2367 /* 2368 * Calculate miscellaneous flags. 2369 */ 2370 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for wep errors */ 2371 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2372 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ 2373 sc->sc_stats.ast_tx_noack++; 2374 } else if (pktlen > ic->ic_rtsthreshold) { 2375 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ 2376 sc->sc_stats.ast_tx_rts++; 2377 } 2378 2379 /* 2380 * Calculate duration. This logically belongs in the 802.11 2381 * layer but it lacks sufficient information to calculate it. 2382 */ 2383 if ((flags & HAL_TXDESC_NOACK) == 0 && 2384 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 2385 u_int16_t dur; 2386 /* 2387 * XXX not right with fragmentation. 2388 */ 2389 dur = ath_hal_computetxtime(ah, rt, IEEE80211_ACK_SIZE, 2390 rix, shortPreamble); 2391 *((u_int16_t*) wh->i_dur) = htole16(dur); 2392 } 2393 2394 /* 2395 * Calculate RTS/CTS rate and duration if needed. 2396 */ 2397 ctsduration = 0; 2398 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) { 2399 /* 2400 * CTS transmit rate is derived from the transmit rate 2401 * by looking in the h/w rate table. We must also factor 2402 * in whether or not a short preamble is to be used. 
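 * The duration accumulated below covers the CTS (when RTS/CTS is in
 * use), the data frame itself and, when an ACK is expected, the ACK;
 * each leg is computed by ath_hal_computetxtime() for the PHY in use.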
2403 */ 2404 cix = rt->info[rix].controlRate; 2405 ctsrate = rt->info[cix].rateCode; 2406 if (shortPreamble) 2407 ctsrate |= rt->info[cix].shortPreamble; 2408 /* 2409 * Compute the transmit duration based on the size 2410 * of an ACK frame. We call into the HAL to do the 2411 * computation since it depends on the characteristics 2412 * of the actual PHY being used. 2413 */ 2414 if (flags & HAL_TXDESC_RTSENA) { /* SIFS + CTS */ 2415 ctsduration += ath_hal_computetxtime(ah, 2416 rt, IEEE80211_ACK_SIZE, cix, shortPreamble); 2417 } 2418 /* SIFS + data */ 2419 ctsduration += ath_hal_computetxtime(ah, 2420 rt, pktlen, rix, shortPreamble); 2421 if ((flags & HAL_TXDESC_NOACK) == 0) { /* SIFS + ACK */ 2422 ctsduration += ath_hal_computetxtime(ah, 2423 rt, IEEE80211_ACK_SIZE, cix, shortPreamble); 2424 } 2425 } else 2426 ctsrate = 0; 2427 2428 /* 2429 * For now use the antenna on which the last good 2430 * frame was received on. We assume this field is 2431 * initialized to 0 which gives us ``auto'' or the 2432 * ``default'' antenna. 2433 */ 2434 if (an->an_tx_antenna) { 2435 antenna = an->an_tx_antenna; 2436 } else { 2437 antenna = an->an_rx_hist[an->an_rx_hist_next].arh_antenna; 2438 } 2439 2440 #if NBPFILTER > 0 2441 if (ic->ic_rawbpf) 2442 bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT); 2443 2444 if (sc->sc_drvbpf) { 2445 struct mbuf mb; 2446 2447 sc->sc_txtap.wt_flags = 0; 2448 if (shortPreamble) 2449 sc->sc_txtap.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2450 if (!ath_softcrypto && iswep) 2451 sc->sc_txtap.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2452 sc->sc_txtap.wt_rate = ni->ni_rates.rs_rates[ni->ni_txrate] & 2453 IEEE80211_RATE_VAL; 2454 sc->sc_txtap.wt_txpower = 30; 2455 sc->sc_txtap.wt_antenna = antenna; 2456 sc->sc_txtap.wt_hwqueue = hwqueue; 2457 2458 mb.m_data = (caddr_t)&sc->sc_txtap; 2459 mb.m_len = sc->sc_txtap_len; 2460 mb.m_next = m0; 2461 mb.m_nextpkt = NULL; 2462 mb.m_type = 0; 2463 mb.m_flags = 0; 2464 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT); 2465 } 2466 #endif 2467 2468 /* 2469 * Formulate first tx descriptor with tx controls. 2470 */ 2471 /* XXX check return value? */ 2472 ath_hal_setup_tx_desc(ah, ds 2473 , pktlen /* packet length */ 2474 , hdrlen /* header length */ 2475 , atype /* Atheros packet type */ 2476 , 60 /* txpower XXX */ 2477 , txrate, 1+10 /* series 0 rate/tries */ 2478 , iswep ? sc->sc_ic.ic_wep_txkey : HAL_TXKEYIX_INVALID 2479 , antenna /* antenna mode */ 2480 , flags /* flags */ 2481 , ctsrate /* rts/cts rate */ 2482 , ctsduration /* rts/cts duration */ 2483 ); 2484 #ifdef notyet 2485 ath_hal_setup_xtx_desc(ah, ds 2486 , AH_FALSE /* short preamble */ 2487 , 0, 0 /* series 1 rate/tries */ 2488 , 0, 0 /* series 2 rate/tries */ 2489 , 0, 0 /* series 3 rate/tries */ 2490 ); 2491 #endif 2492 /* 2493 * Fillin the remainder of the descriptor info. 2494 */ 2495 for (i = 0; i < bf->bf_nseg; i++, ds++) { 2496 ds->ds_data = bf->bf_segs[i].ds_addr; 2497 if (i == bf->bf_nseg - 1) { 2498 ds->ds_link = 0; 2499 } else { 2500 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1); 2501 } 2502 ath_hal_fill_tx_desc(ah, ds 2503 , bf->bf_segs[i].ds_len /* segment length */ 2504 , i == 0 /* first segment */ 2505 , i == bf->bf_nseg - 1 /* last segment */ 2506 ); 2507 DPRINTF(ATH_DEBUG_XMIT, 2508 ("%s: %d: %08x %08x %08x %08x %08x %08x\n", 2509 __func__, i, ds->ds_link, ds->ds_data, 2510 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1])); 2511 } 2512 2513 /* 2514 * Insert the frame on the outbound list and 2515 * pass it on to the hardware. 
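 * If the DMA list is idle (sc_txlink == NULL) the buffer becomes the
 * new TXDP handed to the HAL; otherwise it is chained onto the
 * previous frame's last descriptor through ds_link.  Transmission is
 * then (re)started on the data queue.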
2516 */ 2517 s = splnet(); 2518 TAILQ_INSERT_TAIL(&sc->sc_txq, bf, bf_list); 2519 if (sc->sc_txlink == NULL) { 2520 ath_hal_put_tx_buf(ah, sc->sc_txhalq[hwqueue], bf->bf_daddr); 2521 DPRINTF(ATH_DEBUG_XMIT, ("%s: TXDP0 = %p (%p)\n", __func__, 2522 (caddr_t)bf->bf_daddr, bf->bf_desc)); 2523 } else { 2524 *sc->sc_txlink = bf->bf_daddr; 2525 DPRINTF(ATH_DEBUG_XMIT, ("%s: link(%p)=%p (%p)\n", __func__, 2526 sc->sc_txlink, (caddr_t)bf->bf_daddr, bf->bf_desc)); 2527 } 2528 sc->sc_txlink = &bf->bf_desc[bf->bf_nseg - 1].ds_link; 2529 splx(s); 2530 2531 ath_hal_tx_start(ah, sc->sc_txhalq[hwqueue]); 2532 return 0; 2533 } 2534 2535 void 2536 ath_tx_proc(void *arg, int npending) 2537 { 2538 struct ath_softc *sc = arg; 2539 struct ath_hal *ah = sc->sc_ah; 2540 struct ath_buf *bf; 2541 struct ieee80211com *ic = &sc->sc_ic; 2542 struct ifnet *ifp = &ic->ic_if; 2543 struct ath_desc *ds; 2544 struct ieee80211_node *ni; 2545 struct ath_node *an; 2546 int sr, lr, s; 2547 HAL_STATUS status; 2548 2549 for (;;) { 2550 s = splnet(); 2551 bf = TAILQ_FIRST(&sc->sc_txq); 2552 if (bf == NULL) { 2553 sc->sc_txlink = NULL; 2554 splx(s); 2555 break; 2556 } 2557 /* only the last descriptor is needed */ 2558 ds = &bf->bf_desc[bf->bf_nseg - 1]; 2559 status = ath_hal_proc_tx_desc(ah, ds); 2560 #ifdef AR_DEBUG 2561 if (ath_debug & ATH_DEBUG_XMIT_DESC) 2562 ath_printtxbuf(bf, status == HAL_OK); 2563 #endif 2564 if (status == HAL_EINPROGRESS) { 2565 splx(s); 2566 break; 2567 } 2568 TAILQ_REMOVE(&sc->sc_txq, bf, bf_list); 2569 splx(s); 2570 2571 ni = bf->bf_node; 2572 if (ni != NULL) { 2573 an = (struct ath_node *) ni; 2574 if (ds->ds_txstat.ts_status == 0) { 2575 if (bf->bf_id.id_node != NULL) 2576 ieee80211_rssadapt_raise_rate(ic, 2577 &an->an_rssadapt, &bf->bf_id); 2578 an->an_tx_antenna = ds->ds_txstat.ts_antenna; 2579 } else { 2580 if (bf->bf_id.id_node != NULL) 2581 ieee80211_rssadapt_lower_rate(ic, ni, 2582 &an->an_rssadapt, &bf->bf_id); 2583 ifp->if_oerrors++; 2584 if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY) 2585 sc->sc_stats.ast_tx_xretries++; 2586 if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO) 2587 sc->sc_stats.ast_tx_fifoerr++; 2588 if (ds->ds_txstat.ts_status & HAL_TXERR_FILT) 2589 sc->sc_stats.ast_tx_filtered++; 2590 an->an_tx_antenna = 0; /* invalidate */ 2591 } 2592 sr = ds->ds_txstat.ts_shortretry; 2593 lr = ds->ds_txstat.ts_longretry; 2594 sc->sc_stats.ast_tx_shortretry += sr; 2595 sc->sc_stats.ast_tx_longretry += lr; 2596 /* 2597 * Reclaim reference to node. 2598 * 2599 * NB: the node may be reclaimed here if, for example 2600 * this is a DEAUTH message that was sent and the 2601 * node was timed out due to inactivity. 2602 */ 2603 ieee80211_release_node(ic, ni); 2604 } 2605 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 2606 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2607 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2608 m_freem(bf->bf_m); 2609 bf->bf_m = NULL; 2610 bf->bf_node = NULL; 2611 2612 s = splnet(); 2613 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 2614 splx(s); 2615 } 2616 ifp->if_flags &= ~IFF_OACTIVE; 2617 sc->sc_tx_timer = 0; 2618 2619 ath_start(ifp); 2620 } 2621 2622 /* 2623 * Drain the transmit queue and reclaim resources. 
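 * The hardware queues are stopped first (unless the device has been
 * marked invalid), then every buffer still on sc_txq is unloaded, its
 * mbuf freed, its node reference released and the ath_buf returned to
 * the free list.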
2624 */ 2625 void 2626 ath_draintxq(struct ath_softc *sc) 2627 { 2628 struct ath_hal *ah = sc->sc_ah; 2629 struct ieee80211com *ic = &sc->sc_ic; 2630 struct ifnet *ifp = &ic->ic_if; 2631 struct ieee80211_node *ni; 2632 struct ath_buf *bf; 2633 int s, i; 2634 2635 /* XXX return value */ 2636 if (!sc->sc_invalid) { 2637 for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) { 2638 /* don't touch the hardware if marked invalid */ 2639 (void) ath_hal_stop_tx_dma(ah, sc->sc_txhalq[i]); 2640 DPRINTF(ATH_DEBUG_RESET, 2641 ("%s: tx queue %d (%p), link %p\n", __func__, i, 2642 (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah, 2643 sc->sc_txhalq[i]), sc->sc_txlink)); 2644 } 2645 (void) ath_hal_stop_tx_dma(ah, sc->sc_bhalq); 2646 DPRINTF(ATH_DEBUG_RESET, 2647 ("%s: beacon queue (%p)\n", __func__, 2648 (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah, sc->sc_bhalq))); 2649 } 2650 for (;;) { 2651 s = splnet(); 2652 bf = TAILQ_FIRST(&sc->sc_txq); 2653 if (bf == NULL) { 2654 sc->sc_txlink = NULL; 2655 splx(s); 2656 break; 2657 } 2658 TAILQ_REMOVE(&sc->sc_txq, bf, bf_list); 2659 splx(s); 2660 #ifdef AR_DEBUG 2661 if (ath_debug & ATH_DEBUG_RESET) { 2662 ath_printtxbuf(bf, 2663 ath_hal_proc_tx_desc(ah, bf->bf_desc) == HAL_OK); 2664 } 2665 #endif /* AR_DEBUG */ 2666 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2667 m_freem(bf->bf_m); 2668 bf->bf_m = NULL; 2669 ni = bf->bf_node; 2670 bf->bf_node = NULL; 2671 s = splnet(); 2672 if (ni != NULL) { 2673 /* 2674 * Reclaim node reference. 2675 */ 2676 ieee80211_release_node(ic, ni); 2677 } 2678 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 2679 splx(s); 2680 } 2681 ifp->if_flags &= ~IFF_OACTIVE; 2682 sc->sc_tx_timer = 0; 2683 } 2684 2685 /* 2686 * Disable the receive h/w in preparation for a reset. 2687 */ 2688 void 2689 ath_stoprecv(struct ath_softc *sc) 2690 { 2691 #define PA2DESC(_sc, _pa) \ 2692 ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \ 2693 ((_pa) - (_sc)->sc_desc_paddr))) 2694 struct ath_hal *ah = sc->sc_ah; 2695 2696 ath_hal_stop_pcu_recv(ah); /* disable PCU */ 2697 ath_hal_set_rx_filter(ah, 0); /* clear recv filter */ 2698 ath_hal_stop_rx_dma(ah); /* disable DMA engine */ 2699 #ifdef AR_DEBUG 2700 if (ath_debug & ATH_DEBUG_RESET) { 2701 struct ath_buf *bf; 2702 2703 printf("%s: rx queue %p, link %p\n", __func__, 2704 (caddr_t)(u_intptr_t)ath_hal_get_rx_buf(ah), sc->sc_rxlink); 2705 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 2706 struct ath_desc *ds = bf->bf_desc; 2707 if (ath_hal_proc_rx_desc(ah, ds, bf->bf_daddr, 2708 PA2DESC(sc, ds->ds_link)) == HAL_OK) 2709 ath_printrxbuf(bf, 1); 2710 } 2711 } 2712 #endif 2713 sc->sc_rxlink = NULL; /* just in case */ 2714 #undef PA2DESC 2715 } 2716 2717 /* 2718 * Enable the receive h/w following a reset. 2719 */ 2720 int 2721 ath_startrecv(struct ath_softc *sc) 2722 { 2723 struct ath_hal *ah = sc->sc_ah; 2724 struct ath_buf *bf; 2725 2726 sc->sc_rxlink = NULL; 2727 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 2728 int error = ath_rxbuf_init(sc, bf); 2729 if (error != 0) { 2730 DPRINTF(ATH_DEBUG_RECV, 2731 ("%s: ath_rxbuf_init failed %d\n", 2732 __func__, error)); 2733 return error; 2734 } 2735 } 2736 2737 bf = TAILQ_FIRST(&sc->sc_rxbuf); 2738 ath_hal_put_rx_buf(ah, bf->bf_daddr); 2739 ath_hal_start_rx(ah); /* enable recv descriptors */ 2740 ath_mode_init(sc); /* set filters, etc. */ 2741 ath_hal_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ 2742 return 0; 2743 } 2744 2745 /* 2746 * Set/change channels. If the channel is really being changed, 2747 * it's done by resetting the chip. 
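 * (the radio is retuned as part of the full ath_hal_reset() below)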
To accomplish this we must 2748 * first cleanup any pending DMA, then restart stuff after a la 2749 * ath_init. 2750 */ 2751 int 2752 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 2753 { 2754 struct ath_hal *ah = sc->sc_ah; 2755 struct ieee80211com *ic = &sc->sc_ic; 2756 struct ifnet *ifp = &ic->ic_if; 2757 2758 DPRINTF(ATH_DEBUG_ANY, ("%s: %u (%u MHz) -> %u (%u MHz)\n", __func__, 2759 ieee80211_chan2ieee(ic, ic->ic_ibss_chan), 2760 ic->ic_ibss_chan->ic_freq, 2761 ieee80211_chan2ieee(ic, chan), chan->ic_freq)); 2762 if (chan != ic->ic_ibss_chan) { 2763 HAL_STATUS status; 2764 HAL_CHANNEL hchan; 2765 enum ieee80211_phymode mode; 2766 2767 /* 2768 * To switch channels clear any pending DMA operations; 2769 * wait long enough for the RX fifo to drain, reset the 2770 * hardware at the new frequency, and then re-enable 2771 * the relevant bits of the h/w. 2772 */ 2773 ath_hal_set_intr(ah, 0); /* disable interrupts */ 2774 ath_draintxq(sc); /* clear pending tx frames */ 2775 ath_stoprecv(sc); /* turn off frame recv */ 2776 /* 2777 * Convert to a HAL channel description with 2778 * the flags constrained to reflect the current 2779 * operating mode. 2780 */ 2781 hchan.channel = chan->ic_freq; 2782 hchan.channelFlags = ath_chan2flags(ic, chan); 2783 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, 2784 &status)) { 2785 printf("%s: ath_chan_set: unable to reset " 2786 "channel %u (%u MHz)\n", ifp->if_xname, 2787 ieee80211_chan2ieee(ic, chan), chan->ic_freq); 2788 return EIO; 2789 } 2790 ath_set_slot_time(sc); 2791 /* 2792 * Re-enable rx framework. 2793 */ 2794 if (ath_startrecv(sc) != 0) { 2795 printf("%s: ath_chan_set: unable to restart recv " 2796 "logic\n", ifp->if_xname); 2797 return EIO; 2798 } 2799 2800 #if NBPFILTER > 0 2801 /* 2802 * Update BPF state. 2803 */ 2804 sc->sc_txtap.wt_chan_freq = sc->sc_rxtap.wr_chan_freq = 2805 htole16(chan->ic_freq); 2806 sc->sc_txtap.wt_chan_flags = sc->sc_rxtap.wr_chan_flags = 2807 htole16(chan->ic_flags); 2808 #endif 2809 2810 /* 2811 * Change channels and update the h/w rate map 2812 * if we're switching; e.g. 11a to 11b/g. 2813 */ 2814 ic->ic_ibss_chan = chan; 2815 mode = ieee80211_chan2mode(ic, chan); 2816 if (mode != sc->sc_curmode) 2817 ath_setcurmode(sc, mode); 2818 2819 /* 2820 * Re-enable interrupts. 2821 */ 2822 ath_hal_set_intr(ah, sc->sc_imask); 2823 } 2824 return 0; 2825 } 2826 2827 void 2828 ath_next_scan(void *arg) 2829 { 2830 struct ath_softc *sc = arg; 2831 struct ieee80211com *ic = &sc->sc_ic; 2832 struct ifnet *ifp = &ic->ic_if; 2833 int s; 2834 2835 /* don't call ath_start w/o network interrupts blocked */ 2836 s = splnet(); 2837 2838 if (ic->ic_state == IEEE80211_S_SCAN) 2839 ieee80211_next_scan(ifp); 2840 splx(s); 2841 } 2842 2843 int 2844 ath_set_slot_time(struct ath_softc *sc) 2845 { 2846 struct ath_hal *ah = sc->sc_ah; 2847 struct ieee80211com *ic = &sc->sc_ic; 2848 2849 if (ic->ic_flags & IEEE80211_F_SHSLOT) 2850 return (ath_hal_set_slot_time(ah, HAL_SLOT_TIME_9)); 2851 2852 return (0); 2853 } 2854 2855 /* 2856 * Periodically recalibrate the PHY to account 2857 * for temperature/environment changes. 2858 */ 2859 void 2860 ath_calibrate(void *arg) 2861 { 2862 struct ath_softc *sc = arg; 2863 struct ath_hal *ah = sc->sc_ah; 2864 struct ieee80211com *ic = &sc->sc_ic; 2865 struct ieee80211_channel *c; 2866 HAL_CHANNEL hchan; 2867 int s; 2868 2869 sc->sc_stats.ast_per_cal++; 2870 2871 /* 2872 * Convert to a HAL channel description with the flags 2873 * constrained to reflect the current operating mode. 
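 * If the HAL reports the RF gain as out of bounds, the chip is reset
 * to load new gain values before ath_hal_calibrate() is run on the
 * current channel.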
2874 */ 2875 c = ic->ic_ibss_chan; 2876 hchan.channel = c->ic_freq; 2877 hchan.channelFlags = ath_chan2flags(ic, c); 2878 2879 s = splnet(); 2880 DPRINTF(ATH_DEBUG_CALIBRATE, 2881 ("%s: channel %u/%x\n", __func__, c->ic_freq, c->ic_flags)); 2882 2883 if (ath_hal_get_rf_gain(ah) == HAL_RFGAIN_NEED_CHANGE) { 2884 /* 2885 * Rfgain is out of bounds, reset the chip 2886 * to load new gain values. 2887 */ 2888 sc->sc_stats.ast_per_rfgain++; 2889 ath_reset(sc, 1); 2890 } 2891 if (!ath_hal_calibrate(ah, &hchan)) { 2892 DPRINTF(ATH_DEBUG_ANY, 2893 ("%s: calibration of channel %u failed\n", 2894 __func__, c->ic_freq)); 2895 sc->sc_stats.ast_per_calfail++; 2896 } 2897 timeout_add_sec(&sc->sc_cal_to, ath_calinterval); 2898 splx(s); 2899 } 2900 2901 void 2902 ath_ledstate(struct ath_softc *sc, enum ieee80211_state state) 2903 { 2904 HAL_LED_STATE led = HAL_LED_INIT; 2905 u_int32_t softled = AR5K_SOFTLED_OFF; 2906 2907 switch (state) { 2908 case IEEE80211_S_INIT: 2909 break; 2910 case IEEE80211_S_SCAN: 2911 led = HAL_LED_SCAN; 2912 break; 2913 case IEEE80211_S_AUTH: 2914 led = HAL_LED_AUTH; 2915 break; 2916 case IEEE80211_S_ASSOC: 2917 led = HAL_LED_ASSOC; 2918 softled = AR5K_SOFTLED_ON; 2919 break; 2920 case IEEE80211_S_RUN: 2921 led = HAL_LED_RUN; 2922 softled = AR5K_SOFTLED_ON; 2923 break; 2924 } 2925 2926 ath_hal_set_ledstate(sc->sc_ah, led); 2927 if (sc->sc_softled) { 2928 ath_hal_set_gpio_output(sc->sc_ah, AR5K_SOFTLED_PIN); 2929 ath_hal_set_gpio(sc->sc_ah, AR5K_SOFTLED_PIN, softled); 2930 } 2931 } 2932 2933 int 2934 ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 2935 { 2936 struct ifnet *ifp = &ic->ic_if; 2937 struct ath_softc *sc = ifp->if_softc; 2938 struct ath_hal *ah = sc->sc_ah; 2939 struct ieee80211_node *ni; 2940 const u_int8_t *bssid; 2941 int error, i; 2942 2943 u_int32_t rfilt; 2944 2945 DPRINTF(ATH_DEBUG_ANY, ("%s: %s -> %s\n", __func__, 2946 ieee80211_state_name[ic->ic_state], 2947 ieee80211_state_name[nstate])); 2948 2949 timeout_del(&sc->sc_scan_to); 2950 timeout_del(&sc->sc_cal_to); 2951 ath_ledstate(sc, nstate); 2952 2953 if (nstate == IEEE80211_S_INIT) { 2954 timeout_del(&sc->sc_rssadapt_to); 2955 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 2956 ath_hal_set_intr(ah, sc->sc_imask); 2957 return (*sc->sc_newstate)(ic, nstate, arg); 2958 } 2959 ni = ic->ic_bss; 2960 error = ath_chan_set(sc, ni->ni_chan); 2961 if (error != 0) 2962 goto bad; 2963 rfilt = ath_calcrxfilter(sc); 2964 if (nstate == IEEE80211_S_SCAN || 2965 ic->ic_opmode == IEEE80211_M_MONITOR) { 2966 bssid = sc->sc_broadcast_addr; 2967 } else { 2968 bssid = ni->ni_bssid; 2969 } 2970 ath_hal_set_rx_filter(ah, rfilt); 2971 DPRINTF(ATH_DEBUG_ANY, ("%s: RX filter 0x%x bssid %s\n", 2972 __func__, rfilt, ether_sprintf((u_char*)bssid))); 2973 2974 if (nstate == IEEE80211_S_RUN && ic->ic_opmode == IEEE80211_M_STA) { 2975 ath_hal_set_associd(ah, bssid, ni->ni_associd); 2976 } else { 2977 ath_hal_set_associd(ah, bssid, 0); 2978 } 2979 2980 if (!ath_softcrypto && (ic->ic_flags & IEEE80211_F_WEPON)) { 2981 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 2982 if (ath_hal_is_key_valid(ah, i)) 2983 ath_hal_set_key_lladdr(ah, i, bssid); 2984 } 2985 } 2986 2987 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 2988 /* nothing to do */ 2989 } else if (nstate == IEEE80211_S_RUN) { 2990 DPRINTF(ATH_DEBUG_ANY, ("%s(RUN): " 2991 "ic_flags=0x%08x iv=%d bssid=%s " 2992 "capinfo=0x%04x chan=%d\n", 2993 __func__, 2994 ic->ic_flags, 2995 ni->ni_intval, 2996 ether_sprintf(ni->ni_bssid), 2997 ni->ni_capinfo, 2998 
ieee80211_chan2ieee(ic, ni->ni_chan))); 2999 3000 /* 3001 * Allocate and setup the beacon frame for AP or adhoc mode. 3002 */ 3003 #ifndef IEEE80211_STA_ONLY 3004 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 3005 ic->ic_opmode == IEEE80211_M_IBSS) { 3006 error = ath_beacon_alloc(sc, ni); 3007 if (error != 0) 3008 goto bad; 3009 } 3010 #endif 3011 /* 3012 * Configure the beacon and sleep timers. 3013 */ 3014 ath_beacon_config(sc); 3015 } else { 3016 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 3017 ath_hal_set_intr(ah, sc->sc_imask); 3018 } 3019 3020 /* 3021 * Invoke the parent method to complete the work. 3022 */ 3023 error = (*sc->sc_newstate)(ic, nstate, arg); 3024 3025 if (nstate == IEEE80211_S_RUN) { 3026 /* start periodic recalibration timer */ 3027 timeout_add_sec(&sc->sc_cal_to, ath_calinterval); 3028 3029 if (ic->ic_opmode != IEEE80211_M_MONITOR) 3030 timeout_add(&sc->sc_rssadapt_to, hz / 10); 3031 } else if (nstate == IEEE80211_S_SCAN) { 3032 /* start ap/neighbor scan timer */ 3033 timeout_add_msec(&sc->sc_scan_to, ath_dwelltime); 3034 } 3035 bad: 3036 return error; 3037 } 3038 3039 #ifndef IEEE80211_STA_ONLY 3040 void 3041 ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m, 3042 struct ieee80211_node *ni, struct ieee80211_rxinfo *rxi, int subtype) 3043 { 3044 struct ath_softc *sc = (struct ath_softc*)ic->ic_softc; 3045 struct ath_hal *ah = sc->sc_ah; 3046 3047 (*sc->sc_recv_mgmt)(ic, m, ni, rxi, subtype); 3048 3049 switch (subtype) { 3050 case IEEE80211_FC0_SUBTYPE_PROBE_RESP: 3051 case IEEE80211_FC0_SUBTYPE_BEACON: 3052 if (ic->ic_opmode != IEEE80211_M_IBSS || 3053 ic->ic_state != IEEE80211_S_RUN) 3054 break; 3055 if (ieee80211_ibss_merge(ic, ni, ath_hal_get_tsf64(ah)) == 3056 ENETRESET) 3057 ath_hal_set_associd(ah, ic->ic_bss->ni_bssid, 0); 3058 break; 3059 default: 3060 break; 3061 } 3062 return; 3063 } 3064 #endif 3065 3066 /* 3067 * Setup driver-specific state for a newly associated node. 3068 * Note that we're called also on a re-associate, the isnew 3069 * param tells us if this is the first time or not. 3070 */ 3071 void 3072 ath_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew) 3073 { 3074 if (ic->ic_opmode == IEEE80211_M_MONITOR) 3075 return; 3076 } 3077 3078 int 3079 ath_getchannels(struct ath_softc *sc, HAL_BOOL outdoor, HAL_BOOL xchanmode) 3080 { 3081 struct ieee80211com *ic = &sc->sc_ic; 3082 struct ifnet *ifp = &ic->ic_if; 3083 struct ath_hal *ah = sc->sc_ah; 3084 HAL_CHANNEL *chans; 3085 int i, ix, nchan; 3086 3087 sc->sc_nchan = 0; 3088 chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL), 3089 M_TEMP, M_NOWAIT); 3090 if (chans == NULL) { 3091 printf("%s: unable to allocate channel table\n", ifp->if_xname); 3092 return ENOMEM; 3093 } 3094 if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan, 3095 HAL_MODE_ALL, outdoor, xchanmode)) { 3096 printf("%s: unable to collect channel list from hal\n", 3097 ifp->if_xname); 3098 free(chans, M_TEMP); 3099 return EINVAL; 3100 } 3101 3102 /* 3103 * Convert HAL channels to ieee80211 ones and insert 3104 * them in the table according to their channel number. 
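 * Channels that map to the same IEEE number (e.g. 11b and 11g on the
 * same frequency) share a table slot and simply have their flags
 * OR'd together.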
3105 */ 3106 for (i = 0; i < nchan; i++) { 3107 HAL_CHANNEL *c = &chans[i]; 3108 ix = ieee80211_mhz2ieee(c->channel, c->channelFlags); 3109 if (ix > IEEE80211_CHAN_MAX) { 3110 printf("%s: bad hal channel %u (%u/%x) ignored\n", 3111 ifp->if_xname, ix, c->channel, c->channelFlags); 3112 continue; 3113 } 3114 DPRINTF(ATH_DEBUG_ANY, 3115 ("%s: HAL channel %d/%d freq %d flags %#04x idx %d\n", 3116 sc->sc_dev.dv_xname, i, nchan, c->channel, c->channelFlags, 3117 ix)); 3118 /* NB: flags are known to be compatible */ 3119 if (ic->ic_channels[ix].ic_freq == 0) { 3120 ic->ic_channels[ix].ic_freq = c->channel; 3121 ic->ic_channels[ix].ic_flags = c->channelFlags; 3122 } else { 3123 /* channels overlap; e.g. 11g and 11b */ 3124 ic->ic_channels[ix].ic_flags |= c->channelFlags; 3125 } 3126 /* count valid channels */ 3127 sc->sc_nchan++; 3128 } 3129 free(chans, M_TEMP); 3130 3131 if (sc->sc_nchan < 1) { 3132 printf("%s: no valid channels for regdomain %s(%u)\n", 3133 ifp->if_xname, ieee80211_regdomain2name(ah->ah_regdomain), 3134 ah->ah_regdomain); 3135 return ENOENT; 3136 } 3137 3138 /* set an initial channel */ 3139 ic->ic_ibss_chan = &ic->ic_channels[0]; 3140 3141 return 0; 3142 } 3143 3144 int 3145 ath_rate_setup(struct ath_softc *sc, u_int mode) 3146 { 3147 struct ath_hal *ah = sc->sc_ah; 3148 struct ieee80211com *ic = &sc->sc_ic; 3149 const HAL_RATE_TABLE *rt; 3150 struct ieee80211_rateset *rs; 3151 int i, maxrates; 3152 3153 switch (mode) { 3154 case IEEE80211_MODE_11A: 3155 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11A); 3156 break; 3157 case IEEE80211_MODE_11B: 3158 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11B); 3159 break; 3160 case IEEE80211_MODE_11G: 3161 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11G); 3162 break; 3163 case IEEE80211_MODE_TURBO: 3164 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_TURBO); 3165 break; 3166 default: 3167 DPRINTF(ATH_DEBUG_ANY, 3168 ("%s: invalid mode %u\n", __func__, mode)); 3169 return 0; 3170 } 3171 rt = sc->sc_rates[mode]; 3172 if (rt == NULL) 3173 return 0; 3174 if (rt->rateCount > IEEE80211_RATE_MAXSIZE) { 3175 DPRINTF(ATH_DEBUG_ANY, 3176 ("%s: rate table too small (%u > %u)\n", 3177 __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE)); 3178 maxrates = IEEE80211_RATE_MAXSIZE; 3179 } else { 3180 maxrates = rt->rateCount; 3181 } 3182 rs = &ic->ic_sup_rates[mode]; 3183 for (i = 0; i < maxrates; i++) 3184 rs->rs_rates[i] = rt->info[i].dot11Rate; 3185 rs->rs_nrates = maxrates; 3186 return 1; 3187 } 3188 3189 void 3190 ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 3191 { 3192 const HAL_RATE_TABLE *rt; 3193 int i; 3194 3195 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 3196 rt = sc->sc_rates[mode]; 3197 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 3198 for (i = 0; i < rt->rateCount; i++) 3199 sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i; 3200 bzero(sc->sc_hwmap, sizeof(sc->sc_hwmap)); 3201 for (i = 0; i < 32; i++) 3202 sc->sc_hwmap[i] = rt->info[rt->rateCodeToIndex[i]].dot11Rate; 3203 sc->sc_currates = rt; 3204 sc->sc_curmode = mode; 3205 } 3206 3207 void 3208 ath_rssadapt_updatenode(void *arg, struct ieee80211_node *ni) 3209 { 3210 struct ath_node *an = ATH_NODE(ni); 3211 3212 ieee80211_rssadapt_updatestats(&an->an_rssadapt); 3213 } 3214 3215 void 3216 ath_rssadapt_updatestats(void *arg) 3217 { 3218 struct ath_softc *sc = (struct ath_softc *)arg; 3219 struct ieee80211com *ic = &sc->sc_ic; 3220 3221 if (ic->ic_opmode == IEEE80211_M_STA) { 3222 
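		/* in station mode only the BSS (AP) node needs updating */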
ath_rssadapt_updatenode(arg, ic->ic_bss); 3223 } else { 3224 ieee80211_iterate_nodes(ic, ath_rssadapt_updatenode, arg); 3225 } 3226 3227 timeout_add(&sc->sc_rssadapt_to, hz / 10); 3228 } 3229 3230 #ifdef AR_DEBUG 3231 void 3232 ath_printrxbuf(struct ath_buf *bf, int done) 3233 { 3234 struct ath_desc *ds; 3235 int i; 3236 3237 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { 3238 printf("R%d (%p %p) %08x %08x %08x %08x %08x %08x %c\n", 3239 i, ds, (struct ath_desc *)bf->bf_daddr + i, 3240 ds->ds_link, ds->ds_data, 3241 ds->ds_ctl0, ds->ds_ctl1, 3242 ds->ds_hw[0], ds->ds_hw[1], 3243 !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!'); 3244 } 3245 } 3246 3247 void 3248 ath_printtxbuf(struct ath_buf *bf, int done) 3249 { 3250 struct ath_desc *ds; 3251 int i; 3252 3253 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { 3254 printf("T%d (%p %p) " 3255 "%08x %08x %08x %08x %08x %08x %08x %08x %c\n", 3256 i, ds, (struct ath_desc *)bf->bf_daddr + i, 3257 ds->ds_link, ds->ds_data, 3258 ds->ds_ctl0, ds->ds_ctl1, 3259 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3], 3260 !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!'); 3261 } 3262 } 3263 #endif /* AR_DEBUG */ 3264 3265 int 3266 ath_gpio_attach(struct ath_softc *sc, u_int16_t devid) 3267 { 3268 struct ath_hal *ah = sc->sc_ah; 3269 struct gpiobus_attach_args gba; 3270 int i; 3271 3272 if (ah->ah_gpio_npins < 1) 3273 return 0; 3274 3275 /* Initialize gpio pins array */ 3276 for (i = 0; i < ah->ah_gpio_npins && i < AR5K_MAX_GPIO; i++) { 3277 sc->sc_gpio_pins[i].pin_num = i; 3278 sc->sc_gpio_pins[i].pin_caps = GPIO_PIN_INPUT | 3279 GPIO_PIN_OUTPUT; 3280 3281 /* Set pin mode to input */ 3282 ath_hal_set_gpio_input(ah, i); 3283 sc->sc_gpio_pins[i].pin_flags = GPIO_PIN_INPUT; 3284 3285 /* Get pin input */ 3286 sc->sc_gpio_pins[i].pin_state = ath_hal_get_gpio(ah, i) ? 3287 GPIO_PIN_HIGH : GPIO_PIN_LOW; 3288 } 3289 3290 /* Enable GPIO-controlled software LED if available */ 3291 if ((ah->ah_version == AR5K_AR5211) || 3292 (devid == PCI_PRODUCT_ATHEROS_AR5212_IBM)) { 3293 sc->sc_softled = 1; 3294 ath_hal_set_gpio_output(ah, AR5K_SOFTLED_PIN); 3295 ath_hal_set_gpio(ah, AR5K_SOFTLED_PIN, AR5K_SOFTLED_OFF); 3296 } 3297 3298 /* Create gpio controller tag */ 3299 sc->sc_gpio_gc.gp_cookie = sc; 3300 sc->sc_gpio_gc.gp_pin_read = ath_gpio_pin_read; 3301 sc->sc_gpio_gc.gp_pin_write = ath_gpio_pin_write; 3302 sc->sc_gpio_gc.gp_pin_ctl = ath_gpio_pin_ctl; 3303 3304 gba.gba_name = "gpio"; 3305 gba.gba_gc = &sc->sc_gpio_gc; 3306 gba.gba_pins = sc->sc_gpio_pins; 3307 gba.gba_npins = ah->ah_gpio_npins; 3308 3309 #ifdef notyet 3310 #if NGPIO > 0 3311 if (config_found(&sc->sc_dev, &gba, gpiobus_print) == NULL) 3312 return (ENODEV); 3313 #endif 3314 #endif 3315 3316 return (0); 3317 } 3318 3319 int 3320 ath_gpio_pin_read(void *arg, int pin) 3321 { 3322 struct ath_softc *sc = arg; 3323 struct ath_hal *ah = sc->sc_ah; 3324 return (ath_hal_get_gpio(ah, pin) ? GPIO_PIN_HIGH : GPIO_PIN_LOW); 3325 } 3326 3327 void 3328 ath_gpio_pin_write(void *arg, int pin, int value) 3329 { 3330 struct ath_softc *sc = arg; 3331 struct ath_hal *ah = sc->sc_ah; 3332 ath_hal_set_gpio(ah, pin, value ? GPIO_PIN_HIGH : GPIO_PIN_LOW); 3333 } 3334 3335 void 3336 ath_gpio_pin_ctl(void *arg, int pin, int flags) 3337 { 3338 struct ath_softc *sc = arg; 3339 struct ath_hal *ah = sc->sc_ah; 3340 3341 if (flags & GPIO_PIN_INPUT) { 3342 ath_hal_set_gpio_input(ah, pin); 3343 } else if (flags & GPIO_PIN_OUTPUT) { 3344 ath_hal_set_gpio_output(ah, pin); 3345 } 3346 } 3347