1 /* $OpenBSD: ath.c,v 1.83 2009/09/13 14:42:52 krw Exp $ */ 2 /* $NetBSD: ath.c,v 1.37 2004/08/18 21:59:39 dyoung Exp $ */ 3 4 /*- 5 * Copyright (c) 2002-2004 Sam Leffler, Errno Consulting 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer, 13 * without modification. 14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 15 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 16 * redistribution must be conditioned upon including a substantially 17 * similar Disclaimer requirement for further binary redistribution. 18 * 3. Neither the names of the above-listed copyright holders nor the names 19 * of any contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 26 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 27 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 28 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 31 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGES. 34 */ 35 36 /* 37 * Driver for the Atheros Wireless LAN controller. 38 * 39 * This software is derived from work of Atsushi Onoe; his contribution 40 * is greatly appreciated. It has been modified for OpenBSD to use an 41 * open source HAL instead of the original binary-only HAL. 
42 */ 43 44 #include "bpfilter.h" 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/sysctl.h> 49 #include <sys/mbuf.h> 50 #include <sys/malloc.h> 51 #include <sys/lock.h> 52 #include <sys/kernel.h> 53 #include <sys/socket.h> 54 #include <sys/sockio.h> 55 #include <sys/device.h> 56 #include <sys/errno.h> 57 #include <sys/timeout.h> 58 #include <sys/gpio.h> 59 60 #include <machine/endian.h> 61 #include <machine/bus.h> 62 63 #include <net/if.h> 64 #include <net/if_dl.h> 65 #include <net/if_media.h> 66 #include <net/if_arp.h> 67 #include <net/if_llc.h> 68 #if NBPFILTER > 0 69 #include <net/bpf.h> 70 #endif 71 #ifdef INET 72 #include <netinet/in.h> 73 #include <netinet/if_ether.h> 74 #endif 75 76 #include <net80211/ieee80211_var.h> 77 #include <net80211/ieee80211_rssadapt.h> 78 79 #include <dev/pci/pcidevs.h> 80 #include <dev/gpio/gpiovar.h> 81 82 #include <dev/ic/athvar.h> 83 84 int ath_init(struct ifnet *); 85 int ath_init1(struct ath_softc *); 86 int ath_intr1(struct ath_softc *); 87 void ath_stop(struct ifnet *); 88 void ath_start(struct ifnet *); 89 void ath_reset(struct ath_softc *, int); 90 int ath_media_change(struct ifnet *); 91 void ath_watchdog(struct ifnet *); 92 int ath_ioctl(struct ifnet *, u_long, caddr_t); 93 void ath_fatal_proc(void *, int); 94 void ath_rxorn_proc(void *, int); 95 void ath_bmiss_proc(void *, int); 96 u_int ath_chan2flags(struct ieee80211com *, struct ieee80211_channel *); 97 int ath_initkeytable(struct ath_softc *); 98 void ath_mcastfilter_accum(caddr_t, u_int32_t (*)[2]); 99 void ath_mcastfilter_compute(struct ath_softc *, u_int32_t (*)[2]); 100 u_int32_t ath_calcrxfilter(struct ath_softc *); 101 void ath_mode_init(struct ath_softc *); 102 #ifndef IEEE80211_STA_ONLY 103 int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); 104 void ath_beacon_proc(void *, int); 105 void ath_beacon_free(struct ath_softc *); 106 #endif 107 void ath_beacon_config(struct ath_softc *); 108 int ath_desc_alloc(struct ath_softc *); 109 void ath_desc_free(struct ath_softc *); 110 struct ieee80211_node *ath_node_alloc(struct ieee80211com *); 111 struct mbuf *ath_getmbuf(int, int, u_int); 112 void ath_node_free(struct ieee80211com *, struct ieee80211_node *); 113 void ath_node_copy(struct ieee80211com *, 114 struct ieee80211_node *, const struct ieee80211_node *); 115 u_int8_t ath_node_getrssi(struct ieee80211com *, 116 const struct ieee80211_node *); 117 int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); 118 void ath_rx_proc(void *, int); 119 int ath_tx_start(struct ath_softc *, struct ieee80211_node *, 120 struct ath_buf *, struct mbuf *); 121 void ath_tx_proc(void *, int); 122 int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 123 void ath_draintxq(struct ath_softc *); 124 void ath_stoprecv(struct ath_softc *); 125 int ath_startrecv(struct ath_softc *); 126 void ath_next_scan(void *); 127 int ath_set_slot_time(struct ath_softc *); 128 void ath_calibrate(void *); 129 void ath_ledstate(struct ath_softc *, enum ieee80211_state); 130 int ath_newstate(struct ieee80211com *, enum ieee80211_state, int); 131 void ath_newassoc(struct ieee80211com *, 132 struct ieee80211_node *, int); 133 int ath_getchannels(struct ath_softc *, HAL_BOOL outdoor, 134 HAL_BOOL xchanmode); 135 int ath_rate_setup(struct ath_softc *sc, u_int mode); 136 void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 137 void ath_rssadapt_updatenode(void *, struct ieee80211_node *); 138 void ath_rssadapt_updatestats(void *); 139 #ifndef IEEE80211_STA_ONLY 140 
void ath_recv_mgmt(struct ieee80211com *, struct mbuf *, 141 struct ieee80211_node *, struct ieee80211_rxinfo *, int); 142 #endif 143 void ath_disable(struct ath_softc *); 144 void ath_power(int, void *); 145 146 int ath_gpio_attach(struct ath_softc *, u_int16_t); 147 int ath_gpio_pin_read(void *, int); 148 void ath_gpio_pin_write(void *, int, int); 149 void ath_gpio_pin_ctl(void *, int, int); 150 151 #ifdef AR_DEBUG 152 void ath_printrxbuf(struct ath_buf *, int); 153 void ath_printtxbuf(struct ath_buf *, int); 154 int ath_debug = 0; 155 #endif 156 157 int ath_dwelltime = 200; /* 5 channels/second */ 158 int ath_calinterval = 30; /* calibrate every 30 secs */ 159 int ath_outdoor = AH_TRUE; /* outdoor operation */ 160 int ath_xchanmode = AH_TRUE; /* enable extended channels */ 161 int ath_softcrypto = 1; /* 1=enable software crypto */ 162 163 struct cfdriver ath_cd = { 164 NULL, "ath", DV_IFNET 165 }; 166 167 #if 0 168 int 169 ath_activate(struct device *self, enum devact act) 170 { 171 struct ath_softc *sc = (struct ath_softc *)self; 172 int rv = 0, s; 173 174 s = splnet(); 175 switch (act) { 176 case DVACT_ACTIVATE: 177 break; 178 case DVACT_DEACTIVATE: 179 if_deactivate(&sc->sc_ic.ic_if); 180 break; 181 } 182 splx(s); 183 return rv; 184 } 185 #endif 186 187 int 188 ath_enable(struct ath_softc *sc) 189 { 190 if (ATH_IS_ENABLED(sc) == 0) { 191 if (sc->sc_enable != NULL && (*sc->sc_enable)(sc) != 0) { 192 printf("%s: device enable failed\n", 193 sc->sc_dev.dv_xname); 194 return (EIO); 195 } 196 sc->sc_flags |= ATH_ENABLED; 197 } 198 return (0); 199 } 200 201 void 202 ath_disable(struct ath_softc *sc) 203 { 204 if (!ATH_IS_ENABLED(sc)) 205 return; 206 if (sc->sc_disable != NULL) 207 (*sc->sc_disable)(sc); 208 sc->sc_flags &= ~ATH_ENABLED; 209 } 210 211 int 212 ath_attach(u_int16_t devid, struct ath_softc *sc) 213 { 214 struct ieee80211com *ic = &sc->sc_ic; 215 struct ifnet *ifp = &ic->ic_if; 216 struct ath_hal *ah; 217 HAL_STATUS status; 218 HAL_TXQ_INFO qinfo; 219 int error = 0, i; 220 221 DPRINTF(ATH_DEBUG_ANY, ("%s: devid 0x%x\n", __func__, devid)); 222 223 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 224 sc->sc_flags &= ~ATH_ATTACHED; /* make sure that it's not attached */ 225 226 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, 227 sc->sc_pcie, &status); 228 if (ah == NULL) { 229 printf("%s: unable to attach hardware; HAL status %d\n", 230 ifp->if_xname, status); 231 error = ENXIO; 232 goto bad; 233 } 234 if (ah->ah_abi != HAL_ABI_VERSION) { 235 printf("%s: HAL ABI mismatch detected (0x%x != 0x%x)\n", 236 ifp->if_xname, ah->ah_abi, HAL_ABI_VERSION); 237 error = ENXIO; 238 goto bad; 239 } 240 241 if (ah->ah_single_chip == AH_TRUE) { 242 printf("%s: AR%s %u.%u phy %u.%u rf %u.%u", ifp->if_xname, 243 ar5k_printver(AR5K_VERSION_DEV, devid), 244 ah->ah_mac_version, ah->ah_mac_revision, 245 ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf, 246 ah->ah_radio_5ghz_revision >> 4, 247 ah->ah_radio_5ghz_revision & 0xf); 248 } else { 249 printf("%s: AR%s %u.%u phy %u.%u", ifp->if_xname, 250 ar5k_printver(AR5K_VERSION_VER, ah->ah_mac_srev), 251 ah->ah_mac_version, ah->ah_mac_revision, 252 ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf); 253 printf(" rf%s %u.%u", 254 ar5k_printver(AR5K_VERSION_RAD, ah->ah_radio_5ghz_revision), 255 ah->ah_radio_5ghz_revision >> 4, 256 ah->ah_radio_5ghz_revision & 0xf); 257 if (ah->ah_radio_2ghz_revision != 0) { 258 printf(" rf%s %u.%u", 259 ar5k_printver(AR5K_VERSION_RAD, 260 ah->ah_radio_2ghz_revision), 261 ah->ah_radio_2ghz_revision >> 4, 262 
ah->ah_radio_2ghz_revision & 0xf); 263 } 264 } 265 266 #if 0 267 if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_UNSUPP || 268 ah->ah_radio_2ghz_revision >= AR5K_SREV_RAD_UNSUPP) { 269 printf(": RF radio not supported\n"); 270 error = EOPNOTSUPP; 271 goto bad; 272 } 273 #endif 274 275 sc->sc_ah = ah; 276 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ 277 278 /* 279 * Get regulation domain either stored in the EEPROM or defined 280 * as the default value. Some devices are known to have broken 281 * regulation domain values in their EEPROM. 282 */ 283 ath_hal_get_regdomain(ah, &ah->ah_regdomain); 284 285 /* 286 * Construct channel list based on the current regulation domain. 287 */ 288 error = ath_getchannels(sc, ath_outdoor, ath_xchanmode); 289 if (error != 0) 290 goto bad; 291 292 /* 293 * Setup rate tables for all potential media types. 294 */ 295 ath_rate_setup(sc, IEEE80211_MODE_11A); 296 ath_rate_setup(sc, IEEE80211_MODE_11B); 297 ath_rate_setup(sc, IEEE80211_MODE_11G); 298 ath_rate_setup(sc, IEEE80211_MODE_TURBO); 299 300 error = ath_desc_alloc(sc); 301 if (error != 0) { 302 printf(": failed to allocate descriptors: %d\n", error); 303 goto bad; 304 } 305 timeout_set(&sc->sc_scan_to, ath_next_scan, sc); 306 timeout_set(&sc->sc_cal_to, ath_calibrate, sc); 307 timeout_set(&sc->sc_rssadapt_to, ath_rssadapt_updatestats, sc); 308 309 #ifdef __FreeBSD__ 310 ATH_TXBUF_LOCK_INIT(sc); 311 ATH_TXQ_LOCK_INIT(sc); 312 #endif 313 314 ATH_TASK_INIT(&sc->sc_txtask, ath_tx_proc, sc); 315 ATH_TASK_INIT(&sc->sc_rxtask, ath_rx_proc, sc); 316 ATH_TASK_INIT(&sc->sc_rxorntask, ath_rxorn_proc, sc); 317 ATH_TASK_INIT(&sc->sc_fataltask, ath_fatal_proc, sc); 318 ATH_TASK_INIT(&sc->sc_bmisstask, ath_bmiss_proc, sc); 319 #ifndef IEEE80211_STA_ONLY 320 ATH_TASK_INIT(&sc->sc_swbatask, ath_beacon_proc, sc); 321 #endif 322 323 /* 324 * For now just pre-allocate one data queue and one 325 * beacon queue. Note that the HAL handles resetting 326 * them at the needed time. Eventually we'll want to 327 * allocate more tx queues for splitting management 328 * frames and for QOS support. 
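	 * As an illustrative sketch only (not implemented here), the
	 * usual WME mapping would dedicate one data queue per access
	 * category, e.g. subtype 0 = best effort, 1 = background,
	 * 2 = video, 3 = voice, so the transmit path could then pick
	 * sc_txhalq[] by the outgoing frame's access category.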
329 */ 330 sc->sc_bhalq = ath_hal_setup_tx_queue(ah, HAL_TX_QUEUE_BEACON, NULL); 331 if (sc->sc_bhalq == (u_int) -1) { 332 printf(": unable to setup a beacon xmit queue!\n"); 333 goto bad2; 334 } 335 336 for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) { 337 bzero(&qinfo, sizeof(qinfo)); 338 qinfo.tqi_type = HAL_TX_QUEUE_DATA; 339 qinfo.tqi_subtype = i; /* should be mapped to WME types */ 340 sc->sc_txhalq[i] = ath_hal_setup_tx_queue(ah, 341 HAL_TX_QUEUE_DATA, &qinfo); 342 if (sc->sc_txhalq[i] == (u_int) -1) { 343 printf(": unable to setup a data xmit queue %u!\n", i); 344 goto bad2; 345 } 346 } 347 348 ifp->if_softc = sc; 349 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST 350 | IFF_NOTRAILERS; 351 ifp->if_start = ath_start; 352 ifp->if_watchdog = ath_watchdog; 353 ifp->if_ioctl = ath_ioctl; 354 #ifndef __OpenBSD__ 355 ifp->if_init = ath_init; 356 ifp->if_stop = ath_stop; /* XXX */ 357 #endif 358 IFQ_SET_MAXLEN(&ifp->if_snd, ATH_TXBUF * ATH_TXDESC); 359 IFQ_SET_READY(&ifp->if_snd); 360 361 ic->ic_softc = sc; 362 ic->ic_newassoc = ath_newassoc; 363 /* XXX not right but it's not used anywhere important */ 364 ic->ic_phytype = IEEE80211_T_OFDM; 365 ic->ic_opmode = IEEE80211_M_STA; 366 ic->ic_caps = IEEE80211_C_WEP /* wep supported */ 367 | IEEE80211_C_PMGT /* power management */ 368 #ifndef IEEE80211_STA_ONLY 369 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ 370 | IEEE80211_C_HOSTAP /* hostap mode */ 371 #endif 372 | IEEE80211_C_MONITOR /* monitor mode */ 373 | IEEE80211_C_SHSLOT /* short slot time supported */ 374 | IEEE80211_C_SHPREAMBLE; /* short preamble supported */ 375 if (ath_softcrypto) 376 ic->ic_caps |= IEEE80211_C_RSN; /* wpa/rsn supported */ 377 378 /* 379 * Not all chips have the VEOL support we want to use with 380 * IBSS beacon; check here for it. 381 */ 382 sc->sc_veol = ath_hal_has_veol(ah); 383 384 /* get mac address from hardware */ 385 ath_hal_get_lladdr(ah, ic->ic_myaddr); 386 387 if_attach(ifp); 388 389 /* call MI attach routine. */ 390 ieee80211_ifattach(ifp); 391 392 /* override default methods */ 393 ic->ic_node_alloc = ath_node_alloc; 394 sc->sc_node_free = ic->ic_node_free; 395 ic->ic_node_free = ath_node_free; 396 sc->sc_node_copy = ic->ic_node_copy; 397 ic->ic_node_copy = ath_node_copy; 398 ic->ic_node_getrssi = ath_node_getrssi; 399 sc->sc_newstate = ic->ic_newstate; 400 ic->ic_newstate = ath_newstate; 401 #ifndef IEEE80211_STA_ONLY 402 sc->sc_recv_mgmt = ic->ic_recv_mgmt; 403 ic->ic_recv_mgmt = ath_recv_mgmt; 404 #endif 405 ic->ic_max_rssi = AR5K_MAX_RSSI; 406 bcopy(etherbroadcastaddr, sc->sc_broadcast_addr, IEEE80211_ADDR_LEN); 407 408 /* complete initialization */ 409 ieee80211_media_init(ifp, ath_media_change, ieee80211_media_status); 410 411 #if NBPFILTER > 0 412 bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO, 413 sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN); 414 415 sc->sc_rxtap_len = sizeof(sc->sc_rxtapu); 416 bzero(&sc->sc_rxtapu, sc->sc_rxtap_len); 417 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); 418 sc->sc_rxtap.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT); 419 420 sc->sc_txtap_len = sizeof(sc->sc_txtapu); 421 bzero(&sc->sc_txtapu, sc->sc_txtap_len); 422 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); 423 sc->sc_txtap.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT); 424 #endif 425 426 sc->sc_flags |= ATH_ATTACHED; 427 /* 428 * Make sure the interface is shutdown during reboot. 
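	 * (This is handled through the power hook established below:
	 * ath_power() maps PWR_SUSPEND/PWR_STANDBY to ath_suspend(),
	 * which calls ath_stop(), and PWR_RESUME to ath_resume(),
	 * which re-runs ath_init() for an IFF_UP interface.)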
429 */ 430 sc->sc_powerhook = powerhook_establish(ath_power, sc); 431 if (sc->sc_powerhook == NULL) 432 printf(": WARNING: unable to establish power hook\n"); 433 434 /* 435 * Print regulation domain and the mac address. The regulation domain 436 * will be marked with a * if the EEPROM value has been overwritten. 437 */ 438 printf(", %s%s, address %s\n", 439 ieee80211_regdomain2name(ah->ah_regdomain), 440 ah->ah_regdomain != ah->ah_regdomain_hw ? "*" : "", 441 ether_sprintf(ic->ic_myaddr)); 442 443 if (ath_gpio_attach(sc, devid) == 0) 444 sc->sc_flags |= ATH_GPIO; 445 446 return 0; 447 bad2: 448 ath_desc_free(sc); 449 bad: 450 if (ah) 451 ath_hal_detach(ah); 452 sc->sc_invalid = 1; 453 return error; 454 } 455 456 int 457 ath_detach(struct ath_softc *sc, int flags) 458 { 459 struct ifnet *ifp = &sc->sc_ic.ic_if; 460 int s; 461 462 if ((sc->sc_flags & ATH_ATTACHED) == 0) 463 return (0); 464 465 config_detach_children(&sc->sc_dev, flags); 466 467 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags %x\n", __func__, ifp->if_flags)); 468 469 timeout_del(&sc->sc_scan_to); 470 timeout_del(&sc->sc_cal_to); 471 timeout_del(&sc->sc_rssadapt_to); 472 473 s = splnet(); 474 ath_stop(ifp); 475 ath_desc_free(sc); 476 ath_hal_detach(sc->sc_ah); 477 478 ieee80211_ifdetach(ifp); 479 if_detach(ifp); 480 481 splx(s); 482 if (sc->sc_powerhook != NULL) 483 powerhook_disestablish(sc->sc_powerhook); 484 #ifdef __FreeBSD__ 485 ATH_TXBUF_LOCK_DESTROY(sc); 486 ATH_TXQ_LOCK_DESTROY(sc); 487 #endif 488 489 return 0; 490 } 491 492 void 493 ath_power(int why, void *arg) 494 { 495 struct ath_softc *sc = arg; 496 int s; 497 498 DPRINTF(ATH_DEBUG_ANY, ("ath_power(%d)\n", why)); 499 500 s = splnet(); 501 switch (why) { 502 case PWR_SUSPEND: 503 case PWR_STANDBY: 504 ath_suspend(sc, why); 505 break; 506 case PWR_RESUME: 507 ath_resume(sc, why); 508 break; 509 #if !defined(__OpenBSD__) 510 case PWR_SOFTSUSPEND: 511 case PWR_SOFTSTANDBY: 512 case PWR_SOFTRESUME: 513 break; 514 #endif 515 } 516 splx(s); 517 } 518 519 void 520 ath_suspend(struct ath_softc *sc, int why) 521 { 522 struct ifnet *ifp = &sc->sc_ic.ic_if; 523 524 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags %x\n", __func__, ifp->if_flags)); 525 526 ath_stop(ifp); 527 if (sc->sc_power != NULL) 528 (*sc->sc_power)(sc, why); 529 } 530 531 void 532 ath_resume(struct ath_softc *sc, int why) 533 { 534 struct ifnet *ifp = &sc->sc_ic.ic_if; 535 536 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags %x\n", __func__, ifp->if_flags)); 537 538 if (ifp->if_flags & IFF_UP) { 539 ath_init(ifp); 540 #if 0 541 (void)ath_intr(sc); 542 #endif 543 if (sc->sc_power != NULL) 544 (*sc->sc_power)(sc, why); 545 if (ifp->if_flags & IFF_RUNNING) 546 ath_start(ifp); 547 } 548 } 549 550 int 551 ath_intr(void *arg) 552 { 553 return ath_intr1((struct ath_softc *)arg); 554 } 555 556 int 557 ath_intr1(struct ath_softc *sc) 558 { 559 struct ieee80211com *ic = &sc->sc_ic; 560 struct ifnet *ifp = &ic->ic_if; 561 struct ath_hal *ah = sc->sc_ah; 562 HAL_INT status; 563 564 if (sc->sc_invalid) { 565 /* 566 * The hardware is not ready/present, don't touch anything. 567 * Note this can happen early on if the IRQ is shared. 
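		 * (sc_invalid is set on the error path of ath_attach()
		 * and cleared once the HAL is attached, so bailing out
		 * here avoids touching registers before setup has
		 * completed or after it has failed.)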
568 */ 569 DPRINTF(ATH_DEBUG_ANY, ("%s: invalid; ignored\n", __func__)); 570 return 0; 571 } 572 if (!ath_hal_is_intr_pending(ah)) /* shared irq, not for us */ 573 return 0; 574 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) != (IFF_RUNNING|IFF_UP)) { 575 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n", 576 __func__, ifp->if_flags)); 577 ath_hal_get_isr(ah, &status); /* clear ISR */ 578 ath_hal_set_intr(ah, 0); /* disable further intr's */ 579 return 1; /* XXX */ 580 } 581 ath_hal_get_isr(ah, &status); /* NB: clears ISR too */ 582 DPRINTF(ATH_DEBUG_INTR, ("%s: status 0x%x\n", __func__, status)); 583 status &= sc->sc_imask; /* discard unasked for bits */ 584 if (status & HAL_INT_FATAL) { 585 sc->sc_stats.ast_hardware++; 586 ath_hal_set_intr(ah, 0); /* disable intr's until reset */ 587 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_fataltask); 588 } else if (status & HAL_INT_RXORN) { 589 sc->sc_stats.ast_rxorn++; 590 ath_hal_set_intr(ah, 0); /* disable intr's until reset */ 591 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxorntask); 592 } else if (status & HAL_INT_MIB) { 593 DPRINTF(ATH_DEBUG_INTR, 594 ("%s: resetting MIB counters\n", __func__)); 595 sc->sc_stats.ast_mib++; 596 ath_hal_update_mib_counters(ah, &sc->sc_mib_stats); 597 } else { 598 if (status & HAL_INT_RXEOL) { 599 /* 600 * NB: the hardware should re-read the link when 601 * RXE bit is written, but it doesn't work at 602 * least on older hardware revs. 603 */ 604 sc->sc_stats.ast_rxeol++; 605 sc->sc_rxlink = NULL; 606 } 607 if (status & HAL_INT_TXURN) { 608 sc->sc_stats.ast_txurn++; 609 /* bump tx trigger level */ 610 ath_hal_update_tx_triglevel(ah, AH_TRUE); 611 } 612 if (status & HAL_INT_RX) 613 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxtask); 614 if (status & HAL_INT_TX) 615 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_txtask); 616 if (status & HAL_INT_SWBA) 617 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_swbatask); 618 if (status & HAL_INT_BMISS) { 619 sc->sc_stats.ast_bmiss++; 620 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_bmisstask); 621 } 622 } 623 return 1; 624 } 625 626 void 627 ath_fatal_proc(void *arg, int pending) 628 { 629 struct ath_softc *sc = arg; 630 struct ieee80211com *ic = &sc->sc_ic; 631 struct ifnet *ifp = &ic->ic_if; 632 633 if (ifp->if_flags & IFF_DEBUG) 634 printf("%s: hardware error; resetting\n", ifp->if_xname); 635 ath_reset(sc, 1); 636 } 637 638 void 639 ath_rxorn_proc(void *arg, int pending) 640 { 641 struct ath_softc *sc = arg; 642 struct ieee80211com *ic = &sc->sc_ic; 643 struct ifnet *ifp = &ic->ic_if; 644 645 if (ifp->if_flags & IFF_DEBUG) 646 printf("%s: rx FIFO overrun; resetting\n", ifp->if_xname); 647 ath_reset(sc, 1); 648 } 649 650 void 651 ath_bmiss_proc(void *arg, int pending) 652 { 653 struct ath_softc *sc = arg; 654 struct ieee80211com *ic = &sc->sc_ic; 655 656 DPRINTF(ATH_DEBUG_ANY, ("%s: pending %u\n", __func__, pending)); 657 if (ic->ic_opmode != IEEE80211_M_STA) 658 return; 659 if (ic->ic_state == IEEE80211_S_RUN) { 660 /* 661 * Rather than go directly to scan state, try to 662 * reassociate first. If that fails then the state 663 * machine will drop us into scanning after timing 664 * out waiting for a probe response. 
665 */ 666 ieee80211_new_state(ic, IEEE80211_S_ASSOC, -1); 667 } 668 } 669 670 u_int 671 ath_chan2flags(struct ieee80211com *ic, struct ieee80211_channel *chan) 672 { 673 enum ieee80211_phymode mode = ieee80211_chan2mode(ic, chan); 674 675 switch (mode) { 676 case IEEE80211_MODE_AUTO: 677 return 0; 678 case IEEE80211_MODE_11A: 679 return CHANNEL_A; 680 case IEEE80211_MODE_11B: 681 return CHANNEL_B; 682 case IEEE80211_MODE_11G: 683 return CHANNEL_G; 684 case IEEE80211_MODE_TURBO: 685 return CHANNEL_T; 686 default: 687 panic("%s: unsupported mode %d", __func__, mode); 688 return 0; 689 } 690 } 691 692 int 693 ath_init(struct ifnet *ifp) 694 { 695 return ath_init1((struct ath_softc *)ifp->if_softc); 696 } 697 698 int 699 ath_init1(struct ath_softc *sc) 700 { 701 struct ieee80211com *ic = &sc->sc_ic; 702 struct ifnet *ifp = &ic->ic_if; 703 struct ieee80211_node *ni; 704 enum ieee80211_phymode mode; 705 struct ath_hal *ah = sc->sc_ah; 706 HAL_STATUS status; 707 HAL_CHANNEL hchan; 708 int error = 0, s; 709 710 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n", 711 __func__, ifp->if_flags)); 712 713 if ((error = ath_enable(sc)) != 0) 714 return error; 715 716 s = splnet(); 717 /* 718 * Stop anything previously setup. This is safe 719 * whether this is the first time through or not. 720 */ 721 ath_stop(ifp); 722 723 /* 724 * Reset the link layer address to the latest value. 725 */ 726 IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl)); 727 ath_hal_set_lladdr(ah, ic->ic_myaddr); 728 729 /* 730 * The basic interface to setting the hardware in a good 731 * state is ``reset''. On return the hardware is known to 732 * be powered up and with interrupts disabled. This must 733 * be followed by initialization of the appropriate bits 734 * and then setup of the interrupt mask. 735 */ 736 hchan.channel = ic->ic_ibss_chan->ic_freq; 737 hchan.channelFlags = ath_chan2flags(ic, ic->ic_ibss_chan); 738 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, &status)) { 739 printf("%s: unable to reset hardware; hal status %u\n", 740 ifp->if_xname, status); 741 error = EIO; 742 goto done; 743 } 744 ath_set_slot_time(sc); 745 746 if ((error = ath_initkeytable(sc)) != 0) { 747 printf("%s: unable to reset the key cache\n", 748 ifp->if_xname); 749 goto done; 750 } 751 752 if ((error = ath_startrecv(sc)) != 0) { 753 printf("%s: unable to start recv logic\n", ifp->if_xname); 754 goto done; 755 } 756 757 /* 758 * Enable interrupts. 759 */ 760 sc->sc_imask = HAL_INT_RX | HAL_INT_TX 761 | HAL_INT_RXEOL | HAL_INT_RXORN 762 | HAL_INT_FATAL | HAL_INT_GLOBAL; 763 #ifndef IEEE80211_STA_ONLY 764 if (ic->ic_opmode == IEEE80211_M_HOSTAP) 765 sc->sc_imask |= HAL_INT_MIB; 766 #endif 767 ath_hal_set_intr(ah, sc->sc_imask); 768 769 ifp->if_flags |= IFF_RUNNING; 770 ic->ic_state = IEEE80211_S_INIT; 771 772 /* 773 * The hardware should be ready to go now so it's safe 774 * to kick the 802.11 state machine as it's likely to 775 * immediately call back to us to send mgmt frames. 
776 */ 777 ni = ic->ic_bss; 778 ni->ni_chan = ic->ic_ibss_chan; 779 mode = ieee80211_chan2mode(ic, ni->ni_chan); 780 if (mode != sc->sc_curmode) 781 ath_setcurmode(sc, mode); 782 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 783 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1); 784 } else { 785 ieee80211_new_state(ic, IEEE80211_S_RUN, -1); 786 } 787 done: 788 splx(s); 789 return error; 790 } 791 792 void 793 ath_stop(struct ifnet *ifp) 794 { 795 struct ieee80211com *ic = (struct ieee80211com *) ifp; 796 struct ath_softc *sc = ifp->if_softc; 797 struct ath_hal *ah = sc->sc_ah; 798 int s; 799 800 DPRINTF(ATH_DEBUG_ANY, ("%s: invalid %u if_flags 0x%x\n", 801 __func__, sc->sc_invalid, ifp->if_flags)); 802 803 s = splnet(); 804 if (ifp->if_flags & IFF_RUNNING) { 805 /* 806 * Shutdown the hardware and driver: 807 * disable interrupts 808 * turn off timers 809 * clear transmit machinery 810 * clear receive machinery 811 * drain and release tx queues 812 * reclaim beacon resources 813 * reset 802.11 state machine 814 * power down hardware 815 * 816 * Note that some of this work is not possible if the 817 * hardware is gone (invalid). 818 */ 819 ifp->if_flags &= ~IFF_RUNNING; 820 ifp->if_timer = 0; 821 if (!sc->sc_invalid) 822 ath_hal_set_intr(ah, 0); 823 ath_draintxq(sc); 824 if (!sc->sc_invalid) { 825 ath_stoprecv(sc); 826 } else { 827 sc->sc_rxlink = NULL; 828 } 829 IFQ_PURGE(&ifp->if_snd); 830 #ifndef IEEE80211_STA_ONLY 831 ath_beacon_free(sc); 832 #endif 833 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 834 if (!sc->sc_invalid) { 835 ath_hal_set_power(ah, HAL_PM_FULL_SLEEP, 0); 836 } 837 ath_disable(sc); 838 } 839 splx(s); 840 } 841 842 /* 843 * Reset the hardware w/o losing operational state. This is 844 * basically a more efficient way of doing ath_stop, ath_init, 845 * followed by state transitions to the current 802.11 846 * operational state. Used to recover from errors rx overrun 847 * and to reset the hardware when rf gain settings must be reset. 848 */ 849 void 850 ath_reset(struct ath_softc *sc, int full) 851 { 852 struct ieee80211com *ic = &sc->sc_ic; 853 struct ifnet *ifp = &ic->ic_if; 854 struct ath_hal *ah = sc->sc_ah; 855 struct ieee80211_channel *c; 856 HAL_STATUS status; 857 HAL_CHANNEL hchan; 858 859 /* 860 * Convert to a HAL channel description with the flags 861 * constrained to reflect the current operating mode. 862 */ 863 c = ic->ic_ibss_chan; 864 hchan.channel = c->ic_freq; 865 hchan.channelFlags = ath_chan2flags(ic, c); 866 867 ath_hal_set_intr(ah, 0); /* disable interrupts */ 868 ath_draintxq(sc); /* stop xmit side */ 869 ath_stoprecv(sc); /* stop recv side */ 870 /* NB: indicate channel change so we do a full reset */ 871 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, 872 full ? 
AH_TRUE : AH_FALSE, &status)) { 873 printf("%s: %s: unable to reset hardware; hal status %u\n", 874 ifp->if_xname, __func__, status); 875 } 876 ath_set_slot_time(sc); 877 /* In case channel changed, save as a node channel */ 878 ic->ic_bss->ni_chan = ic->ic_ibss_chan; 879 ath_hal_set_intr(ah, sc->sc_imask); 880 if (ath_startrecv(sc) != 0) /* restart recv */ 881 printf("%s: %s: unable to start recv logic\n", ifp->if_xname, 882 __func__); 883 ath_start(ifp); /* restart xmit */ 884 if (ic->ic_state == IEEE80211_S_RUN) 885 ath_beacon_config(sc); /* restart beacons */ 886 } 887 888 void 889 ath_start(struct ifnet *ifp) 890 { 891 struct ath_softc *sc = ifp->if_softc; 892 struct ath_hal *ah = sc->sc_ah; 893 struct ieee80211com *ic = &sc->sc_ic; 894 struct ieee80211_node *ni; 895 struct ath_buf *bf; 896 struct mbuf *m; 897 struct ieee80211_frame *wh; 898 int s; 899 900 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING || 901 sc->sc_invalid) 902 return; 903 for (;;) { 904 /* 905 * Grab a TX buffer and associated resources. 906 */ 907 s = splnet(); 908 bf = TAILQ_FIRST(&sc->sc_txbuf); 909 if (bf != NULL) 910 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); 911 splx(s); 912 if (bf == NULL) { 913 DPRINTF(ATH_DEBUG_ANY, ("%s: out of xmit buffers\n", 914 __func__)); 915 sc->sc_stats.ast_tx_qstop++; 916 ifp->if_flags |= IFF_OACTIVE; 917 break; 918 } 919 /* 920 * Poll the management queue for frames; they 921 * have priority over normal data frames. 922 */ 923 IF_DEQUEUE(&ic->ic_mgtq, m); 924 if (m == NULL) { 925 /* 926 * No data frames go out unless we're associated. 927 */ 928 if (ic->ic_state != IEEE80211_S_RUN) { 929 DPRINTF(ATH_DEBUG_ANY, 930 ("%s: ignore data packet, state %u\n", 931 __func__, ic->ic_state)); 932 sc->sc_stats.ast_tx_discard++; 933 s = splnet(); 934 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 935 splx(s); 936 break; 937 } 938 IFQ_DEQUEUE(&ifp->if_snd, m); 939 if (m == NULL) { 940 s = splnet(); 941 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 942 splx(s); 943 break; 944 } 945 ifp->if_opackets++; 946 947 #if NBPFILTER > 0 948 if (ifp->if_bpf) 949 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 950 #endif 951 952 /* 953 * Encapsulate the packet in prep for transmission. 954 */ 955 m = ieee80211_encap(ifp, m, &ni); 956 if (m == NULL) { 957 DPRINTF(ATH_DEBUG_ANY, 958 ("%s: encapsulation failure\n", 959 __func__)); 960 sc->sc_stats.ast_tx_encap++; 961 goto bad; 962 } 963 wh = mtod(m, struct ieee80211_frame *); 964 } else { 965 /* 966 * Hack! The referenced node pointer is in the 967 * rcvif field of the packet header. This is 968 * placed there by ieee80211_mgmt_output because 969 * we need to hold the reference with the frame 970 * and there's no other way (other than packet 971 * tags which we consider too expensive to use) 972 * to pass it along. 
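			 * (For the probe response case below, note that
			 * the 8-byte Timestamp field sits immediately
			 * after the 802.11 header and is little-endian
			 * on the air, which is why the TSF is stored as
			 * two htole32() words at &wh[1].)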
973 */ 974 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 975 m->m_pkthdr.rcvif = NULL; 976 977 wh = mtod(m, struct ieee80211_frame *); 978 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 979 IEEE80211_FC0_SUBTYPE_PROBE_RESP) { 980 /* fill time stamp */ 981 u_int64_t tsf; 982 u_int32_t *tstamp; 983 984 tsf = ath_hal_get_tsf64(ah); 985 /* XXX: adjust 100us delay to xmit */ 986 tsf += 100; 987 tstamp = (u_int32_t *)&wh[1]; 988 tstamp[0] = htole32(tsf & 0xffffffff); 989 tstamp[1] = htole32(tsf >> 32); 990 } 991 sc->sc_stats.ast_tx_mgmt++; 992 } 993 994 if (ath_tx_start(sc, ni, bf, m)) { 995 bad: 996 s = splnet(); 997 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 998 splx(s); 999 ifp->if_oerrors++; 1000 if (ni != NULL) 1001 ieee80211_release_node(ic, ni); 1002 continue; 1003 } 1004 1005 sc->sc_tx_timer = 5; 1006 ifp->if_timer = 1; 1007 } 1008 } 1009 1010 int 1011 ath_media_change(struct ifnet *ifp) 1012 { 1013 int error; 1014 1015 error = ieee80211_media_change(ifp); 1016 if (error == ENETRESET) { 1017 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == 1018 (IFF_RUNNING|IFF_UP)) 1019 ath_init(ifp); /* XXX lose error */ 1020 error = 0; 1021 } 1022 return error; 1023 } 1024 1025 void 1026 ath_watchdog(struct ifnet *ifp) 1027 { 1028 struct ath_softc *sc = ifp->if_softc; 1029 1030 ifp->if_timer = 0; 1031 if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) 1032 return; 1033 if (sc->sc_tx_timer) { 1034 if (--sc->sc_tx_timer == 0) { 1035 printf("%s: device timeout\n", ifp->if_xname); 1036 ath_reset(sc, 1); 1037 ifp->if_oerrors++; 1038 sc->sc_stats.ast_watchdog++; 1039 return; 1040 } 1041 ifp->if_timer = 1; 1042 } 1043 1044 ieee80211_watchdog(ifp); 1045 } 1046 1047 int 1048 ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1049 { 1050 struct ath_softc *sc = ifp->if_softc; 1051 struct ieee80211com *ic = &sc->sc_ic; 1052 struct ifreq *ifr = (struct ifreq *)data; 1053 struct ifaddr *ifa = (struct ifaddr *)data; 1054 int error = 0, s; 1055 1056 s = splnet(); 1057 switch (cmd) { 1058 case SIOCSIFADDR: 1059 ifp->if_flags |= IFF_UP; 1060 #ifdef INET 1061 if (ifa->ifa_addr->sa_family == AF_INET) { 1062 arp_ifinit(&ic->ic_ac, ifa); 1063 } 1064 #endif /* INET */ 1065 /* FALLTHROUGH */ 1066 case SIOCSIFFLAGS: 1067 if (ifp->if_flags & IFF_UP) { 1068 if (ifp->if_flags & IFF_RUNNING) { 1069 /* 1070 * To avoid rescanning another access point, 1071 * do not call ath_init() here. Instead, 1072 * only reflect promisc mode settings. 1073 */ 1074 ath_mode_init(sc); 1075 } else { 1076 /* 1077 * Beware of being called during detach to 1078 * reset promiscuous mode. In that case we 1079 * will still be marked UP but not RUNNING. 1080 * However trying to re-init the interface 1081 * is the wrong thing to do as we've already 1082 * torn down much of our state. There's 1083 * probably a better way to deal with this. 1084 */ 1085 if (!sc->sc_invalid) 1086 ath_init(ifp); /* XXX lose error */ 1087 } 1088 } else 1089 ath_stop(ifp); 1090 break; 1091 case SIOCADDMULTI: 1092 case SIOCDELMULTI: 1093 #ifdef __FreeBSD__ 1094 /* 1095 * The upper layer has already installed/removed 1096 * the multicast address(es), just recalculate the 1097 * multicast filter for the card. 1098 */ 1099 if (ifp->if_flags & IFF_RUNNING) 1100 ath_mode_init(sc); 1101 #endif 1102 error = (cmd == SIOCADDMULTI) ? 
1103 ether_addmulti(ifr, &sc->sc_ic.ic_ac) : 1104 ether_delmulti(ifr, &sc->sc_ic.ic_ac); 1105 if (error == ENETRESET) { 1106 if (ifp->if_flags & IFF_RUNNING) 1107 ath_mode_init(sc); 1108 error = 0; 1109 } 1110 break; 1111 case SIOCGATHSTATS: 1112 error = copyout(&sc->sc_stats, 1113 ifr->ifr_data, sizeof (sc->sc_stats)); 1114 break; 1115 default: 1116 error = ieee80211_ioctl(ifp, cmd, data); 1117 if (error == ENETRESET) { 1118 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == 1119 (IFF_RUNNING|IFF_UP)) { 1120 if (ic->ic_opmode != IEEE80211_M_MONITOR) 1121 ath_init(ifp); /* XXX lose error */ 1122 else 1123 ath_reset(sc, 1); 1124 } 1125 error = 0; 1126 } 1127 break; 1128 } 1129 splx(s); 1130 return error; 1131 } 1132 1133 /* 1134 * Fill the hardware key cache with key entries. 1135 */ 1136 int 1137 ath_initkeytable(struct ath_softc *sc) 1138 { 1139 struct ieee80211com *ic = &sc->sc_ic; 1140 struct ath_hal *ah = sc->sc_ah; 1141 int i; 1142 1143 if (ath_softcrypto) { 1144 /* 1145 * Disable the hardware crypto engine and reset the key cache 1146 * to allow software crypto operation for WEP/RSN/WPA2 1147 */ 1148 if (ic->ic_flags & (IEEE80211_F_WEPON|IEEE80211_F_RSNON)) 1149 (void)ath_hal_softcrypto(ah, AH_TRUE); 1150 else 1151 (void)ath_hal_softcrypto(ah, AH_FALSE); 1152 return (0); 1153 } 1154 1155 /* WEP is disabled, we only support WEP in hardware yet */ 1156 if ((ic->ic_flags & IEEE80211_F_WEPON) == 0) 1157 return (0); 1158 1159 /* 1160 * Setup the hardware after reset: the key cache is filled as 1161 * needed and the receive engine is set going. Frame transmit 1162 * is handled entirely in the frame output path; there's nothing 1163 * to do here except setup the interrupt mask. 1164 */ 1165 1166 /* XXX maybe should reset all keys when !WEPON */ 1167 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 1168 struct ieee80211_key *k = &ic->ic_nw_keys[i]; 1169 if (k->k_len == 0) 1170 ath_hal_reset_key(ah, i); 1171 else { 1172 HAL_KEYVAL hk; 1173 1174 bzero(&hk, sizeof(hk)); 1175 /* 1176 * Pad the key to a supported key length. It 1177 * is always a good idea to use full-length 1178 * keys without padded zeros but this seems 1179 * to be the default behaviour used by many 1180 * implementations. 1181 */ 1182 if (k->k_cipher == IEEE80211_CIPHER_WEP40) 1183 hk.wk_len = AR5K_KEYVAL_LENGTH_40; 1184 else if (k->k_cipher == IEEE80211_CIPHER_WEP104) 1185 hk.wk_len = AR5K_KEYVAL_LENGTH_104; 1186 else 1187 return (EINVAL); 1188 bcopy(k->k_key, hk.wk_key, hk.wk_len); 1189 1190 if (ath_hal_set_key(ah, i, &hk) != AH_TRUE) 1191 return (EINVAL); 1192 } 1193 } 1194 1195 return (0); 1196 } 1197 1198 void 1199 ath_mcastfilter_accum(caddr_t dl, u_int32_t (*mfilt)[2]) 1200 { 1201 u_int32_t val; 1202 u_int8_t pos; 1203 1204 val = LE_READ_4(dl + 0); 1205 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 1206 val = LE_READ_4(dl + 3); 1207 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 1208 pos &= 0x3f; 1209 (*mfilt)[pos / 32] |= (1 << (pos % 32)); 1210 } 1211 1212 void 1213 ath_mcastfilter_compute(struct ath_softc *sc, u_int32_t (*mfilt)[2]) 1214 { 1215 struct ifnet *ifp = &sc->sc_ic.ic_if; 1216 struct ether_multi *enm; 1217 struct ether_multistep estep; 1218 1219 ETHER_FIRST_MULTI(estep, &sc->sc_ic.ic_ac, enm); 1220 while (enm != NULL) { 1221 /* XXX Punt on ranges. 
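		 * A range cannot be represented by individual bits in
		 * the 64-bit hash filter built by ath_mcastfilter_accum()
		 * above (each address hashes to a 6-bit position: bit
		 * pos % 32 of word pos / 32), so fall back to
		 * IFF_ALLMULTI and accept all multicast frames instead.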
*/ 1222 if (!IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) { 1223 (*mfilt)[0] = (*mfilt)[1] = ~((u_int32_t)0); 1224 ifp->if_flags |= IFF_ALLMULTI; 1225 return; 1226 } 1227 ath_mcastfilter_accum(enm->enm_addrlo, mfilt); 1228 ETHER_NEXT_MULTI(estep, enm); 1229 } 1230 ifp->if_flags &= ~IFF_ALLMULTI; 1231 } 1232 1233 /* 1234 * Calculate the receive filter according to the 1235 * operating mode and state: 1236 * 1237 * o always accept unicast, broadcast, and multicast traffic 1238 * o maintain current state of phy error reception 1239 * o probe request frames are accepted only when operating in 1240 * hostap, adhoc, or monitor modes 1241 * o enable promiscuous mode according to the interface state 1242 * o accept beacons: 1243 * - when operating in adhoc mode so the 802.11 layer creates 1244 * node table entries for peers, 1245 * - when operating in station mode for collecting rssi data when 1246 * the station is otherwise quiet, or 1247 * - when scanning 1248 */ 1249 u_int32_t 1250 ath_calcrxfilter(struct ath_softc *sc) 1251 { 1252 struct ieee80211com *ic = &sc->sc_ic; 1253 struct ath_hal *ah = sc->sc_ah; 1254 struct ifnet *ifp = &ic->ic_if; 1255 u_int32_t rfilt; 1256 1257 rfilt = (ath_hal_get_rx_filter(ah) & HAL_RX_FILTER_PHYERR) 1258 | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST; 1259 if (ic->ic_opmode != IEEE80211_M_STA) 1260 rfilt |= HAL_RX_FILTER_PROBEREQ; 1261 #ifndef IEEE80211_STA_ONLY 1262 if (ic->ic_opmode != IEEE80211_M_AHDEMO) 1263 #endif 1264 rfilt |= HAL_RX_FILTER_BEACON; 1265 if (ifp->if_flags & IFF_PROMISC) 1266 rfilt |= HAL_RX_FILTER_PROM; 1267 return rfilt; 1268 } 1269 1270 void 1271 ath_mode_init(struct ath_softc *sc) 1272 { 1273 struct ath_hal *ah = sc->sc_ah; 1274 u_int32_t rfilt, mfilt[2]; 1275 1276 /* configure rx filter */ 1277 rfilt = ath_calcrxfilter(sc); 1278 ath_hal_set_rx_filter(ah, rfilt); 1279 1280 /* configure operational mode */ 1281 ath_hal_set_opmode(ah); 1282 1283 /* calculate and install multicast filter */ 1284 mfilt[0] = mfilt[1] = 0; 1285 ath_mcastfilter_compute(sc, &mfilt); 1286 ath_hal_set_mcast_filter(ah, mfilt[0], mfilt[1]); 1287 DPRINTF(ATH_DEBUG_MODE, ("%s: RX filter 0x%x, MC filter %08x:%08x\n", 1288 __func__, rfilt, mfilt[0], mfilt[1])); 1289 } 1290 1291 struct mbuf * 1292 ath_getmbuf(int flags, int type, u_int pktlen) 1293 { 1294 struct mbuf *m; 1295 1296 KASSERT(pktlen <= MCLBYTES, ("802.11 packet too large: %u", pktlen)); 1297 #ifdef __FreeBSD__ 1298 if (pktlen <= MHLEN) { 1299 MGETHDR(m, flags, type); 1300 } else { 1301 m = m_getcl(flags, type, M_PKTHDR); 1302 } 1303 #else 1304 MGETHDR(m, flags, type); 1305 if (m != NULL && pktlen > MHLEN) { 1306 MCLGET(m, flags); 1307 if ((m->m_flags & M_EXT) == 0) { 1308 m_free(m); 1309 m = NULL; 1310 } 1311 } 1312 #endif 1313 return m; 1314 } 1315 1316 #ifndef IEEE80211_STA_ONLY 1317 int 1318 ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni) 1319 { 1320 struct ieee80211com *ic = &sc->sc_ic; 1321 struct ath_hal *ah = sc->sc_ah; 1322 struct ath_buf *bf; 1323 struct ath_desc *ds; 1324 struct mbuf *m; 1325 int error; 1326 u_int8_t rate; 1327 const HAL_RATE_TABLE *rt; 1328 u_int flags = 0; 1329 1330 bf = sc->sc_bcbuf; 1331 if (bf->bf_m != NULL) { 1332 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1333 m_freem(bf->bf_m); 1334 bf->bf_m = NULL; 1335 bf->bf_node = NULL; 1336 } 1337 /* 1338 * NB: the beacon data buffer must be 32-bit aligned; 1339 * we assume the mbuf routines will return us something 1340 * with this alignment (perhaps should assert). 
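	 * A minimal sketch of such an assertion, in the two-argument
	 * KASSERT() style already used in this file (illustrative, not
	 * compiled in), once m has been allocated:
	 *
	 *	KASSERT((mtod(m, u_long) & 0x3) == 0,
	 *	    ("beacon data not 32-bit aligned"));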
1341 */ 1342 m = ieee80211_beacon_alloc(ic, ni); 1343 if (m == NULL) { 1344 DPRINTF(ATH_DEBUG_BEACON, ("%s: cannot get mbuf/cluster\n", 1345 __func__)); 1346 sc->sc_stats.ast_be_nombuf++; 1347 return ENOMEM; 1348 } 1349 1350 DPRINTF(ATH_DEBUG_BEACON, ("%s: m %p len %u\n", __func__, m, m->m_len)); 1351 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m, 1352 BUS_DMA_NOWAIT); 1353 if (error != 0) { 1354 m_freem(m); 1355 return error; 1356 } 1357 KASSERT(bf->bf_nseg == 1, 1358 ("%s: multi-segment packet; nseg %u", __func__, bf->bf_nseg)); 1359 bf->bf_m = m; 1360 1361 /* setup descriptors */ 1362 ds = bf->bf_desc; 1363 bzero(ds, sizeof(struct ath_desc)); 1364 1365 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol) { 1366 ds->ds_link = bf->bf_daddr; /* link to self */ 1367 flags |= HAL_TXDESC_VEOL; 1368 } else { 1369 ds->ds_link = 0; 1370 } 1371 ds->ds_data = bf->bf_segs[0].ds_addr; 1372 1373 DPRINTF(ATH_DEBUG_ANY, ("%s: segaddr %p seglen %u\n", __func__, 1374 (caddr_t)bf->bf_segs[0].ds_addr, (u_int)bf->bf_segs[0].ds_len)); 1375 1376 /* 1377 * Calculate rate code. 1378 * XXX everything at min xmit rate 1379 */ 1380 rt = sc->sc_currates; 1381 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 1382 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) { 1383 rate = rt->info[0].rateCode | rt->info[0].shortPreamble; 1384 } else { 1385 rate = rt->info[0].rateCode; 1386 } 1387 1388 flags = HAL_TXDESC_NOACK; 1389 if (ic->ic_opmode == IEEE80211_M_IBSS) 1390 flags |= HAL_TXDESC_VEOL; 1391 1392 if (!ath_hal_setup_tx_desc(ah, ds 1393 , m->m_pkthdr.len + IEEE80211_CRC_LEN /* packet length */ 1394 , sizeof(struct ieee80211_frame) /* header length */ 1395 , HAL_PKT_TYPE_BEACON /* Atheros packet type */ 1396 , 60 /* txpower XXX */ 1397 , rate, 1 /* series 0 rate/tries */ 1398 , HAL_TXKEYIX_INVALID /* no encryption */ 1399 , 0 /* antenna mode */ 1400 , flags /* no ack for beacons */ 1401 , 0 /* rts/cts rate */ 1402 , 0 /* rts/cts duration */ 1403 )) { 1404 printf("%s: ath_hal_setup_tx_desc failed\n", __func__); 1405 return -1; 1406 } 1407 /* NB: beacon's BufLen must be a multiple of 4 bytes */ 1408 /* XXX verify mbuf data area covers this roundup */ 1409 if (!ath_hal_fill_tx_desc(ah, ds 1410 , roundup(bf->bf_segs[0].ds_len, 4) /* buffer length */ 1411 , AH_TRUE /* first segment */ 1412 , AH_TRUE /* last segment */ 1413 )) { 1414 printf("%s: ath_hal_fill_tx_desc failed\n", __func__); 1415 return -1; 1416 } 1417 1418 /* XXX it is not appropriate to bus_dmamap_sync? -dcy */ 1419 1420 return 0; 1421 } 1422 1423 void 1424 ath_beacon_proc(void *arg, int pending) 1425 { 1426 struct ath_softc *sc = arg; 1427 struct ieee80211com *ic = &sc->sc_ic; 1428 struct ath_buf *bf = sc->sc_bcbuf; 1429 struct ath_hal *ah = sc->sc_ah; 1430 1431 DPRINTF(ATH_DEBUG_BEACON_PROC, ("%s: pending %u\n", __func__, pending)); 1432 if (ic->ic_opmode == IEEE80211_M_STA || 1433 bf == NULL || bf->bf_m == NULL) { 1434 DPRINTF(ATH_DEBUG_ANY, ("%s: ic_flags=%x bf=%p bf_m=%p\n", 1435 __func__, ic->ic_flags, bf, bf ? 
	    bf->bf_m : NULL));
		return;
	}
	/* TODO: update beacon to reflect PS poll state */
	if (!ath_hal_stop_tx_dma(ah, sc->sc_bhalq)) {
		DPRINTF(ATH_DEBUG_ANY, ("%s: beacon queue %u did not stop?\n",
		    __func__, sc->sc_bhalq));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
	    bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	ath_hal_put_tx_buf(ah, sc->sc_bhalq, bf->bf_daddr);
	ath_hal_tx_start(ah, sc->sc_bhalq);
	DPRINTF(ATH_DEBUG_BEACON_PROC,
	    ("%s: TXDP%u = %p (%p)\n", __func__,
	    sc->sc_bhalq, (caddr_t)bf->bf_daddr, bf->bf_desc));
}

void
ath_beacon_free(struct ath_softc *sc)
{
	struct ath_buf *bf = sc->sc_bcbuf;

	if (bf->bf_m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		bf->bf_node = NULL;
	}
}
#endif /* IEEE80211_STA_ONLY */

/*
 * Configure the beacon and sleep timers.
 *
 * When operating as an AP this resets the TSF and sets
 * up the hardware to notify us when we need to issue beacons.
 *
 * When operating in station mode this sets up the beacon
 * timers according to the timestamp of the last received
 * beacon and the current TSF, configures PCF and DTIM
 * handling, programs the sleep registers so the hardware
 * will wakeup in time to receive beacons, and configures
 * the beacon miss handling so we'll receive a BMISS
 * interrupt when we stop seeing beacons from the AP
 * we've associated with.
 */
void
ath_beacon_config(struct ath_softc *sc)
{
#define MS_TO_TU(x)	(((x) * 1000) / 1024)
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	u_int32_t nexttbtt, intval;

	nexttbtt = (LE_READ_4(ni->ni_tstamp + 4) << 22) |
	    (LE_READ_4(ni->ni_tstamp) >> 10);
	intval = MAX(1, ni->ni_intval) & HAL_BEACON_PERIOD;
	if (nexttbtt == 0) {	/* e.g. for ap mode */
		nexttbtt = intval;
	} else if (intval) {
		nexttbtt = roundup(nexttbtt, intval);
	}
	DPRINTF(ATH_DEBUG_BEACON, ("%s: intval %u nexttbtt %u\n",
	    __func__, ni->ni_intval, nexttbtt));
	if (ic->ic_opmode == IEEE80211_M_STA) {
		HAL_BEACON_STATE bs;
		u_int32_t bmisstime;

		/* NB: no PCF support right now */
		bzero(&bs, sizeof(bs));
		bs.bs_intval = intval;
		bs.bs_nexttbtt = nexttbtt;
		bs.bs_dtimperiod = bs.bs_intval;
		bs.bs_nextdtim = nexttbtt;
		/*
		 * Calculate the number of consecutive beacons to miss
		 * before taking a BMISS interrupt. The configuration
		 * is specified in ms, so we need to convert that to
		 * TU's and then calculate based on the beacon interval.
		 * Note that we clamp the result to at most 7 beacons.
		 */
		bmisstime = MAX(7, ic->ic_bmisstimeout);
		bs.bs_bmissthreshold = howmany(bmisstime, intval);
		if (bs.bs_bmissthreshold > 7) {
			bs.bs_bmissthreshold = 7;
		} else if (bs.bs_bmissthreshold <= 0) {
			bs.bs_bmissthreshold = 1;
		}

		/*
		 * Calculate sleep duration. The configuration is
		 * given in ms. We ensure a multiple of the beacon
		 * period is used. Also, if the sleep duration is
		 * greater than the DTIM period then it makes sense
		 * to make it a multiple of that.
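		 * As a worked example, assuming the typical 100 TU
		 * beacon interval: MS_TO_TU(100) = 100 * 1000 / 1024
		 * = 97 TU, which roundup() below then raises to one
		 * full beacon interval, i.e. 100 TU.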
1532 * 1533 * XXX fixed at 100ms 1534 */ 1535 bs.bs_sleepduration = 1536 roundup(MS_TO_TU(100), bs.bs_intval); 1537 if (bs.bs_sleepduration > bs.bs_dtimperiod) { 1538 bs.bs_sleepduration = 1539 roundup(bs.bs_sleepduration, bs.bs_dtimperiod); 1540 } 1541 1542 DPRINTF(ATH_DEBUG_BEACON, 1543 ("%s: intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u" 1544 " sleep %u\n" 1545 , __func__ 1546 , bs.bs_intval 1547 , bs.bs_nexttbtt 1548 , bs.bs_dtimperiod 1549 , bs.bs_nextdtim 1550 , bs.bs_bmissthreshold 1551 , bs.bs_sleepduration 1552 )); 1553 ath_hal_set_intr(ah, 0); 1554 ath_hal_set_beacon_timers(ah, &bs, 0/*XXX*/, 0, 0); 1555 sc->sc_imask |= HAL_INT_BMISS; 1556 ath_hal_set_intr(ah, sc->sc_imask); 1557 } 1558 #ifndef IEEE80211_STA_ONLY 1559 else { 1560 ath_hal_set_intr(ah, 0); 1561 if (nexttbtt == intval) 1562 intval |= HAL_BEACON_RESET_TSF; 1563 if (ic->ic_opmode == IEEE80211_M_IBSS) { 1564 /* 1565 * In IBSS mode enable the beacon timers but only 1566 * enable SWBA interrupts if we need to manually 1567 * prepare beacon frames. Otherwise we use a 1568 * self-linked tx descriptor and let the hardware 1569 * deal with things. 1570 */ 1571 intval |= HAL_BEACON_ENA; 1572 if (!sc->sc_veol) 1573 sc->sc_imask |= HAL_INT_SWBA; 1574 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) { 1575 /* 1576 * In AP mode we enable the beacon timers and 1577 * SWBA interrupts to prepare beacon frames. 1578 */ 1579 intval |= HAL_BEACON_ENA; 1580 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ 1581 } 1582 ath_hal_init_beacon(ah, nexttbtt, intval); 1583 ath_hal_set_intr(ah, sc->sc_imask); 1584 /* 1585 * When using a self-linked beacon descriptor in IBBS 1586 * mode load it once here. 1587 */ 1588 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol) 1589 ath_beacon_proc(sc, 0); 1590 } 1591 #endif 1592 } 1593 1594 int 1595 ath_desc_alloc(struct ath_softc *sc) 1596 { 1597 int i, bsize, error = -1; 1598 struct ath_desc *ds; 1599 struct ath_buf *bf; 1600 1601 /* allocate descriptors */ 1602 sc->sc_desc_len = sizeof(struct ath_desc) * 1603 (ATH_TXBUF * ATH_TXDESC + ATH_RXBUF + 1); 1604 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_desc_len, PAGE_SIZE, 1605 0, &sc->sc_dseg, 1, &sc->sc_dnseg, 0)) != 0) { 1606 printf("%s: unable to allocate control data, error = %d\n", 1607 sc->sc_dev.dv_xname, error); 1608 goto fail0; 1609 } 1610 1611 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg, 1612 sc->sc_desc_len, (caddr_t *)&sc->sc_desc, BUS_DMA_COHERENT)) != 0) { 1613 printf("%s: unable to map control data, error = %d\n", 1614 sc->sc_dev.dv_xname, error); 1615 goto fail1; 1616 } 1617 1618 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_desc_len, 1, 1619 sc->sc_desc_len, 0, 0, &sc->sc_ddmamap)) != 0) { 1620 printf("%s: unable to create control data DMA map, " 1621 "error = %d\n", sc->sc_dev.dv_xname, error); 1622 goto fail2; 1623 } 1624 1625 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_ddmamap, sc->sc_desc, 1626 sc->sc_desc_len, NULL, 0)) != 0) { 1627 printf("%s: unable to load control data DMA map, error = %d\n", 1628 sc->sc_dev.dv_xname, error); 1629 goto fail3; 1630 } 1631 1632 ds = sc->sc_desc; 1633 sc->sc_desc_paddr = sc->sc_ddmamap->dm_segs[0].ds_addr; 1634 1635 DPRINTF(ATH_DEBUG_XMIT_DESC|ATH_DEBUG_RECV_DESC, 1636 ("ath_desc_alloc: DMA map: %p (%lu) -> %p (%lu)\n", 1637 ds, (u_long)sc->sc_desc_len, 1638 (caddr_t) sc->sc_desc_paddr, /*XXX*/ (u_long) sc->sc_desc_len)); 1639 1640 /* allocate buffers */ 1641 bsize = sizeof(struct ath_buf) * (ATH_TXBUF + ATH_RXBUF + 1); 1642 bf = malloc(bsize, M_DEVBUF, 
M_NOWAIT | M_ZERO); 1643 if (bf == NULL) { 1644 printf("%s: unable to allocate Tx/Rx buffers\n", 1645 sc->sc_dev.dv_xname); 1646 error = ENOMEM; 1647 goto fail3; 1648 } 1649 sc->sc_bufptr = bf; 1650 1651 TAILQ_INIT(&sc->sc_rxbuf); 1652 for (i = 0; i < ATH_RXBUF; i++, bf++, ds++) { 1653 bf->bf_desc = ds; 1654 bf->bf_daddr = sc->sc_desc_paddr + 1655 ((caddr_t)ds - (caddr_t)sc->sc_desc); 1656 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 1657 MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) { 1658 printf("%s: unable to create Rx dmamap, error = %d\n", 1659 sc->sc_dev.dv_xname, error); 1660 goto fail4; 1661 } 1662 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 1663 } 1664 1665 TAILQ_INIT(&sc->sc_txbuf); 1666 for (i = 0; i < ATH_TXBUF; i++, bf++, ds += ATH_TXDESC) { 1667 bf->bf_desc = ds; 1668 bf->bf_daddr = sc->sc_desc_paddr + 1669 ((caddr_t)ds - (caddr_t)sc->sc_desc); 1670 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1671 ATH_TXDESC, MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) { 1672 printf("%s: unable to create Tx dmamap, error = %d\n", 1673 sc->sc_dev.dv_xname, error); 1674 goto fail5; 1675 } 1676 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 1677 } 1678 TAILQ_INIT(&sc->sc_txq); 1679 1680 /* beacon buffer */ 1681 bf->bf_desc = ds; 1682 bf->bf_daddr = sc->sc_desc_paddr + ((caddr_t)ds - (caddr_t)sc->sc_desc); 1683 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 0, 1684 &bf->bf_dmamap)) != 0) { 1685 printf("%s: unable to create beacon dmamap, error = %d\n", 1686 sc->sc_dev.dv_xname, error); 1687 goto fail5; 1688 } 1689 sc->sc_bcbuf = bf; 1690 return 0; 1691 1692 fail5: 1693 for (i = ATH_RXBUF; i < ATH_RXBUF + ATH_TXBUF; i++) { 1694 if (sc->sc_bufptr[i].bf_dmamap == NULL) 1695 continue; 1696 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap); 1697 } 1698 fail4: 1699 for (i = 0; i < ATH_RXBUF; i++) { 1700 if (sc->sc_bufptr[i].bf_dmamap == NULL) 1701 continue; 1702 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap); 1703 } 1704 fail3: 1705 bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap); 1706 fail2: 1707 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap); 1708 sc->sc_ddmamap = NULL; 1709 fail1: 1710 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_desc, sc->sc_desc_len); 1711 fail0: 1712 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg); 1713 return error; 1714 } 1715 1716 void 1717 ath_desc_free(struct ath_softc *sc) 1718 { 1719 struct ath_buf *bf; 1720 1721 bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap); 1722 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap); 1723 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg); 1724 1725 TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) { 1726 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1727 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 1728 m_freem(bf->bf_m); 1729 } 1730 TAILQ_FOREACH(bf, &sc->sc_txbuf, bf_list) 1731 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 1732 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 1733 if (bf->bf_m) { 1734 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1735 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 1736 m_freem(bf->bf_m); 1737 bf->bf_m = NULL; 1738 } 1739 } 1740 if (sc->sc_bcbuf != NULL) { 1741 bus_dmamap_unload(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap); 1742 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap); 1743 sc->sc_bcbuf = NULL; 1744 } 1745 1746 TAILQ_INIT(&sc->sc_rxbuf); 1747 TAILQ_INIT(&sc->sc_txbuf); 1748 TAILQ_INIT(&sc->sc_txq); 1749 free(sc->sc_bufptr, M_DEVBUF); 1750 sc->sc_bufptr = NULL; 1751 } 1752 1753 struct ieee80211_node * 1754 ath_node_alloc(struct 
ieee80211com *ic) 1755 { 1756 struct ath_node *an; 1757 1758 an = malloc(sizeof(*an), M_DEVBUF, M_NOWAIT | M_ZERO); 1759 if (an) { 1760 int i; 1761 for (i = 0; i < ATH_RHIST_SIZE; i++) 1762 an->an_rx_hist[i].arh_ticks = ATH_RHIST_NOTIME; 1763 an->an_rx_hist_next = ATH_RHIST_SIZE-1; 1764 return &an->an_node; 1765 } else 1766 return NULL; 1767 } 1768 1769 void 1770 ath_node_free(struct ieee80211com *ic, struct ieee80211_node *ni) 1771 { 1772 struct ath_softc *sc = ic->ic_if.if_softc; 1773 struct ath_buf *bf; 1774 1775 TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) { 1776 if (bf->bf_node == ni) 1777 bf->bf_node = NULL; 1778 } 1779 (*sc->sc_node_free)(ic, ni); 1780 } 1781 1782 void 1783 ath_node_copy(struct ieee80211com *ic, 1784 struct ieee80211_node *dst, const struct ieee80211_node *src) 1785 { 1786 struct ath_softc *sc = ic->ic_if.if_softc; 1787 1788 bcopy(&src[1], &dst[1], 1789 sizeof(struct ath_node) - sizeof(struct ieee80211_node)); 1790 (*sc->sc_node_copy)(ic, dst, src); 1791 } 1792 1793 u_int8_t 1794 ath_node_getrssi(struct ieee80211com *ic, const struct ieee80211_node *ni) 1795 { 1796 const struct ath_node *an = ATH_NODE(ni); 1797 int i, now, nsamples, rssi; 1798 1799 /* 1800 * Calculate the average over the last second of sampled data. 1801 */ 1802 now = ATH_TICKS(); 1803 nsamples = 0; 1804 rssi = 0; 1805 i = an->an_rx_hist_next; 1806 do { 1807 const struct ath_recv_hist *rh = &an->an_rx_hist[i]; 1808 if (rh->arh_ticks == ATH_RHIST_NOTIME) 1809 goto done; 1810 if (now - rh->arh_ticks > hz) 1811 goto done; 1812 rssi += rh->arh_rssi; 1813 nsamples++; 1814 if (i == 0) { 1815 i = ATH_RHIST_SIZE-1; 1816 } else { 1817 i--; 1818 } 1819 } while (i != an->an_rx_hist_next); 1820 done: 1821 /* 1822 * Return either the average or the last known 1823 * value if there is no recent data. 1824 */ 1825 return (nsamples ? rssi / nsamples : an->an_rx_hist[i].arh_rssi); 1826 } 1827 1828 int 1829 ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) 1830 { 1831 struct ath_hal *ah = sc->sc_ah; 1832 int error; 1833 struct mbuf *m; 1834 struct ath_desc *ds; 1835 1836 m = bf->bf_m; 1837 if (m == NULL) { 1838 /* 1839 * NB: by assigning a page to the rx dma buffer we 1840 * implicitly satisfy the Atheros requirement that 1841 * this buffer be cache-line-aligned and sized to be 1842 * multiple of the cache line size. Not doing this 1843 * causes weird stuff to happen (for the 5210 at least). 1844 */ 1845 m = ath_getmbuf(M_DONTWAIT, MT_DATA, MCLBYTES); 1846 if (m == NULL) { 1847 DPRINTF(ATH_DEBUG_ANY, 1848 ("%s: no mbuf/cluster\n", __func__)); 1849 sc->sc_stats.ast_rx_nombuf++; 1850 return ENOMEM; 1851 } 1852 bf->bf_m = m; 1853 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 1854 1855 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m, 1856 BUS_DMA_NOWAIT); 1857 if (error != 0) { 1858 DPRINTF(ATH_DEBUG_ANY, 1859 ("%s: ath_bus_dmamap_load_mbuf failed;" 1860 " error %d\n", __func__, error)); 1861 sc->sc_stats.ast_rx_busdma++; 1862 return error; 1863 } 1864 KASSERT(bf->bf_nseg == 1, 1865 ("ath_rxbuf_init: multi-segment packet; nseg %u", 1866 bf->bf_nseg)); 1867 } 1868 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 1869 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1870 1871 /* 1872 * Setup descriptors. For receive we always terminate 1873 * the descriptor list with a self-linked entry so we'll 1874 * not get overrun under high load (as can happen with a 1875 * 5212 when ANI processing enables PHY errors). 
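	 * (``Self-linked'' here means ds_link holds the descriptor's
	 * own physical address, bf_daddr, so the DMA engine spins on
	 * the final entry instead of running off the end of the list.)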
1876 * 1877 * To insure the last descriptor is self-linked we create 1878 * each descriptor as self-linked and add it to the end. As 1879 * each additional descriptor is added the previous self-linked 1880 * entry is ``fixed'' naturally. This should be safe even 1881 * if DMA is happening. When processing RX interrupts we 1882 * never remove/process the last, self-linked, entry on the 1883 * descriptor list. This insures the hardware always has 1884 * someplace to write a new frame. 1885 */ 1886 ds = bf->bf_desc; 1887 bzero(ds, sizeof(struct ath_desc)); 1888 #ifndef IEEE80211_STA_ONLY 1889 if (sc->sc_ic.ic_opmode != IEEE80211_M_HOSTAP) 1890 ds->ds_link = bf->bf_daddr; /* link to self */ 1891 #endif 1892 ds->ds_data = bf->bf_segs[0].ds_addr; 1893 ath_hal_setup_rx_desc(ah, ds 1894 , m->m_len /* buffer size */ 1895 , 0 1896 ); 1897 1898 if (sc->sc_rxlink != NULL) 1899 *sc->sc_rxlink = bf->bf_daddr; 1900 sc->sc_rxlink = &ds->ds_link; 1901 return 0; 1902 } 1903 1904 void 1905 ath_rx_proc(void *arg, int npending) 1906 { 1907 #define PA2DESC(_sc, _pa) \ 1908 ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \ 1909 ((_pa) - (_sc)->sc_desc_paddr))) 1910 struct ath_softc *sc = arg; 1911 struct ath_buf *bf; 1912 struct ieee80211com *ic = &sc->sc_ic; 1913 struct ifnet *ifp = &ic->ic_if; 1914 struct ath_hal *ah = sc->sc_ah; 1915 struct ath_desc *ds; 1916 struct mbuf *m; 1917 struct ieee80211_frame *wh; 1918 struct ieee80211_frame whbuf; 1919 struct ieee80211_rxinfo rxi; 1920 struct ieee80211_node *ni; 1921 struct ath_node *an; 1922 struct ath_recv_hist *rh; 1923 int len; 1924 u_int phyerr; 1925 HAL_STATUS status; 1926 1927 DPRINTF(ATH_DEBUG_RX_PROC, ("%s: pending %u\n", __func__, npending)); 1928 do { 1929 bf = TAILQ_FIRST(&sc->sc_rxbuf); 1930 if (bf == NULL) { /* NB: shouldn't happen */ 1931 printf("%s: ath_rx_proc: no buffer!\n", ifp->if_xname); 1932 break; 1933 } 1934 ds = bf->bf_desc; 1935 if (ds->ds_link == bf->bf_daddr) { 1936 /* NB: never process the self-linked entry at the end */ 1937 break; 1938 } 1939 m = bf->bf_m; 1940 if (m == NULL) { /* NB: shouldn't happen */ 1941 printf("%s: ath_rx_proc: no mbuf!\n", ifp->if_xname); 1942 continue; 1943 } 1944 /* XXX sync descriptor memory */ 1945 /* 1946 * Must provide the virtual address of the current 1947 * descriptor, the physical address, and the virtual 1948 * address of the next descriptor in the h/w chain. 1949 * This allows the HAL to look ahead to see if the 1950 * hardware is done with a descriptor by checking the 1951 * done bit in the following descriptor and the address 1952 * of the current descriptor the DMA engine is working 1953 * on. All this is necessary because of our use of 1954 * a self-linked list to avoid rx overruns. 1955 */ 1956 status = ath_hal_proc_rx_desc(ah, ds, 1957 bf->bf_daddr, PA2DESC(sc, ds->ds_link)); 1958 #ifdef AR_DEBUG 1959 if (ath_debug & ATH_DEBUG_RECV_DESC) 1960 ath_printrxbuf(bf, status == HAL_OK); 1961 #endif 1962 if (status == HAL_EINPROGRESS) 1963 break; 1964 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 1965 1966 if (ds->ds_rxstat.rs_more) { 1967 /* 1968 * Frame spans multiple descriptors; this 1969 * cannot happen yet as we don't support 1970 * jumbograms. If not in monitor mode, 1971 * discard the frame. 1972 */ 1973 1974 /* 1975 * Enable this if you want to see error 1976 * frames in Monitor mode. 1977 */ 1978 #ifdef ERROR_FRAMES 1979 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 1980 /* XXX statistic */ 1981 goto rx_next; 1982 } 1983 #endif 1984 /* fall thru for monitor mode handling... 
*/ 1985 1986 } else if (ds->ds_rxstat.rs_status != 0) { 1987 if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC) 1988 sc->sc_stats.ast_rx_crcerr++; 1989 if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO) 1990 sc->sc_stats.ast_rx_fifoerr++; 1991 if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) 1992 sc->sc_stats.ast_rx_badcrypt++; 1993 if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) { 1994 sc->sc_stats.ast_rx_phyerr++; 1995 phyerr = ds->ds_rxstat.rs_phyerr & 0x1f; 1996 sc->sc_stats.ast_rx_phy[phyerr]++; 1997 } 1998 1999 /* 2000 * reject error frames, we normally don't want 2001 * to see them in monitor mode. 2002 */ 2003 if ((ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT ) || 2004 (ds->ds_rxstat.rs_status & HAL_RXERR_PHY)) 2005 goto rx_next; 2006 2007 /* 2008 * In monitor mode, allow through packets that 2009 * cannot be decrypted 2010 */ 2011 if ((ds->ds_rxstat.rs_status & ~HAL_RXERR_DECRYPT) || 2012 sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR) 2013 goto rx_next; 2014 } 2015 2016 len = ds->ds_rxstat.rs_datalen; 2017 if (len < IEEE80211_MIN_LEN) { 2018 DPRINTF(ATH_DEBUG_RECV, ("%s: short packet %d\n", 2019 __func__, len)); 2020 sc->sc_stats.ast_rx_tooshort++; 2021 goto rx_next; 2022 } 2023 2024 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 2025 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 2026 2027 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2028 bf->bf_m = NULL; 2029 m->m_pkthdr.rcvif = ifp; 2030 m->m_pkthdr.len = m->m_len = len; 2031 2032 #if NBPFILTER > 0 2033 if (sc->sc_drvbpf) { 2034 struct mbuf mb; 2035 2036 sc->sc_rxtap.wr_flags = IEEE80211_RADIOTAP_F_FCS; 2037 sc->sc_rxtap.wr_rate = 2038 sc->sc_hwmap[ds->ds_rxstat.rs_rate] & 2039 IEEE80211_RATE_VAL; 2040 sc->sc_rxtap.wr_antenna = ds->ds_rxstat.rs_antenna; 2041 sc->sc_rxtap.wr_rssi = ds->ds_rxstat.rs_rssi; 2042 sc->sc_rxtap.wr_max_rssi = ic->ic_max_rssi; 2043 2044 mb.m_data = (caddr_t)&sc->sc_rxtap; 2045 mb.m_len = sc->sc_rxtap_len; 2046 mb.m_next = m; 2047 mb.m_nextpkt = NULL; 2048 mb.m_type = 0; 2049 mb.m_flags = 0; 2050 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN); 2051 } 2052 #endif 2053 m_adj(m, -IEEE80211_CRC_LEN); 2054 wh = mtod(m, struct ieee80211_frame *); 2055 rxi.rxi_flags = 0; 2056 if (!ath_softcrypto && (wh->i_fc[1] & IEEE80211_FC1_WEP)) { 2057 /* 2058 * WEP is decrypted by hardware. Clear WEP bit 2059 * and trim WEP header for ieee80211_input(). 2060 */ 2061 wh->i_fc[1] &= ~IEEE80211_FC1_WEP; 2062 bcopy(wh, &whbuf, sizeof(whbuf)); 2063 m_adj(m, IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN); 2064 wh = mtod(m, struct ieee80211_frame *); 2065 bcopy(&whbuf, wh, sizeof(whbuf)); 2066 /* 2067 * Also trim WEP ICV from the tail. 2068 */ 2069 m_adj(m, -IEEE80211_WEP_CRCLEN); 2070 /* 2071 * The header has probably moved. 2072 */ 2073 wh = mtod(m, struct ieee80211_frame *); 2074 2075 rxi.rxi_flags |= IEEE80211_RXI_HWDEC; 2076 } 2077 2078 /* 2079 * Locate the node for sender, track state, and 2080 * then pass this node (referenced) up to the 802.11 2081 * layer for its use. 2082 */ 2083 ni = ieee80211_find_rxnode(ic, wh); 2084 2085 /* 2086 * Record driver-specific state. 2087 */ 2088 an = ATH_NODE(ni); 2089 if (++(an->an_rx_hist_next) == ATH_RHIST_SIZE) 2090 an->an_rx_hist_next = 0; 2091 rh = &an->an_rx_hist[an->an_rx_hist_next]; 2092 rh->arh_ticks = ATH_TICKS(); 2093 rh->arh_rssi = ds->ds_rxstat.rs_rssi; 2094 rh->arh_antenna = ds->ds_rxstat.rs_antenna; 2095 2096 /* 2097 * Send frame up for processing. 
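		 * The rxi receive-info structure carries the per-frame RSSI,
		 * the hardware timestamp and, for frames the chip already
		 * decrypted, the IEEE80211_RXI_HWDEC flag set above, along
		 * to ieee80211_input().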
2098 */ 2099 rxi.rxi_rssi = ds->ds_rxstat.rs_rssi; 2100 rxi.rxi_tstamp = ds->ds_rxstat.rs_tstamp; 2101 ieee80211_input(ifp, m, ni, &rxi); 2102 2103 /* Handle the rate adaption */ 2104 ieee80211_rssadapt_input(ic, ni, &an->an_rssadapt, 2105 ds->ds_rxstat.rs_rssi); 2106 2107 /* 2108 * The frame may have caused the node to be marked for 2109 * reclamation (e.g. in response to a DEAUTH message) 2110 * so use release_node here instead of unref_node. 2111 */ 2112 ieee80211_release_node(ic, ni); 2113 2114 rx_next: 2115 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 2116 } while (ath_rxbuf_init(sc, bf) == 0); 2117 2118 ath_hal_set_rx_signal(ah); /* rx signal state monitoring */ 2119 ath_hal_start_rx(ah); /* in case of RXEOL */ 2120 #undef PA2DESC 2121 } 2122 2123 /* 2124 * XXX Size of an ACK control frame in bytes. 2125 */ 2126 #define IEEE80211_ACK_SIZE (2+2+IEEE80211_ADDR_LEN+4) 2127 2128 int 2129 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, 2130 struct ath_buf *bf, struct mbuf *m0) 2131 { 2132 struct ieee80211com *ic = &sc->sc_ic; 2133 struct ath_hal *ah = sc->sc_ah; 2134 struct ifnet *ifp = &sc->sc_ic.ic_if; 2135 int i, error, iswep, hdrlen, pktlen, len, s; 2136 u_int8_t rix, cix, txrate, ctsrate; 2137 struct ath_desc *ds; 2138 struct ieee80211_frame *wh; 2139 struct ieee80211_key *k; 2140 u_int32_t iv; 2141 u_int8_t *ivp; 2142 u_int8_t hdrbuf[sizeof(struct ieee80211_frame) + 2143 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN]; 2144 u_int subtype, flags, ctsduration, antenna; 2145 HAL_PKT_TYPE atype; 2146 const HAL_RATE_TABLE *rt; 2147 HAL_BOOL shortPreamble; 2148 struct ath_node *an; 2149 u_int8_t hwqueue = HAL_TX_QUEUE_ID_DATA_MIN; 2150 2151 wh = mtod(m0, struct ieee80211_frame *); 2152 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; 2153 hdrlen = sizeof(struct ieee80211_frame); 2154 pktlen = m0->m_pkthdr.len; 2155 2156 if (ath_softcrypto && iswep) { 2157 k = ieee80211_get_txkey(ic, wh, ni); 2158 if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL) 2159 return ENOMEM; 2160 wh = mtod(m0, struct ieee80211_frame *); 2161 2162 /* reset len in case we got a new mbuf */ 2163 pktlen = m0->m_pkthdr.len; 2164 } else if (!ath_softcrypto && iswep) { 2165 bcopy(mtod(m0, caddr_t), hdrbuf, hdrlen); 2166 m_adj(m0, hdrlen); 2167 M_PREPEND(m0, sizeof(hdrbuf), M_DONTWAIT); 2168 if (m0 == NULL) { 2169 sc->sc_stats.ast_tx_nombuf++; 2170 return ENOMEM; 2171 } 2172 ivp = hdrbuf + hdrlen; 2173 wh = mtod(m0, struct ieee80211_frame *); 2174 /* 2175 * XXX 2176 * IV must not duplicate during the lifetime of the key. 2177 * But no mechanism to renew keys is defined in IEEE 802.11 2178 * for WEP. And the IV may be duplicated at other stations 2179 * because the session key itself is shared. So we use a 2180 * pseudo random IV for now, though it is not the right way. 2181 * 2182 * NB: Rather than use a strictly random IV we select a 2183 * random one to start and then increment the value for 2184 * each frame. This is an explicit tradeoff between 2185 * overhead and security. Given the basic insecurity of 2186 * WEP this seems worthwhile. 2187 */ 2188 2189 /* 2190 * Skip 'bad' IVs from Fluhrer/Mantin/Shamir: 2191 * (B, 255, N) with 3 <= B < 16 and 0 <= N <= 255 2192 */ 2193 iv = ic->ic_iv; 2194 if ((iv & 0xff00) == 0xff00) { 2195 int B = (iv & 0xff0000) >> 16; 2196 if (3 <= B && B < 16) 2197 iv = (B+1) << 16; 2198 } 2199 ic->ic_iv = iv + 1; 2200 2201 /* 2202 * NB: Preserve byte order of IV for packet 2203 * sniffers; it doesn't matter otherwise. 
2204 */ 2205 #if AH_BYTE_ORDER == AH_BIG_ENDIAN 2206 ivp[0] = iv >> 0; 2207 ivp[1] = iv >> 8; 2208 ivp[2] = iv >> 16; 2209 #else 2210 ivp[2] = iv >> 0; 2211 ivp[1] = iv >> 8; 2212 ivp[0] = iv >> 16; 2213 #endif 2214 ivp[3] = ic->ic_wep_txkey << 6; /* Key ID and pad */ 2215 bcopy(hdrbuf, mtod(m0, caddr_t), sizeof(hdrbuf)); 2216 /* 2217 * The length of hdrlen and pktlen must be increased for WEP 2218 */ 2219 len = IEEE80211_WEP_IVLEN + 2220 IEEE80211_WEP_KIDLEN + 2221 IEEE80211_WEP_CRCLEN; 2222 hdrlen += len; 2223 pktlen += len; 2224 } 2225 pktlen += IEEE80211_CRC_LEN; 2226 2227 /* 2228 * Load the DMA map so any coalescing is done. This 2229 * also calculates the number of descriptors we need. 2230 */ 2231 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, 2232 BUS_DMA_NOWAIT); 2233 /* 2234 * Discard null packets and check for packets that 2235 * require too many TX descriptors. We try to convert 2236 * the latter to a cluster. 2237 */ 2238 if (error == EFBIG) { /* too many desc's, linearize */ 2239 sc->sc_stats.ast_tx_linear++; 2240 if (m_defrag(m0, M_DONTWAIT)) { 2241 sc->sc_stats.ast_tx_nomcl++; 2242 m_freem(m0); 2243 return ENOMEM; 2244 } 2245 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, 2246 BUS_DMA_NOWAIT); 2247 if (error != 0) { 2248 sc->sc_stats.ast_tx_busdma++; 2249 m_freem(m0); 2250 return error; 2251 } 2252 KASSERT(bf->bf_nseg == 1, 2253 ("ath_tx_start: packet not one segment; nseg %u", 2254 bf->bf_nseg)); 2255 } else if (error != 0) { 2256 sc->sc_stats.ast_tx_busdma++; 2257 m_freem(m0); 2258 return error; 2259 } else if (bf->bf_nseg == 0) { /* null packet, discard */ 2260 sc->sc_stats.ast_tx_nodata++; 2261 m_freem(m0); 2262 return EIO; 2263 } 2264 DPRINTF(ATH_DEBUG_XMIT, ("%s: m %p len %u\n", __func__, m0, pktlen)); 2265 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 2266 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 2267 bf->bf_m = m0; 2268 bf->bf_node = ni; /* NB: held reference */ 2269 an = ATH_NODE(ni); 2270 2271 /* setup descriptors */ 2272 ds = bf->bf_desc; 2273 rt = sc->sc_currates; 2274 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 2275 2276 /* 2277 * Calculate Atheros packet type from IEEE80211 packet header 2278 * and setup for rate calculations. 
2279 */ 2280 bf->bf_id.id_node = NULL; 2281 atype = HAL_PKT_TYPE_NORMAL; /* default */ 2282 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 2283 case IEEE80211_FC0_TYPE_MGT: 2284 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2285 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) { 2286 atype = HAL_PKT_TYPE_BEACON; 2287 } else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) { 2288 atype = HAL_PKT_TYPE_PROBE_RESP; 2289 } else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) { 2290 atype = HAL_PKT_TYPE_ATIM; 2291 } 2292 rix = 0; /* XXX lowest rate */ 2293 break; 2294 case IEEE80211_FC0_TYPE_CTL: 2295 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2296 if (subtype == IEEE80211_FC0_SUBTYPE_PS_POLL) 2297 atype = HAL_PKT_TYPE_PSPOLL; 2298 rix = 0; /* XXX lowest rate */ 2299 break; 2300 default: 2301 /* remember link conditions for rate adaptation algorithm */ 2302 if (ic->ic_fixed_rate == -1) { 2303 bf->bf_id.id_len = m0->m_pkthdr.len; 2304 bf->bf_id.id_rateidx = ni->ni_txrate; 2305 bf->bf_id.id_node = ni; 2306 bf->bf_id.id_rssi = ath_node_getrssi(ic, ni); 2307 } 2308 ni->ni_txrate = ieee80211_rssadapt_choose(&an->an_rssadapt, 2309 &ni->ni_rates, wh, m0->m_pkthdr.len, ic->ic_fixed_rate, 2310 ifp->if_xname, 0); 2311 rix = sc->sc_rixmap[ni->ni_rates.rs_rates[ni->ni_txrate] & 2312 IEEE80211_RATE_VAL]; 2313 if (rix == 0xff) { 2314 printf("%s: bogus xmit rate 0x%x (idx 0x%x)\n", 2315 ifp->if_xname, ni->ni_rates.rs_rates[ni->ni_txrate], 2316 ni->ni_txrate); 2317 sc->sc_stats.ast_tx_badrate++; 2318 m_freem(m0); 2319 return EIO; 2320 } 2321 break; 2322 } 2323 2324 /* 2325 * NB: the 802.11 layer marks whether or not we should 2326 * use short preamble based on the current mode and 2327 * negotiated parameters. 2328 */ 2329 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 2330 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 2331 txrate = rt->info[rix].rateCode | rt->info[rix].shortPreamble; 2332 shortPreamble = AH_TRUE; 2333 sc->sc_stats.ast_tx_shortpre++; 2334 } else { 2335 txrate = rt->info[rix].rateCode; 2336 shortPreamble = AH_FALSE; 2337 } 2338 2339 /* 2340 * Calculate miscellaneous flags. 2341 */ 2342 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for wep errors */ 2343 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2344 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ 2345 sc->sc_stats.ast_tx_noack++; 2346 } else if (pktlen > ic->ic_rtsthreshold) { 2347 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ 2348 sc->sc_stats.ast_tx_rts++; 2349 } 2350 2351 /* 2352 * Calculate duration. This logically belongs in the 802.11 2353 * layer but it lacks sufficient information to calculate it. 2354 */ 2355 if ((flags & HAL_TXDESC_NOACK) == 0 && 2356 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 2357 u_int16_t dur; 2358 /* 2359 * XXX not right with fragmentation. 2360 */ 2361 dur = ath_hal_computetxtime(ah, rt, IEEE80211_ACK_SIZE, 2362 rix, shortPreamble); 2363 *((u_int16_t*) wh->i_dur) = htole16(dur); 2364 } 2365 2366 /* 2367 * Calculate RTS/CTS rate and duration if needed. 2368 */ 2369 ctsduration = 0; 2370 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) { 2371 /* 2372 * CTS transmit rate is derived from the transmit rate 2373 * by looking in the h/w rate table. We must also factor 2374 * in whether or not a short preamble is to be used. 
2375 */ 2376 cix = rt->info[rix].controlRate; 2377 ctsrate = rt->info[cix].rateCode; 2378 if (shortPreamble) 2379 ctsrate |= rt->info[cix].shortPreamble; 2380 /* 2381 * Compute the transmit duration based on the size 2382 * of an ACK frame. We call into the HAL to do the 2383 * computation since it depends on the characteristics 2384 * of the actual PHY being used. 2385 */ 2386 if (flags & HAL_TXDESC_RTSENA) { /* SIFS + CTS */ 2387 ctsduration += ath_hal_computetxtime(ah, 2388 rt, IEEE80211_ACK_SIZE, cix, shortPreamble); 2389 } 2390 /* SIFS + data */ 2391 ctsduration += ath_hal_computetxtime(ah, 2392 rt, pktlen, rix, shortPreamble); 2393 if ((flags & HAL_TXDESC_NOACK) == 0) { /* SIFS + ACK */ 2394 ctsduration += ath_hal_computetxtime(ah, 2395 rt, IEEE80211_ACK_SIZE, cix, shortPreamble); 2396 } 2397 } else 2398 ctsrate = 0; 2399 2400 /* 2401 * For now use the antenna on which the last good 2402 * frame was received on. We assume this field is 2403 * initialized to 0 which gives us ``auto'' or the 2404 * ``default'' antenna. 2405 */ 2406 if (an->an_tx_antenna) { 2407 antenna = an->an_tx_antenna; 2408 } else { 2409 antenna = an->an_rx_hist[an->an_rx_hist_next].arh_antenna; 2410 } 2411 2412 #if NBPFILTER > 0 2413 if (ic->ic_rawbpf) 2414 bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT); 2415 2416 if (sc->sc_drvbpf) { 2417 struct mbuf mb; 2418 2419 sc->sc_txtap.wt_flags = 0; 2420 if (shortPreamble) 2421 sc->sc_txtap.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2422 if (!ath_softcrypto && iswep) 2423 sc->sc_txtap.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2424 sc->sc_txtap.wt_rate = ni->ni_rates.rs_rates[ni->ni_txrate] & 2425 IEEE80211_RATE_VAL; 2426 sc->sc_txtap.wt_txpower = 30; 2427 sc->sc_txtap.wt_antenna = antenna; 2428 sc->sc_txtap.wt_hwqueue = hwqueue; 2429 2430 mb.m_data = (caddr_t)&sc->sc_txtap; 2431 mb.m_len = sc->sc_txtap_len; 2432 mb.m_next = m0; 2433 mb.m_nextpkt = NULL; 2434 mb.m_type = 0; 2435 mb.m_flags = 0; 2436 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT); 2437 } 2438 #endif 2439 2440 /* 2441 * Formulate first tx descriptor with tx controls. 2442 */ 2443 /* XXX check return value? */ 2444 ath_hal_setup_tx_desc(ah, ds 2445 , pktlen /* packet length */ 2446 , hdrlen /* header length */ 2447 , atype /* Atheros packet type */ 2448 , 60 /* txpower XXX */ 2449 , txrate, 1+10 /* series 0 rate/tries */ 2450 , iswep ? sc->sc_ic.ic_wep_txkey : HAL_TXKEYIX_INVALID 2451 , antenna /* antenna mode */ 2452 , flags /* flags */ 2453 , ctsrate /* rts/cts rate */ 2454 , ctsduration /* rts/cts duration */ 2455 ); 2456 #ifdef notyet 2457 ath_hal_setup_xtx_desc(ah, ds 2458 , AH_FALSE /* short preamble */ 2459 , 0, 0 /* series 1 rate/tries */ 2460 , 0, 0 /* series 2 rate/tries */ 2461 , 0, 0 /* series 3 rate/tries */ 2462 ); 2463 #endif 2464 /* 2465 * Fillin the remainder of the descriptor info. 2466 */ 2467 for (i = 0; i < bf->bf_nseg; i++, ds++) { 2468 ds->ds_data = bf->bf_segs[i].ds_addr; 2469 if (i == bf->bf_nseg - 1) { 2470 ds->ds_link = 0; 2471 } else { 2472 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1); 2473 } 2474 ath_hal_fill_tx_desc(ah, ds 2475 , bf->bf_segs[i].ds_len /* segment length */ 2476 , i == 0 /* first segment */ 2477 , i == bf->bf_nseg - 1 /* last segment */ 2478 ); 2479 DPRINTF(ATH_DEBUG_XMIT, 2480 ("%s: %d: %08x %08x %08x %08x %08x %08x\n", 2481 __func__, i, ds->ds_link, ds->ds_data, 2482 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1])); 2483 } 2484 2485 /* 2486 * Insert the frame on the outbound list and 2487 * pass it on to the hardware. 
2488 */ 2489 s = splnet(); 2490 TAILQ_INSERT_TAIL(&sc->sc_txq, bf, bf_list); 2491 if (sc->sc_txlink == NULL) { 2492 ath_hal_put_tx_buf(ah, sc->sc_txhalq[hwqueue], bf->bf_daddr); 2493 DPRINTF(ATH_DEBUG_XMIT, ("%s: TXDP0 = %p (%p)\n", __func__, 2494 (caddr_t)bf->bf_daddr, bf->bf_desc)); 2495 } else { 2496 *sc->sc_txlink = bf->bf_daddr; 2497 DPRINTF(ATH_DEBUG_XMIT, ("%s: link(%p)=%p (%p)\n", __func__, 2498 sc->sc_txlink, (caddr_t)bf->bf_daddr, bf->bf_desc)); 2499 } 2500 sc->sc_txlink = &bf->bf_desc[bf->bf_nseg - 1].ds_link; 2501 splx(s); 2502 2503 ath_hal_tx_start(ah, sc->sc_txhalq[hwqueue]); 2504 return 0; 2505 } 2506 2507 void 2508 ath_tx_proc(void *arg, int npending) 2509 { 2510 struct ath_softc *sc = arg; 2511 struct ath_hal *ah = sc->sc_ah; 2512 struct ath_buf *bf; 2513 struct ieee80211com *ic = &sc->sc_ic; 2514 struct ifnet *ifp = &ic->ic_if; 2515 struct ath_desc *ds; 2516 struct ieee80211_node *ni; 2517 struct ath_node *an; 2518 int sr, lr, s; 2519 HAL_STATUS status; 2520 2521 for (;;) { 2522 s = splnet(); 2523 bf = TAILQ_FIRST(&sc->sc_txq); 2524 if (bf == NULL) { 2525 sc->sc_txlink = NULL; 2526 splx(s); 2527 break; 2528 } 2529 /* only the last descriptor is needed */ 2530 ds = &bf->bf_desc[bf->bf_nseg - 1]; 2531 status = ath_hal_proc_tx_desc(ah, ds); 2532 #ifdef AR_DEBUG 2533 if (ath_debug & ATH_DEBUG_XMIT_DESC) 2534 ath_printtxbuf(bf, status == HAL_OK); 2535 #endif 2536 if (status == HAL_EINPROGRESS) { 2537 splx(s); 2538 break; 2539 } 2540 TAILQ_REMOVE(&sc->sc_txq, bf, bf_list); 2541 splx(s); 2542 2543 ni = bf->bf_node; 2544 if (ni != NULL) { 2545 an = (struct ath_node *) ni; 2546 if (ds->ds_txstat.ts_status == 0) { 2547 if (bf->bf_id.id_node != NULL) 2548 ieee80211_rssadapt_raise_rate(ic, 2549 &an->an_rssadapt, &bf->bf_id); 2550 an->an_tx_antenna = ds->ds_txstat.ts_antenna; 2551 } else { 2552 if (bf->bf_id.id_node != NULL) 2553 ieee80211_rssadapt_lower_rate(ic, ni, 2554 &an->an_rssadapt, &bf->bf_id); 2555 ifp->if_oerrors++; 2556 if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY) 2557 sc->sc_stats.ast_tx_xretries++; 2558 if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO) 2559 sc->sc_stats.ast_tx_fifoerr++; 2560 if (ds->ds_txstat.ts_status & HAL_TXERR_FILT) 2561 sc->sc_stats.ast_tx_filtered++; 2562 an->an_tx_antenna = 0; /* invalidate */ 2563 } 2564 sr = ds->ds_txstat.ts_shortretry; 2565 lr = ds->ds_txstat.ts_longretry; 2566 sc->sc_stats.ast_tx_shortretry += sr; 2567 sc->sc_stats.ast_tx_longretry += lr; 2568 /* 2569 * Reclaim reference to node. 2570 * 2571 * NB: the node may be reclaimed here if, for example 2572 * this is a DEAUTH message that was sent and the 2573 * node was timed out due to inactivity. 2574 */ 2575 ieee80211_release_node(ic, ni); 2576 } 2577 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 2578 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2579 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2580 m_freem(bf->bf_m); 2581 bf->bf_m = NULL; 2582 bf->bf_node = NULL; 2583 2584 s = splnet(); 2585 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 2586 splx(s); 2587 } 2588 ifp->if_flags &= ~IFF_OACTIVE; 2589 sc->sc_tx_timer = 0; 2590 2591 ath_start(ifp); 2592 } 2593 2594 /* 2595 * Drain the transmit queue and reclaim resources. 
2596 */ 2597 void 2598 ath_draintxq(struct ath_softc *sc) 2599 { 2600 struct ath_hal *ah = sc->sc_ah; 2601 struct ieee80211com *ic = &sc->sc_ic; 2602 struct ifnet *ifp = &ic->ic_if; 2603 struct ieee80211_node *ni; 2604 struct ath_buf *bf; 2605 int s, i; 2606 2607 /* XXX return value */ 2608 if (!sc->sc_invalid) { 2609 for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) { 2610 /* don't touch the hardware if marked invalid */ 2611 (void) ath_hal_stop_tx_dma(ah, sc->sc_txhalq[i]); 2612 DPRINTF(ATH_DEBUG_RESET, 2613 ("%s: tx queue %d (%p), link %p\n", __func__, i, 2614 (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah, 2615 sc->sc_txhalq[i]), sc->sc_txlink)); 2616 } 2617 (void) ath_hal_stop_tx_dma(ah, sc->sc_bhalq); 2618 DPRINTF(ATH_DEBUG_RESET, 2619 ("%s: beacon queue (%p)\n", __func__, 2620 (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah, sc->sc_bhalq))); 2621 } 2622 for (;;) { 2623 s = splnet(); 2624 bf = TAILQ_FIRST(&sc->sc_txq); 2625 if (bf == NULL) { 2626 sc->sc_txlink = NULL; 2627 splx(s); 2628 break; 2629 } 2630 TAILQ_REMOVE(&sc->sc_txq, bf, bf_list); 2631 splx(s); 2632 #ifdef AR_DEBUG 2633 if (ath_debug & ATH_DEBUG_RESET) { 2634 ath_printtxbuf(bf, 2635 ath_hal_proc_tx_desc(ah, bf->bf_desc) == HAL_OK); 2636 } 2637 #endif /* AR_DEBUG */ 2638 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2639 m_freem(bf->bf_m); 2640 bf->bf_m = NULL; 2641 ni = bf->bf_node; 2642 bf->bf_node = NULL; 2643 s = splnet(); 2644 if (ni != NULL) { 2645 /* 2646 * Reclaim node reference. 2647 */ 2648 ieee80211_release_node(ic, ni); 2649 } 2650 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 2651 splx(s); 2652 } 2653 ifp->if_flags &= ~IFF_OACTIVE; 2654 sc->sc_tx_timer = 0; 2655 } 2656 2657 /* 2658 * Disable the receive h/w in preparation for a reset. 2659 */ 2660 void 2661 ath_stoprecv(struct ath_softc *sc) 2662 { 2663 #define PA2DESC(_sc, _pa) \ 2664 ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \ 2665 ((_pa) - (_sc)->sc_desc_paddr))) 2666 struct ath_hal *ah = sc->sc_ah; 2667 2668 ath_hal_stop_pcu_recv(ah); /* disable PCU */ 2669 ath_hal_set_rx_filter(ah, 0); /* clear recv filter */ 2670 ath_hal_stop_rx_dma(ah); /* disable DMA engine */ 2671 #ifdef AR_DEBUG 2672 if (ath_debug & ATH_DEBUG_RESET) { 2673 struct ath_buf *bf; 2674 2675 printf("%s: rx queue %p, link %p\n", __func__, 2676 (caddr_t)(u_intptr_t)ath_hal_get_rx_buf(ah), sc->sc_rxlink); 2677 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 2678 struct ath_desc *ds = bf->bf_desc; 2679 if (ath_hal_proc_rx_desc(ah, ds, bf->bf_daddr, 2680 PA2DESC(sc, ds->ds_link)) == HAL_OK) 2681 ath_printrxbuf(bf, 1); 2682 } 2683 } 2684 #endif 2685 sc->sc_rxlink = NULL; /* just in case */ 2686 #undef PA2DESC 2687 } 2688 2689 /* 2690 * Enable the receive h/w following a reset. 2691 */ 2692 int 2693 ath_startrecv(struct ath_softc *sc) 2694 { 2695 struct ath_hal *ah = sc->sc_ah; 2696 struct ath_buf *bf; 2697 2698 sc->sc_rxlink = NULL; 2699 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 2700 int error = ath_rxbuf_init(sc, bf); 2701 if (error != 0) { 2702 DPRINTF(ATH_DEBUG_RECV, 2703 ("%s: ath_rxbuf_init failed %d\n", 2704 __func__, error)); 2705 return error; 2706 } 2707 } 2708 2709 bf = TAILQ_FIRST(&sc->sc_rxbuf); 2710 ath_hal_put_rx_buf(ah, bf->bf_daddr); 2711 ath_hal_start_rx(ah); /* enable recv descriptors */ 2712 ath_mode_init(sc); /* set filters, etc. */ 2713 ath_hal_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ 2714 return 0; 2715 } 2716 2717 /* 2718 * Set/change channels. If the channel is really being changed, 2719 * it's done by resetting the chip. 
To accomplish this we must 2720 * first cleanup any pending DMA, then restart stuff after a la 2721 * ath_init. 2722 */ 2723 int 2724 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 2725 { 2726 struct ath_hal *ah = sc->sc_ah; 2727 struct ieee80211com *ic = &sc->sc_ic; 2728 struct ifnet *ifp = &ic->ic_if; 2729 2730 DPRINTF(ATH_DEBUG_ANY, ("%s: %u (%u MHz) -> %u (%u MHz)\n", __func__, 2731 ieee80211_chan2ieee(ic, ic->ic_ibss_chan), 2732 ic->ic_ibss_chan->ic_freq, 2733 ieee80211_chan2ieee(ic, chan), chan->ic_freq)); 2734 if (chan != ic->ic_ibss_chan) { 2735 HAL_STATUS status; 2736 HAL_CHANNEL hchan; 2737 enum ieee80211_phymode mode; 2738 2739 /* 2740 * To switch channels clear any pending DMA operations; 2741 * wait long enough for the RX fifo to drain, reset the 2742 * hardware at the new frequency, and then re-enable 2743 * the relevant bits of the h/w. 2744 */ 2745 ath_hal_set_intr(ah, 0); /* disable interrupts */ 2746 ath_draintxq(sc); /* clear pending tx frames */ 2747 ath_stoprecv(sc); /* turn off frame recv */ 2748 /* 2749 * Convert to a HAL channel description with 2750 * the flags constrained to reflect the current 2751 * operating mode. 2752 */ 2753 hchan.channel = chan->ic_freq; 2754 hchan.channelFlags = ath_chan2flags(ic, chan); 2755 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, 2756 &status)) { 2757 printf("%s: ath_chan_set: unable to reset " 2758 "channel %u (%u MHz)\n", ifp->if_xname, 2759 ieee80211_chan2ieee(ic, chan), chan->ic_freq); 2760 return EIO; 2761 } 2762 ath_set_slot_time(sc); 2763 /* 2764 * Re-enable rx framework. 2765 */ 2766 if (ath_startrecv(sc) != 0) { 2767 printf("%s: ath_chan_set: unable to restart recv " 2768 "logic\n", ifp->if_xname); 2769 return EIO; 2770 } 2771 2772 #if NBPFILTER > 0 2773 /* 2774 * Update BPF state. 2775 */ 2776 sc->sc_txtap.wt_chan_freq = sc->sc_rxtap.wr_chan_freq = 2777 htole16(chan->ic_freq); 2778 sc->sc_txtap.wt_chan_flags = sc->sc_rxtap.wr_chan_flags = 2779 htole16(chan->ic_flags); 2780 #endif 2781 2782 /* 2783 * Change channels and update the h/w rate map 2784 * if we're switching; e.g. 11a to 11b/g. 2785 */ 2786 ic->ic_ibss_chan = chan; 2787 mode = ieee80211_chan2mode(ic, chan); 2788 if (mode != sc->sc_curmode) 2789 ath_setcurmode(sc, mode); 2790 2791 /* 2792 * Re-enable interrupts. 2793 */ 2794 ath_hal_set_intr(ah, sc->sc_imask); 2795 } 2796 return 0; 2797 } 2798 2799 void 2800 ath_next_scan(void *arg) 2801 { 2802 struct ath_softc *sc = arg; 2803 struct ieee80211com *ic = &sc->sc_ic; 2804 struct ifnet *ifp = &ic->ic_if; 2805 int s; 2806 2807 /* don't call ath_start w/o network interrupts blocked */ 2808 s = splnet(); 2809 2810 if (ic->ic_state == IEEE80211_S_SCAN) 2811 ieee80211_next_scan(ifp); 2812 splx(s); 2813 } 2814 2815 int 2816 ath_set_slot_time(struct ath_softc *sc) 2817 { 2818 struct ath_hal *ah = sc->sc_ah; 2819 struct ieee80211com *ic = &sc->sc_ic; 2820 2821 if (ic->ic_flags & IEEE80211_F_SHSLOT) 2822 return (ath_hal_set_slot_time(ah, HAL_SLOT_TIME_9)); 2823 2824 return (0); 2825 } 2826 2827 /* 2828 * Periodically recalibrate the PHY to account 2829 * for temperature/environment changes. 2830 */ 2831 void 2832 ath_calibrate(void *arg) 2833 { 2834 struct ath_softc *sc = arg; 2835 struct ath_hal *ah = sc->sc_ah; 2836 struct ieee80211com *ic = &sc->sc_ic; 2837 struct ieee80211_channel *c; 2838 HAL_CHANNEL hchan; 2839 int s; 2840 2841 sc->sc_stats.ast_per_cal++; 2842 2843 /* 2844 * Convert to a HAL channel description with the flags 2845 * constrained to reflect the current operating mode. 
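	 * (A HAL_CHANNEL is simply the frequency in MHz plus the
	 * channelFlags computed by ath_chan2flags() for that mode.)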
2846 */ 2847 c = ic->ic_ibss_chan; 2848 hchan.channel = c->ic_freq; 2849 hchan.channelFlags = ath_chan2flags(ic, c); 2850 2851 s = splnet(); 2852 DPRINTF(ATH_DEBUG_CALIBRATE, 2853 ("%s: channel %u/%x\n", __func__, c->ic_freq, c->ic_flags)); 2854 2855 if (ath_hal_get_rf_gain(ah) == HAL_RFGAIN_NEED_CHANGE) { 2856 /* 2857 * Rfgain is out of bounds, reset the chip 2858 * to load new gain values. 2859 */ 2860 sc->sc_stats.ast_per_rfgain++; 2861 ath_reset(sc, 1); 2862 } 2863 if (!ath_hal_calibrate(ah, &hchan)) { 2864 DPRINTF(ATH_DEBUG_ANY, 2865 ("%s: calibration of channel %u failed\n", 2866 __func__, c->ic_freq)); 2867 sc->sc_stats.ast_per_calfail++; 2868 } 2869 timeout_add_sec(&sc->sc_cal_to, ath_calinterval); 2870 splx(s); 2871 } 2872 2873 void 2874 ath_ledstate(struct ath_softc *sc, enum ieee80211_state state) 2875 { 2876 HAL_LED_STATE led = HAL_LED_INIT; 2877 u_int32_t softled = AR5K_SOFTLED_OFF; 2878 2879 switch (state) { 2880 case IEEE80211_S_INIT: 2881 break; 2882 case IEEE80211_S_SCAN: 2883 led = HAL_LED_SCAN; 2884 break; 2885 case IEEE80211_S_AUTH: 2886 led = HAL_LED_AUTH; 2887 break; 2888 case IEEE80211_S_ASSOC: 2889 led = HAL_LED_ASSOC; 2890 softled = AR5K_SOFTLED_ON; 2891 break; 2892 case IEEE80211_S_RUN: 2893 led = HAL_LED_RUN; 2894 softled = AR5K_SOFTLED_ON; 2895 break; 2896 } 2897 2898 ath_hal_set_ledstate(sc->sc_ah, led); 2899 if (sc->sc_softled) { 2900 ath_hal_set_gpio_output(sc->sc_ah, AR5K_SOFTLED_PIN); 2901 ath_hal_set_gpio(sc->sc_ah, AR5K_SOFTLED_PIN, softled); 2902 } 2903 } 2904 2905 int 2906 ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 2907 { 2908 struct ifnet *ifp = &ic->ic_if; 2909 struct ath_softc *sc = ifp->if_softc; 2910 struct ath_hal *ah = sc->sc_ah; 2911 struct ieee80211_node *ni; 2912 const u_int8_t *bssid; 2913 int error, i; 2914 2915 u_int32_t rfilt; 2916 2917 DPRINTF(ATH_DEBUG_ANY, ("%s: %s -> %s\n", __func__, 2918 ieee80211_state_name[ic->ic_state], 2919 ieee80211_state_name[nstate])); 2920 2921 timeout_del(&sc->sc_scan_to); 2922 timeout_del(&sc->sc_cal_to); 2923 ath_ledstate(sc, nstate); 2924 2925 if (nstate == IEEE80211_S_INIT) { 2926 timeout_del(&sc->sc_rssadapt_to); 2927 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 2928 ath_hal_set_intr(ah, sc->sc_imask); 2929 return (*sc->sc_newstate)(ic, nstate, arg); 2930 } 2931 ni = ic->ic_bss; 2932 error = ath_chan_set(sc, ni->ni_chan); 2933 if (error != 0) 2934 goto bad; 2935 rfilt = ath_calcrxfilter(sc); 2936 if (nstate == IEEE80211_S_SCAN || 2937 ic->ic_opmode == IEEE80211_M_MONITOR) { 2938 bssid = sc->sc_broadcast_addr; 2939 } else { 2940 bssid = ni->ni_bssid; 2941 } 2942 ath_hal_set_rx_filter(ah, rfilt); 2943 DPRINTF(ATH_DEBUG_ANY, ("%s: RX filter 0x%x bssid %s\n", 2944 __func__, rfilt, ether_sprintf((u_char*)bssid))); 2945 2946 if (nstate == IEEE80211_S_RUN && ic->ic_opmode == IEEE80211_M_STA) { 2947 ath_hal_set_associd(ah, bssid, ni->ni_associd); 2948 } else { 2949 ath_hal_set_associd(ah, bssid, 0); 2950 } 2951 2952 if (!ath_softcrypto && (ic->ic_flags & IEEE80211_F_WEPON)) { 2953 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 2954 if (ath_hal_is_key_valid(ah, i)) 2955 ath_hal_set_key_lladdr(ah, i, bssid); 2956 } 2957 } 2958 2959 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 2960 /* nothing to do */ 2961 } else if (nstate == IEEE80211_S_RUN) { 2962 DPRINTF(ATH_DEBUG_ANY, ("%s(RUN): " 2963 "ic_flags=0x%08x iv=%d bssid=%s " 2964 "capinfo=0x%04x chan=%d\n", 2965 __func__, 2966 ic->ic_flags, 2967 ni->ni_intval, 2968 ether_sprintf(ni->ni_bssid), 2969 ni->ni_capinfo, 2970 
ieee80211_chan2ieee(ic, ni->ni_chan))); 2971 2972 /* 2973 * Allocate and setup the beacon frame for AP or adhoc mode. 2974 */ 2975 #ifndef IEEE80211_STA_ONLY 2976 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 2977 ic->ic_opmode == IEEE80211_M_IBSS) { 2978 error = ath_beacon_alloc(sc, ni); 2979 if (error != 0) 2980 goto bad; 2981 } 2982 #endif 2983 /* 2984 * Configure the beacon and sleep timers. 2985 */ 2986 ath_beacon_config(sc); 2987 } else { 2988 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 2989 ath_hal_set_intr(ah, sc->sc_imask); 2990 } 2991 2992 /* 2993 * Invoke the parent method to complete the work. 2994 */ 2995 error = (*sc->sc_newstate)(ic, nstate, arg); 2996 2997 if (nstate == IEEE80211_S_RUN) { 2998 /* start periodic recalibration timer */ 2999 timeout_add_sec(&sc->sc_cal_to, ath_calinterval); 3000 3001 if (ic->ic_opmode != IEEE80211_M_MONITOR) 3002 timeout_add_msec(&sc->sc_rssadapt_to, 100); 3003 } else if (nstate == IEEE80211_S_SCAN) { 3004 /* start ap/neighbor scan timer */ 3005 timeout_add_msec(&sc->sc_scan_to, ath_dwelltime); 3006 } 3007 bad: 3008 return error; 3009 } 3010 3011 #ifndef IEEE80211_STA_ONLY 3012 void 3013 ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m, 3014 struct ieee80211_node *ni, struct ieee80211_rxinfo *rxi, int subtype) 3015 { 3016 struct ath_softc *sc = (struct ath_softc*)ic->ic_softc; 3017 struct ath_hal *ah = sc->sc_ah; 3018 3019 (*sc->sc_recv_mgmt)(ic, m, ni, rxi, subtype); 3020 3021 switch (subtype) { 3022 case IEEE80211_FC0_SUBTYPE_PROBE_RESP: 3023 case IEEE80211_FC0_SUBTYPE_BEACON: 3024 if (ic->ic_opmode != IEEE80211_M_IBSS || 3025 ic->ic_state != IEEE80211_S_RUN) 3026 break; 3027 if (ieee80211_ibss_merge(ic, ni, ath_hal_get_tsf64(ah)) == 3028 ENETRESET) 3029 ath_hal_set_associd(ah, ic->ic_bss->ni_bssid, 0); 3030 break; 3031 default: 3032 break; 3033 } 3034 return; 3035 } 3036 #endif 3037 3038 /* 3039 * Setup driver-specific state for a newly associated node. 3040 * Note that we're called also on a re-associate, the isnew 3041 * param tells us if this is the first time or not. 3042 */ 3043 void 3044 ath_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew) 3045 { 3046 if (ic->ic_opmode == IEEE80211_M_MONITOR) 3047 return; 3048 } 3049 3050 int 3051 ath_getchannels(struct ath_softc *sc, HAL_BOOL outdoor, HAL_BOOL xchanmode) 3052 { 3053 struct ieee80211com *ic = &sc->sc_ic; 3054 struct ifnet *ifp = &ic->ic_if; 3055 struct ath_hal *ah = sc->sc_ah; 3056 HAL_CHANNEL *chans; 3057 int i, ix, nchan; 3058 3059 sc->sc_nchan = 0; 3060 chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL), 3061 M_TEMP, M_NOWAIT); 3062 if (chans == NULL) { 3063 printf("%s: unable to allocate channel table\n", ifp->if_xname); 3064 return ENOMEM; 3065 } 3066 if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan, 3067 HAL_MODE_ALL, outdoor, xchanmode)) { 3068 printf("%s: unable to collect channel list from hal\n", 3069 ifp->if_xname); 3070 free(chans, M_TEMP); 3071 return EINVAL; 3072 } 3073 3074 /* 3075 * Convert HAL channels to ieee80211 ones and insert 3076 * them in the table according to their channel number. 
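	 * The table index is the IEEE channel number returned by
	 * ieee80211_mhz2ieee(); when two HAL channels map to the same
	 * number (e.g. 11b and 11g on the same frequency) their flags are
	 * merged into a single entry.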
3077 */ 3078 for (i = 0; i < nchan; i++) { 3079 HAL_CHANNEL *c = &chans[i]; 3080 ix = ieee80211_mhz2ieee(c->channel, c->channelFlags); 3081 if (ix > IEEE80211_CHAN_MAX) { 3082 printf("%s: bad hal channel %u (%u/%x) ignored\n", 3083 ifp->if_xname, ix, c->channel, c->channelFlags); 3084 continue; 3085 } 3086 DPRINTF(ATH_DEBUG_ANY, 3087 ("%s: HAL channel %d/%d freq %d flags %#04x idx %d\n", 3088 sc->sc_dev.dv_xname, i, nchan, c->channel, c->channelFlags, 3089 ix)); 3090 /* NB: flags are known to be compatible */ 3091 if (ic->ic_channels[ix].ic_freq == 0) { 3092 ic->ic_channels[ix].ic_freq = c->channel; 3093 ic->ic_channels[ix].ic_flags = c->channelFlags; 3094 } else { 3095 /* channels overlap; e.g. 11g and 11b */ 3096 ic->ic_channels[ix].ic_flags |= c->channelFlags; 3097 } 3098 /* count valid channels */ 3099 sc->sc_nchan++; 3100 } 3101 free(chans, M_TEMP); 3102 3103 if (sc->sc_nchan < 1) { 3104 printf("%s: no valid channels for regdomain %s(%u)\n", 3105 ifp->if_xname, ieee80211_regdomain2name(ah->ah_regdomain), 3106 ah->ah_regdomain); 3107 return ENOENT; 3108 } 3109 3110 /* set an initial channel */ 3111 ic->ic_ibss_chan = &ic->ic_channels[0]; 3112 3113 return 0; 3114 } 3115 3116 int 3117 ath_rate_setup(struct ath_softc *sc, u_int mode) 3118 { 3119 struct ath_hal *ah = sc->sc_ah; 3120 struct ieee80211com *ic = &sc->sc_ic; 3121 const HAL_RATE_TABLE *rt; 3122 struct ieee80211_rateset *rs; 3123 int i, maxrates; 3124 3125 switch (mode) { 3126 case IEEE80211_MODE_11A: 3127 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11A); 3128 break; 3129 case IEEE80211_MODE_11B: 3130 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11B); 3131 break; 3132 case IEEE80211_MODE_11G: 3133 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11G); 3134 break; 3135 case IEEE80211_MODE_TURBO: 3136 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_TURBO); 3137 break; 3138 default: 3139 DPRINTF(ATH_DEBUG_ANY, 3140 ("%s: invalid mode %u\n", __func__, mode)); 3141 return 0; 3142 } 3143 rt = sc->sc_rates[mode]; 3144 if (rt == NULL) 3145 return 0; 3146 if (rt->rateCount > IEEE80211_RATE_MAXSIZE) { 3147 DPRINTF(ATH_DEBUG_ANY, 3148 ("%s: rate table too small (%u > %u)\n", 3149 __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE)); 3150 maxrates = IEEE80211_RATE_MAXSIZE; 3151 } else { 3152 maxrates = rt->rateCount; 3153 } 3154 rs = &ic->ic_sup_rates[mode]; 3155 for (i = 0; i < maxrates; i++) 3156 rs->rs_rates[i] = rt->info[i].dot11Rate; 3157 rs->rs_nrates = maxrates; 3158 return 1; 3159 } 3160 3161 void 3162 ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 3163 { 3164 const HAL_RATE_TABLE *rt; 3165 int i; 3166 3167 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 3168 rt = sc->sc_rates[mode]; 3169 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 3170 for (i = 0; i < rt->rateCount; i++) 3171 sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i; 3172 bzero(sc->sc_hwmap, sizeof(sc->sc_hwmap)); 3173 for (i = 0; i < 32; i++) 3174 sc->sc_hwmap[i] = rt->info[rt->rateCodeToIndex[i]].dot11Rate; 3175 sc->sc_currates = rt; 3176 sc->sc_curmode = mode; 3177 } 3178 3179 void 3180 ath_rssadapt_updatenode(void *arg, struct ieee80211_node *ni) 3181 { 3182 struct ath_node *an = ATH_NODE(ni); 3183 3184 ieee80211_rssadapt_updatestats(&an->an_rssadapt); 3185 } 3186 3187 void 3188 ath_rssadapt_updatestats(void *arg) 3189 { 3190 struct ath_softc *sc = (struct ath_softc *)arg; 3191 struct ieee80211com *ic = &sc->sc_ic; 3192 3193 if (ic->ic_opmode == IEEE80211_M_STA) { 3194 
ath_rssadapt_updatenode(arg, ic->ic_bss); 3195 } else { 3196 ieee80211_iterate_nodes(ic, ath_rssadapt_updatenode, arg); 3197 } 3198 3199 timeout_add_msec(&sc->sc_rssadapt_to, 100); 3200 } 3201 3202 #ifdef AR_DEBUG 3203 void 3204 ath_printrxbuf(struct ath_buf *bf, int done) 3205 { 3206 struct ath_desc *ds; 3207 int i; 3208 3209 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { 3210 printf("R%d (%p %p) %08x %08x %08x %08x %08x %08x %c\n", 3211 i, ds, (struct ath_desc *)bf->bf_daddr + i, 3212 ds->ds_link, ds->ds_data, 3213 ds->ds_ctl0, ds->ds_ctl1, 3214 ds->ds_hw[0], ds->ds_hw[1], 3215 !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!'); 3216 } 3217 } 3218 3219 void 3220 ath_printtxbuf(struct ath_buf *bf, int done) 3221 { 3222 struct ath_desc *ds; 3223 int i; 3224 3225 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { 3226 printf("T%d (%p %p) " 3227 "%08x %08x %08x %08x %08x %08x %08x %08x %c\n", 3228 i, ds, (struct ath_desc *)bf->bf_daddr + i, 3229 ds->ds_link, ds->ds_data, 3230 ds->ds_ctl0, ds->ds_ctl1, 3231 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3], 3232 !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!'); 3233 } 3234 } 3235 #endif /* AR_DEBUG */ 3236 3237 int 3238 ath_gpio_attach(struct ath_softc *sc, u_int16_t devid) 3239 { 3240 struct ath_hal *ah = sc->sc_ah; 3241 struct gpiobus_attach_args gba; 3242 int i; 3243 3244 if (ah->ah_gpio_npins < 1) 3245 return 0; 3246 3247 /* Initialize gpio pins array */ 3248 for (i = 0; i < ah->ah_gpio_npins && i < AR5K_MAX_GPIO; i++) { 3249 sc->sc_gpio_pins[i].pin_num = i; 3250 sc->sc_gpio_pins[i].pin_caps = GPIO_PIN_INPUT | 3251 GPIO_PIN_OUTPUT; 3252 3253 /* Set pin mode to input */ 3254 ath_hal_set_gpio_input(ah, i); 3255 sc->sc_gpio_pins[i].pin_flags = GPIO_PIN_INPUT; 3256 3257 /* Get pin input */ 3258 sc->sc_gpio_pins[i].pin_state = ath_hal_get_gpio(ah, i) ? 3259 GPIO_PIN_HIGH : GPIO_PIN_LOW; 3260 } 3261 3262 /* Enable GPIO-controlled software LED if available */ 3263 if ((ah->ah_version == AR5K_AR5211) || 3264 (devid == PCI_PRODUCT_ATHEROS_AR5212_IBM)) { 3265 sc->sc_softled = 1; 3266 ath_hal_set_gpio_output(ah, AR5K_SOFTLED_PIN); 3267 ath_hal_set_gpio(ah, AR5K_SOFTLED_PIN, AR5K_SOFTLED_OFF); 3268 } 3269 3270 /* Create gpio controller tag */ 3271 sc->sc_gpio_gc.gp_cookie = sc; 3272 sc->sc_gpio_gc.gp_pin_read = ath_gpio_pin_read; 3273 sc->sc_gpio_gc.gp_pin_write = ath_gpio_pin_write; 3274 sc->sc_gpio_gc.gp_pin_ctl = ath_gpio_pin_ctl; 3275 3276 gba.gba_name = "gpio"; 3277 gba.gba_gc = &sc->sc_gpio_gc; 3278 gba.gba_pins = sc->sc_gpio_pins; 3279 gba.gba_npins = ah->ah_gpio_npins; 3280 3281 #ifdef notyet 3282 #if NGPIO > 0 3283 if (config_found(&sc->sc_dev, &gba, gpiobus_print) == NULL) 3284 return (ENODEV); 3285 #endif 3286 #endif 3287 3288 return (0); 3289 } 3290 3291 int 3292 ath_gpio_pin_read(void *arg, int pin) 3293 { 3294 struct ath_softc *sc = arg; 3295 struct ath_hal *ah = sc->sc_ah; 3296 return (ath_hal_get_gpio(ah, pin) ? GPIO_PIN_HIGH : GPIO_PIN_LOW); 3297 } 3298 3299 void 3300 ath_gpio_pin_write(void *arg, int pin, int value) 3301 { 3302 struct ath_softc *sc = arg; 3303 struct ath_hal *ah = sc->sc_ah; 3304 ath_hal_set_gpio(ah, pin, value ? GPIO_PIN_HIGH : GPIO_PIN_LOW); 3305 } 3306 3307 void 3308 ath_gpio_pin_ctl(void *arg, int pin, int flags) 3309 { 3310 struct ath_softc *sc = arg; 3311 struct ath_hal *ah = sc->sc_ah; 3312 3313 if (flags & GPIO_PIN_INPUT) { 3314 ath_hal_set_gpio_input(ah, pin); 3315 } else if (flags & GPIO_PIN_OUTPUT) { 3316 ath_hal_set_gpio_output(ah, pin); 3317 } 3318 } 3319
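/*
 * Illustrative sketch only, kept out of the build with #if 0: the
 * windowed RSSI average that ath_node_getrssi() computes above, reduced
 * to a standalone helper.  The names rhist_average, RHIST_SIZE and
 * RHIST_NOTIME are local to this example and do not exist in the
 * driver; the real code walks an_rx_hist[] backwards from
 * an_rx_hist_next and stops at entries older than one second (hz ticks)
 * or never written (ATH_RHIST_NOTIME).
 */
#if 0
#define RHIST_SIZE	16	/* ring size; illustration only */
#define RHIST_NOTIME	(~0U)	/* marks a slot that was never filled */

static int
rhist_average(const unsigned int *ticks, const unsigned char *rssi,
    int next, unsigned int now, unsigned int window)
{
	int i = next, nsamples = 0, sum = 0;

	do {
		/* stop at empty or stale entries */
		if (ticks[i] == RHIST_NOTIME || now - ticks[i] > window)
			break;
		sum += rssi[i];
		nsamples++;
		/* walk the ring backwards */
		i = (i == 0) ? RHIST_SIZE - 1 : i - 1;
	} while (i != next);

	/* fall back to the last sample looked at when nothing is recent */
	return (nsamples ? sum / nsamples : rssi[i]);
}
#endif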