1 /* $OpenBSD: ath.c,v 1.92 2011/04/17 20:38:10 stsp Exp $ */ 2 /* $NetBSD: ath.c,v 1.37 2004/08/18 21:59:39 dyoung Exp $ */ 3 4 /*- 5 * Copyright (c) 2002-2004 Sam Leffler, Errno Consulting 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer, 13 * without modification. 14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 15 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 16 * redistribution must be conditioned upon including a substantially 17 * similar Disclaimer requirement for further binary redistribution. 18 * 3. Neither the names of the above-listed copyright holders nor the names 19 * of any contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 26 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 27 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 28 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 31 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGES. 34 */ 35 36 /* 37 * Driver for the Atheros Wireless LAN controller. 38 * 39 * This software is derived from work of Atsushi Onoe; his contribution 40 * is greatly appreciated. It has been modified for OpenBSD to use an 41 * open source HAL instead of the original binary-only HAL. 
42 */ 43 44 #include "bpfilter.h" 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/mbuf.h> 49 #include <sys/malloc.h> 50 #include <sys/lock.h> 51 #include <sys/kernel.h> 52 #include <sys/socket.h> 53 #include <sys/sockio.h> 54 #include <sys/device.h> 55 #include <sys/errno.h> 56 #include <sys/timeout.h> 57 #include <sys/gpio.h> 58 59 #include <machine/endian.h> 60 #include <machine/bus.h> 61 62 #include <net/if.h> 63 #include <net/if_dl.h> 64 #include <net/if_media.h> 65 #include <net/if_arp.h> 66 #include <net/if_llc.h> 67 #if NBPFILTER > 0 68 #include <net/bpf.h> 69 #endif 70 #ifdef INET 71 #include <netinet/in.h> 72 #include <netinet/if_ether.h> 73 #endif 74 75 #include <net80211/ieee80211_var.h> 76 #include <net80211/ieee80211_rssadapt.h> 77 78 #include <dev/pci/pcidevs.h> 79 #include <dev/gpio/gpiovar.h> 80 81 #include <dev/ic/athvar.h> 82 83 int ath_init(struct ifnet *); 84 int ath_init1(struct ath_softc *); 85 int ath_intr1(struct ath_softc *); 86 void ath_stop(struct ifnet *); 87 void ath_start(struct ifnet *); 88 void ath_reset(struct ath_softc *, int); 89 int ath_media_change(struct ifnet *); 90 void ath_watchdog(struct ifnet *); 91 int ath_ioctl(struct ifnet *, u_long, caddr_t); 92 void ath_fatal_proc(void *, int); 93 void ath_rxorn_proc(void *, int); 94 void ath_bmiss_proc(void *, int); 95 u_int ath_chan2flags(struct ieee80211com *, struct ieee80211_channel *); 96 int ath_initkeytable(struct ath_softc *); 97 void ath_mcastfilter_accum(caddr_t, u_int32_t (*)[2]); 98 void ath_mcastfilter_compute(struct ath_softc *, u_int32_t (*)[2]); 99 u_int32_t ath_calcrxfilter(struct ath_softc *); 100 void ath_mode_init(struct ath_softc *); 101 #ifndef IEEE80211_STA_ONLY 102 int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); 103 void ath_beacon_proc(void *, int); 104 void ath_beacon_free(struct ath_softc *); 105 #endif 106 void ath_beacon_config(struct ath_softc *); 107 int ath_desc_alloc(struct ath_softc *); 108 void ath_desc_free(struct ath_softc *); 109 struct ieee80211_node *ath_node_alloc(struct ieee80211com *); 110 struct mbuf *ath_getmbuf(int, int, u_int); 111 void ath_node_free(struct ieee80211com *, struct ieee80211_node *); 112 void ath_node_copy(struct ieee80211com *, 113 struct ieee80211_node *, const struct ieee80211_node *); 114 u_int8_t ath_node_getrssi(struct ieee80211com *, 115 const struct ieee80211_node *); 116 int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); 117 void ath_rx_proc(void *, int); 118 int ath_tx_start(struct ath_softc *, struct ieee80211_node *, 119 struct ath_buf *, struct mbuf *); 120 void ath_tx_proc(void *, int); 121 int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 122 void ath_draintxq(struct ath_softc *); 123 void ath_stoprecv(struct ath_softc *); 124 int ath_startrecv(struct ath_softc *); 125 void ath_next_scan(void *); 126 int ath_set_slot_time(struct ath_softc *); 127 void ath_calibrate(void *); 128 void ath_ledstate(struct ath_softc *, enum ieee80211_state); 129 int ath_newstate(struct ieee80211com *, enum ieee80211_state, int); 130 void ath_newassoc(struct ieee80211com *, 131 struct ieee80211_node *, int); 132 int ath_getchannels(struct ath_softc *, HAL_BOOL outdoor, 133 HAL_BOOL xchanmode); 134 int ath_rate_setup(struct ath_softc *sc, u_int mode); 135 void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 136 void ath_rssadapt_updatenode(void *, struct ieee80211_node *); 137 void ath_rssadapt_updatestats(void *); 138 #ifndef IEEE80211_STA_ONLY 139 void ath_recv_mgmt(struct 
ieee80211com *, struct mbuf *, 140 struct ieee80211_node *, struct ieee80211_rxinfo *, int); 141 #endif 142 void ath_disable(struct ath_softc *); 143 144 int ath_gpio_attach(struct ath_softc *, u_int16_t); 145 int ath_gpio_pin_read(void *, int); 146 void ath_gpio_pin_write(void *, int, int); 147 void ath_gpio_pin_ctl(void *, int, int); 148 149 #ifdef AR_DEBUG 150 void ath_printrxbuf(struct ath_buf *, int); 151 void ath_printtxbuf(struct ath_buf *, int); 152 int ath_debug = 0; 153 #endif 154 155 int ath_dwelltime = 200; /* 5 channels/second */ 156 int ath_calinterval = 30; /* calibrate every 30 secs */ 157 int ath_outdoor = AH_TRUE; /* outdoor operation */ 158 int ath_xchanmode = AH_TRUE; /* enable extended channels */ 159 int ath_softcrypto = 1; /* 1=enable software crypto */ 160 161 struct cfdriver ath_cd = { 162 NULL, "ath", DV_IFNET 163 }; 164 165 int 166 ath_activate(struct device *self, int act) 167 { 168 struct ath_softc *sc = (struct ath_softc *)self; 169 struct ifnet *ifp = &sc->sc_ic.ic_if; 170 171 switch (act) { 172 case DVACT_SUSPEND: 173 if (ifp->if_flags & IFF_RUNNING) { 174 ath_stop(ifp); 175 if (sc->sc_power != NULL) 176 (*sc->sc_power)(sc, act); 177 } 178 break; 179 case DVACT_RESUME: 180 if (ifp->if_flags & IFF_UP) { 181 ath_init(ifp); 182 if (ifp->if_flags & IFF_RUNNING) 183 ath_start(ifp); 184 } 185 break; 186 } 187 return 0; 188 } 189 190 int 191 ath_enable(struct ath_softc *sc) 192 { 193 if (ATH_IS_ENABLED(sc) == 0) { 194 if (sc->sc_enable != NULL && (*sc->sc_enable)(sc) != 0) { 195 printf("%s: device enable failed\n", 196 sc->sc_dev.dv_xname); 197 return (EIO); 198 } 199 sc->sc_flags |= ATH_ENABLED; 200 } 201 return (0); 202 } 203 204 void 205 ath_disable(struct ath_softc *sc) 206 { 207 if (!ATH_IS_ENABLED(sc)) 208 return; 209 if (sc->sc_disable != NULL) 210 (*sc->sc_disable)(sc); 211 sc->sc_flags &= ~ATH_ENABLED; 212 } 213 214 int 215 ath_attach(u_int16_t devid, struct ath_softc *sc) 216 { 217 struct ieee80211com *ic = &sc->sc_ic; 218 struct ifnet *ifp = &ic->ic_if; 219 struct ath_hal *ah; 220 HAL_STATUS status; 221 HAL_TXQ_INFO qinfo; 222 int error = 0, i; 223 224 DPRINTF(ATH_DEBUG_ANY, ("%s: devid 0x%x\n", __func__, devid)); 225 226 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 227 sc->sc_flags &= ~ATH_ATTACHED; /* make sure that it's not attached */ 228 229 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, 230 sc->sc_pcie, &status); 231 if (ah == NULL) { 232 printf("%s: unable to attach hardware; HAL status %d\n", 233 ifp->if_xname, status); 234 error = ENXIO; 235 goto bad; 236 } 237 if (ah->ah_abi != HAL_ABI_VERSION) { 238 printf("%s: HAL ABI mismatch detected (0x%x != 0x%x)\n", 239 ifp->if_xname, ah->ah_abi, HAL_ABI_VERSION); 240 error = ENXIO; 241 goto bad; 242 } 243 244 if (ah->ah_single_chip == AH_TRUE) { 245 printf("%s: AR%s %u.%u phy %u.%u rf %u.%u", ifp->if_xname, 246 ar5k_printver(AR5K_VERSION_DEV, devid), 247 ah->ah_mac_version, ah->ah_mac_revision, 248 ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf, 249 ah->ah_radio_5ghz_revision >> 4, 250 ah->ah_radio_5ghz_revision & 0xf); 251 } else { 252 printf("%s: AR%s %u.%u phy %u.%u", ifp->if_xname, 253 ar5k_printver(AR5K_VERSION_VER, ah->ah_mac_srev), 254 ah->ah_mac_version, ah->ah_mac_revision, 255 ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf); 256 printf(" rf%s %u.%u", 257 ar5k_printver(AR5K_VERSION_RAD, ah->ah_radio_5ghz_revision), 258 ah->ah_radio_5ghz_revision >> 4, 259 ah->ah_radio_5ghz_revision & 0xf); 260 if (ah->ah_radio_2ghz_revision != 0) { 261 printf(" rf%s %u.%u", 262 
ar5k_printver(AR5K_VERSION_RAD, 263 ah->ah_radio_2ghz_revision), 264 ah->ah_radio_2ghz_revision >> 4, 265 ah->ah_radio_2ghz_revision & 0xf); 266 } 267 } 268 269 #if 0 270 if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_UNSUPP || 271 ah->ah_radio_2ghz_revision >= AR5K_SREV_RAD_UNSUPP) { 272 printf(": RF radio not supported\n"); 273 error = EOPNOTSUPP; 274 goto bad; 275 } 276 #endif 277 278 sc->sc_ah = ah; 279 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ 280 281 /* 282 * Get regulation domain either stored in the EEPROM or defined 283 * as the default value. Some devices are known to have broken 284 * regulation domain values in their EEPROM. 285 */ 286 ath_hal_get_regdomain(ah, &ah->ah_regdomain); 287 288 /* 289 * Construct channel list based on the current regulation domain. 290 */ 291 error = ath_getchannels(sc, ath_outdoor, ath_xchanmode); 292 if (error != 0) 293 goto bad; 294 295 /* 296 * Setup rate tables for all potential media types. 297 */ 298 ath_rate_setup(sc, IEEE80211_MODE_11A); 299 ath_rate_setup(sc, IEEE80211_MODE_11B); 300 ath_rate_setup(sc, IEEE80211_MODE_11G); 301 ath_rate_setup(sc, IEEE80211_MODE_TURBO); 302 303 error = ath_desc_alloc(sc); 304 if (error != 0) { 305 printf(": failed to allocate descriptors: %d\n", error); 306 goto bad; 307 } 308 timeout_set(&sc->sc_scan_to, ath_next_scan, sc); 309 timeout_set(&sc->sc_cal_to, ath_calibrate, sc); 310 timeout_set(&sc->sc_rssadapt_to, ath_rssadapt_updatestats, sc); 311 312 #ifdef __FreeBSD__ 313 ATH_TXBUF_LOCK_INIT(sc); 314 ATH_TXQ_LOCK_INIT(sc); 315 #endif 316 317 ATH_TASK_INIT(&sc->sc_txtask, ath_tx_proc, sc); 318 ATH_TASK_INIT(&sc->sc_rxtask, ath_rx_proc, sc); 319 ATH_TASK_INIT(&sc->sc_rxorntask, ath_rxorn_proc, sc); 320 ATH_TASK_INIT(&sc->sc_fataltask, ath_fatal_proc, sc); 321 ATH_TASK_INIT(&sc->sc_bmisstask, ath_bmiss_proc, sc); 322 #ifndef IEEE80211_STA_ONLY 323 ATH_TASK_INIT(&sc->sc_swbatask, ath_beacon_proc, sc); 324 #endif 325 326 /* 327 * For now just pre-allocate one data queue and one 328 * beacon queue. Note that the HAL handles resetting 329 * them at the needed time. Eventually we'll want to 330 * allocate more tx queues for splitting management 331 * frames and for QOS support. 
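 * The beacon queue handle is kept in sc_bhalq and the data queue handles in sc_txhalq[]; both are obtained from the HAL via ath_hal_setup_tx_queue() below.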
332 */ 333 sc->sc_bhalq = ath_hal_setup_tx_queue(ah, HAL_TX_QUEUE_BEACON, NULL); 334 if (sc->sc_bhalq == (u_int) -1) { 335 printf(": unable to setup a beacon xmit queue!\n"); 336 goto bad2; 337 } 338 339 for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) { 340 bzero(&qinfo, sizeof(qinfo)); 341 qinfo.tqi_type = HAL_TX_QUEUE_DATA; 342 qinfo.tqi_subtype = i; /* should be mapped to WME types */ 343 sc->sc_txhalq[i] = ath_hal_setup_tx_queue(ah, 344 HAL_TX_QUEUE_DATA, &qinfo); 345 if (sc->sc_txhalq[i] == (u_int) -1) { 346 printf(": unable to setup a data xmit queue %u!\n", i); 347 goto bad2; 348 } 349 } 350 351 ifp->if_softc = sc; 352 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST 353 | IFF_NOTRAILERS; 354 ifp->if_start = ath_start; 355 ifp->if_watchdog = ath_watchdog; 356 ifp->if_ioctl = ath_ioctl; 357 #ifndef __OpenBSD__ 358 ifp->if_stop = ath_stop; /* XXX */ 359 #endif 360 IFQ_SET_MAXLEN(&ifp->if_snd, ATH_TXBUF * ATH_TXDESC); 361 IFQ_SET_READY(&ifp->if_snd); 362 363 ic->ic_softc = sc; 364 ic->ic_newassoc = ath_newassoc; 365 /* XXX not right but it's not used anywhere important */ 366 ic->ic_phytype = IEEE80211_T_OFDM; 367 ic->ic_opmode = IEEE80211_M_STA; 368 ic->ic_caps = IEEE80211_C_WEP /* wep supported */ 369 | IEEE80211_C_PMGT /* power management */ 370 #ifndef IEEE80211_STA_ONLY 371 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ 372 | IEEE80211_C_HOSTAP /* hostap mode */ 373 #endif 374 | IEEE80211_C_MONITOR /* monitor mode */ 375 | IEEE80211_C_SHSLOT /* short slot time supported */ 376 | IEEE80211_C_SHPREAMBLE; /* short preamble supported */ 377 if (ath_softcrypto) 378 ic->ic_caps |= IEEE80211_C_RSN; /* wpa/rsn supported */ 379 380 /* 381 * Not all chips have the VEOL support we want to use with 382 * IBSS beacon; check here for it. 383 */ 384 sc->sc_veol = ath_hal_has_veol(ah); 385 386 /* get mac address from hardware */ 387 ath_hal_get_lladdr(ah, ic->ic_myaddr); 388 389 if_attach(ifp); 390 391 /* call MI attach routine. */ 392 ieee80211_ifattach(ifp); 393 394 /* override default methods */ 395 ic->ic_node_alloc = ath_node_alloc; 396 sc->sc_node_free = ic->ic_node_free; 397 ic->ic_node_free = ath_node_free; 398 sc->sc_node_copy = ic->ic_node_copy; 399 ic->ic_node_copy = ath_node_copy; 400 ic->ic_node_getrssi = ath_node_getrssi; 401 sc->sc_newstate = ic->ic_newstate; 402 ic->ic_newstate = ath_newstate; 403 #ifndef IEEE80211_STA_ONLY 404 sc->sc_recv_mgmt = ic->ic_recv_mgmt; 405 ic->ic_recv_mgmt = ath_recv_mgmt; 406 #endif 407 ic->ic_max_rssi = AR5K_MAX_RSSI; 408 bcopy(etherbroadcastaddr, sc->sc_broadcast_addr, IEEE80211_ADDR_LEN); 409 410 /* complete initialization */ 411 ieee80211_media_init(ifp, ath_media_change, ieee80211_media_status); 412 413 #if NBPFILTER > 0 414 bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO, 415 sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN); 416 417 sc->sc_rxtap_len = sizeof(sc->sc_rxtapu); 418 bzero(&sc->sc_rxtapu, sc->sc_rxtap_len); 419 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); 420 sc->sc_rxtap.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT); 421 422 sc->sc_txtap_len = sizeof(sc->sc_txtapu); 423 bzero(&sc->sc_txtapu, sc->sc_txtap_len); 424 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); 425 sc->sc_txtap.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT); 426 #endif 427 428 sc->sc_flags |= ATH_ATTACHED; 429 430 /* 431 * Print regulation domain and the mac address. The regulation domain 432 * will be marked with a * if the EEPROM value has been overwritten. 
433 */ 434 printf(", %s%s, address %s\n", 435 ieee80211_regdomain2name(ah->ah_regdomain), 436 ah->ah_regdomain != ah->ah_regdomain_hw ? "*" : "", 437 ether_sprintf(ic->ic_myaddr)); 438 439 if (ath_gpio_attach(sc, devid) == 0) 440 sc->sc_flags |= ATH_GPIO; 441 442 return 0; 443 bad2: 444 ath_desc_free(sc); 445 bad: 446 if (ah) 447 ath_hal_detach(ah); 448 sc->sc_invalid = 1; 449 return error; 450 } 451 452 int 453 ath_detach(struct ath_softc *sc, int flags) 454 { 455 struct ifnet *ifp = &sc->sc_ic.ic_if; 456 int s; 457 458 if ((sc->sc_flags & ATH_ATTACHED) == 0) 459 return (0); 460 461 config_detach_children(&sc->sc_dev, flags); 462 463 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags %x\n", __func__, ifp->if_flags)); 464 465 timeout_del(&sc->sc_scan_to); 466 timeout_del(&sc->sc_cal_to); 467 timeout_del(&sc->sc_rssadapt_to); 468 469 s = splnet(); 470 ath_stop(ifp); 471 ath_desc_free(sc); 472 ath_hal_detach(sc->sc_ah); 473 474 ieee80211_ifdetach(ifp); 475 if_detach(ifp); 476 477 splx(s); 478 #ifdef __FreeBSD__ 479 ATH_TXBUF_LOCK_DESTROY(sc); 480 ATH_TXQ_LOCK_DESTROY(sc); 481 #endif 482 483 return 0; 484 } 485 486 int 487 ath_intr(void *arg) 488 { 489 return ath_intr1((struct ath_softc *)arg); 490 } 491 492 int 493 ath_intr1(struct ath_softc *sc) 494 { 495 struct ieee80211com *ic = &sc->sc_ic; 496 struct ifnet *ifp = &ic->ic_if; 497 struct ath_hal *ah = sc->sc_ah; 498 HAL_INT status; 499 500 if (sc->sc_invalid) { 501 /* 502 * The hardware is not ready/present, don't touch anything. 503 * Note this can happen early on if the IRQ is shared. 504 */ 505 DPRINTF(ATH_DEBUG_ANY, ("%s: invalid; ignored\n", __func__)); 506 return 0; 507 } 508 if (!ath_hal_is_intr_pending(ah)) /* shared irq, not for us */ 509 return 0; 510 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) != (IFF_RUNNING|IFF_UP)) { 511 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n", 512 __func__, ifp->if_flags)); 513 ath_hal_get_isr(ah, &status); /* clear ISR */ 514 ath_hal_set_intr(ah, 0); /* disable further intr's */ 515 return 1; /* XXX */ 516 } 517 ath_hal_get_isr(ah, &status); /* NB: clears ISR too */ 518 DPRINTF(ATH_DEBUG_INTR, ("%s: status 0x%x\n", __func__, status)); 519 status &= sc->sc_imask; /* discard unasked for bits */ 520 if (status & HAL_INT_FATAL) { 521 sc->sc_stats.ast_hardware++; 522 ath_hal_set_intr(ah, 0); /* disable intr's until reset */ 523 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_fataltask); 524 } else if (status & HAL_INT_RXORN) { 525 sc->sc_stats.ast_rxorn++; 526 ath_hal_set_intr(ah, 0); /* disable intr's until reset */ 527 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxorntask); 528 } else if (status & HAL_INT_MIB) { 529 DPRINTF(ATH_DEBUG_INTR, 530 ("%s: resetting MIB counters\n", __func__)); 531 sc->sc_stats.ast_mib++; 532 ath_hal_update_mib_counters(ah, &sc->sc_mib_stats); 533 } else { 534 if (status & HAL_INT_RXEOL) { 535 /* 536 * NB: the hardware should re-read the link when 537 * RXE bit is written, but it doesn't work at 538 * least on older hardware revs. 
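 * Clearing sc_rxlink below makes the next ath_rxbuf_init() call start a fresh receive descriptor chain rather than linking onto the stale one.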
539 */ 540 sc->sc_stats.ast_rxeol++; 541 sc->sc_rxlink = NULL; 542 } 543 if (status & HAL_INT_TXURN) { 544 sc->sc_stats.ast_txurn++; 545 /* bump tx trigger level */ 546 ath_hal_update_tx_triglevel(ah, AH_TRUE); 547 } 548 if (status & HAL_INT_RX) 549 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxtask); 550 if (status & HAL_INT_TX) 551 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_txtask); 552 if (status & HAL_INT_SWBA) 553 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_swbatask); 554 if (status & HAL_INT_BMISS) { 555 sc->sc_stats.ast_bmiss++; 556 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_bmisstask); 557 } 558 } 559 return 1; 560 } 561 562 void 563 ath_fatal_proc(void *arg, int pending) 564 { 565 struct ath_softc *sc = arg; 566 struct ieee80211com *ic = &sc->sc_ic; 567 struct ifnet *ifp = &ic->ic_if; 568 569 if (ifp->if_flags & IFF_DEBUG) 570 printf("%s: hardware error; resetting\n", ifp->if_xname); 571 ath_reset(sc, 1); 572 } 573 574 void 575 ath_rxorn_proc(void *arg, int pending) 576 { 577 struct ath_softc *sc = arg; 578 struct ieee80211com *ic = &sc->sc_ic; 579 struct ifnet *ifp = &ic->ic_if; 580 581 if (ifp->if_flags & IFF_DEBUG) 582 printf("%s: rx FIFO overrun; resetting\n", ifp->if_xname); 583 ath_reset(sc, 1); 584 } 585 586 void 587 ath_bmiss_proc(void *arg, int pending) 588 { 589 struct ath_softc *sc = arg; 590 struct ieee80211com *ic = &sc->sc_ic; 591 592 DPRINTF(ATH_DEBUG_ANY, ("%s: pending %u\n", __func__, pending)); 593 if (ic->ic_opmode != IEEE80211_M_STA) 594 return; 595 if (ic->ic_state == IEEE80211_S_RUN) { 596 /* 597 * Rather than go directly to scan state, try to 598 * reassociate first. If that fails then the state 599 * machine will drop us into scanning after timing 600 * out waiting for a probe response. 601 */ 602 ieee80211_new_state(ic, IEEE80211_S_ASSOC, -1); 603 } 604 } 605 606 u_int 607 ath_chan2flags(struct ieee80211com *ic, struct ieee80211_channel *chan) 608 { 609 enum ieee80211_phymode mode = ieee80211_chan2mode(ic, chan); 610 611 switch (mode) { 612 case IEEE80211_MODE_AUTO: 613 return 0; 614 case IEEE80211_MODE_11A: 615 return CHANNEL_A; 616 case IEEE80211_MODE_11B: 617 return CHANNEL_B; 618 case IEEE80211_MODE_11G: 619 return CHANNEL_G; 620 case IEEE80211_MODE_TURBO: 621 return CHANNEL_T; 622 default: 623 panic("%s: unsupported mode %d", __func__, mode); 624 return 0; 625 } 626 } 627 628 int 629 ath_init(struct ifnet *ifp) 630 { 631 return ath_init1((struct ath_softc *)ifp->if_softc); 632 } 633 634 int 635 ath_init1(struct ath_softc *sc) 636 { 637 struct ieee80211com *ic = &sc->sc_ic; 638 struct ifnet *ifp = &ic->ic_if; 639 struct ieee80211_node *ni; 640 enum ieee80211_phymode mode; 641 struct ath_hal *ah = sc->sc_ah; 642 HAL_STATUS status; 643 HAL_CHANNEL hchan; 644 int error = 0, s; 645 646 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n", 647 __func__, ifp->if_flags)); 648 649 if ((error = ath_enable(sc)) != 0) 650 return error; 651 652 s = splnet(); 653 /* 654 * Stop anything previously setup. This is safe 655 * whether this is the first time through or not. 656 */ 657 ath_stop(ifp); 658 659 /* 660 * Reset the link layer address to the latest value. 661 */ 662 IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl)); 663 ath_hal_set_lladdr(ah, ic->ic_myaddr); 664 665 /* 666 * The basic interface to setting the hardware in a good 667 * state is ``reset''. On return the hardware is known to 668 * be powered up and with interrupts disabled. This must 669 * be followed by initialization of the appropriate bits 670 * and then setup of the interrupt mask. 
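 * The HAL channel handed to ath_hal_reset() below is built from the current ibss channel, with its flags derived from the channel's PHY mode via ath_chan2flags().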
671 */ 672 hchan.channel = ic->ic_ibss_chan->ic_freq; 673 hchan.channelFlags = ath_chan2flags(ic, ic->ic_ibss_chan); 674 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, &status)) { 675 printf("%s: unable to reset hardware; hal status %u\n", 676 ifp->if_xname, status); 677 error = EIO; 678 goto done; 679 } 680 ath_set_slot_time(sc); 681 682 if ((error = ath_initkeytable(sc)) != 0) { 683 printf("%s: unable to reset the key cache\n", 684 ifp->if_xname); 685 goto done; 686 } 687 688 if ((error = ath_startrecv(sc)) != 0) { 689 printf("%s: unable to start recv logic\n", ifp->if_xname); 690 goto done; 691 } 692 693 /* 694 * Enable interrupts. 695 */ 696 sc->sc_imask = HAL_INT_RX | HAL_INT_TX 697 | HAL_INT_RXEOL | HAL_INT_RXORN 698 | HAL_INT_FATAL | HAL_INT_GLOBAL; 699 #ifndef IEEE80211_STA_ONLY 700 if (ic->ic_opmode == IEEE80211_M_HOSTAP) 701 sc->sc_imask |= HAL_INT_MIB; 702 #endif 703 ath_hal_set_intr(ah, sc->sc_imask); 704 705 ifp->if_flags |= IFF_RUNNING; 706 ic->ic_state = IEEE80211_S_INIT; 707 708 /* 709 * The hardware should be ready to go now so it's safe 710 * to kick the 802.11 state machine as it's likely to 711 * immediately call back to us to send mgmt frames. 712 */ 713 ni = ic->ic_bss; 714 ni->ni_chan = ic->ic_ibss_chan; 715 mode = ieee80211_chan2mode(ic, ni->ni_chan); 716 if (mode != sc->sc_curmode) 717 ath_setcurmode(sc, mode); 718 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 719 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1); 720 } else { 721 ieee80211_new_state(ic, IEEE80211_S_RUN, -1); 722 } 723 done: 724 splx(s); 725 return error; 726 } 727 728 void 729 ath_stop(struct ifnet *ifp) 730 { 731 struct ieee80211com *ic = (struct ieee80211com *) ifp; 732 struct ath_softc *sc = ifp->if_softc; 733 struct ath_hal *ah = sc->sc_ah; 734 int s; 735 736 DPRINTF(ATH_DEBUG_ANY, ("%s: invalid %u if_flags 0x%x\n", 737 __func__, sc->sc_invalid, ifp->if_flags)); 738 739 s = splnet(); 740 if (ifp->if_flags & IFF_RUNNING) { 741 /* 742 * Shutdown the hardware and driver: 743 * disable interrupts 744 * turn off timers 745 * clear transmit machinery 746 * clear receive machinery 747 * drain and release tx queues 748 * reclaim beacon resources 749 * reset 802.11 state machine 750 * power down hardware 751 * 752 * Note that some of this work is not possible if the 753 * hardware is gone (invalid). 754 */ 755 ifp->if_flags &= ~IFF_RUNNING; 756 ifp->if_timer = 0; 757 if (!sc->sc_invalid) 758 ath_hal_set_intr(ah, 0); 759 ath_draintxq(sc); 760 if (!sc->sc_invalid) { 761 ath_stoprecv(sc); 762 } else { 763 sc->sc_rxlink = NULL; 764 } 765 IFQ_PURGE(&ifp->if_snd); 766 #ifndef IEEE80211_STA_ONLY 767 ath_beacon_free(sc); 768 #endif 769 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 770 if (!sc->sc_invalid) { 771 ath_hal_set_power(ah, HAL_PM_FULL_SLEEP, 0); 772 } 773 ath_disable(sc); 774 } 775 splx(s); 776 } 777 778 /* 779 * Reset the hardware w/o losing operational state. This is 780 * basically a more efficient way of doing ath_stop, ath_init, 781 * followed by state transitions to the current 802.11 782 * operational state. Used to recover from errors rx overrun 783 * and to reset the hardware when rf gain settings must be reset. 
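 * The 'full' argument is passed to ath_hal_reset() as its channel-change flag; when set it forces a complete reset of the chip.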
784 */ 785 void 786 ath_reset(struct ath_softc *sc, int full) 787 { 788 struct ieee80211com *ic = &sc->sc_ic; 789 struct ifnet *ifp = &ic->ic_if; 790 struct ath_hal *ah = sc->sc_ah; 791 struct ieee80211_channel *c; 792 HAL_STATUS status; 793 HAL_CHANNEL hchan; 794 795 /* 796 * Convert to a HAL channel description with the flags 797 * constrained to reflect the current operating mode. 798 */ 799 c = ic->ic_ibss_chan; 800 hchan.channel = c->ic_freq; 801 hchan.channelFlags = ath_chan2flags(ic, c); 802 803 ath_hal_set_intr(ah, 0); /* disable interrupts */ 804 ath_draintxq(sc); /* stop xmit side */ 805 ath_stoprecv(sc); /* stop recv side */ 806 /* NB: indicate channel change so we do a full reset */ 807 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, 808 full ? AH_TRUE : AH_FALSE, &status)) { 809 printf("%s: %s: unable to reset hardware; hal status %u\n", 810 ifp->if_xname, __func__, status); 811 } 812 ath_set_slot_time(sc); 813 /* In case channel changed, save as a node channel */ 814 ic->ic_bss->ni_chan = ic->ic_ibss_chan; 815 ath_hal_set_intr(ah, sc->sc_imask); 816 if (ath_startrecv(sc) != 0) /* restart recv */ 817 printf("%s: %s: unable to start recv logic\n", ifp->if_xname, 818 __func__); 819 ath_start(ifp); /* restart xmit */ 820 if (ic->ic_state == IEEE80211_S_RUN) 821 ath_beacon_config(sc); /* restart beacons */ 822 } 823 824 void 825 ath_start(struct ifnet *ifp) 826 { 827 struct ath_softc *sc = ifp->if_softc; 828 struct ath_hal *ah = sc->sc_ah; 829 struct ieee80211com *ic = &sc->sc_ic; 830 struct ieee80211_node *ni; 831 struct ath_buf *bf; 832 struct mbuf *m; 833 struct ieee80211_frame *wh; 834 int s; 835 836 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING || 837 sc->sc_invalid) 838 return; 839 for (;;) { 840 /* 841 * Grab a TX buffer and associated resources. 842 */ 843 s = splnet(); 844 bf = TAILQ_FIRST(&sc->sc_txbuf); 845 if (bf != NULL) 846 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); 847 splx(s); 848 if (bf == NULL) { 849 DPRINTF(ATH_DEBUG_ANY, ("%s: out of xmit buffers\n", 850 __func__)); 851 sc->sc_stats.ast_tx_qstop++; 852 ifp->if_flags |= IFF_OACTIVE; 853 break; 854 } 855 /* 856 * Poll the management queue for frames; they 857 * have priority over normal data frames. 858 */ 859 IF_DEQUEUE(&ic->ic_mgtq, m); 860 if (m == NULL) { 861 /* 862 * No data frames go out unless we're associated. 863 */ 864 if (ic->ic_state != IEEE80211_S_RUN) { 865 DPRINTF(ATH_DEBUG_ANY, 866 ("%s: ignore data packet, state %u\n", 867 __func__, ic->ic_state)); 868 sc->sc_stats.ast_tx_discard++; 869 s = splnet(); 870 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 871 splx(s); 872 break; 873 } 874 IFQ_DEQUEUE(&ifp->if_snd, m); 875 if (m == NULL) { 876 s = splnet(); 877 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 878 splx(s); 879 break; 880 } 881 ifp->if_opackets++; 882 883 #if NBPFILTER > 0 884 if (ifp->if_bpf) 885 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 886 #endif 887 888 /* 889 * Encapsulate the packet in prep for transmission. 890 */ 891 m = ieee80211_encap(ifp, m, &ni); 892 if (m == NULL) { 893 DPRINTF(ATH_DEBUG_ANY, 894 ("%s: encapsulation failure\n", 895 __func__)); 896 sc->sc_stats.ast_tx_encap++; 897 goto bad; 898 } 899 wh = mtod(m, struct ieee80211_frame *); 900 } else { 901 /* 902 * Hack! The referenced node pointer is in the 903 * rcvif field of the packet header. 
This is 904 * placed there by ieee80211_mgmt_output because 905 * we need to hold the reference with the frame 906 * and there's no other way (other than packet 907 * tags which we consider too expensive to use) 908 * to pass it along. 909 */ 910 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 911 m->m_pkthdr.rcvif = NULL; 912 913 wh = mtod(m, struct ieee80211_frame *); 914 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 915 IEEE80211_FC0_SUBTYPE_PROBE_RESP) { 916 /* fill time stamp */ 917 u_int64_t tsf; 918 u_int32_t *tstamp; 919 920 tsf = ath_hal_get_tsf64(ah); 921 /* XXX: adjust 100us delay to xmit */ 922 tsf += 100; 923 tstamp = (u_int32_t *)&wh[1]; 924 tstamp[0] = htole32(tsf & 0xffffffff); 925 tstamp[1] = htole32(tsf >> 32); 926 } 927 sc->sc_stats.ast_tx_mgmt++; 928 } 929 930 if (ath_tx_start(sc, ni, bf, m)) { 931 bad: 932 s = splnet(); 933 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 934 splx(s); 935 ifp->if_oerrors++; 936 if (ni != NULL) 937 ieee80211_release_node(ic, ni); 938 continue; 939 } 940 941 sc->sc_tx_timer = 5; 942 ifp->if_timer = 1; 943 } 944 } 945 946 int 947 ath_media_change(struct ifnet *ifp) 948 { 949 int error; 950 951 error = ieee80211_media_change(ifp); 952 if (error == ENETRESET) { 953 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == 954 (IFF_RUNNING|IFF_UP)) 955 ath_init(ifp); /* XXX lose error */ 956 error = 0; 957 } 958 return error; 959 } 960 961 void 962 ath_watchdog(struct ifnet *ifp) 963 { 964 struct ath_softc *sc = ifp->if_softc; 965 966 ifp->if_timer = 0; 967 if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) 968 return; 969 if (sc->sc_tx_timer) { 970 if (--sc->sc_tx_timer == 0) { 971 printf("%s: device timeout\n", ifp->if_xname); 972 ath_reset(sc, 1); 973 ifp->if_oerrors++; 974 sc->sc_stats.ast_watchdog++; 975 return; 976 } 977 ifp->if_timer = 1; 978 } 979 980 ieee80211_watchdog(ifp); 981 } 982 983 int 984 ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 985 { 986 struct ath_softc *sc = ifp->if_softc; 987 struct ieee80211com *ic = &sc->sc_ic; 988 struct ifreq *ifr = (struct ifreq *)data; 989 struct ifaddr *ifa = (struct ifaddr *)data; 990 int error = 0, s; 991 992 s = splnet(); 993 switch (cmd) { 994 case SIOCSIFADDR: 995 ifp->if_flags |= IFF_UP; 996 #ifdef INET 997 if (ifa->ifa_addr->sa_family == AF_INET) { 998 arp_ifinit(&ic->ic_ac, ifa); 999 } 1000 #endif /* INET */ 1001 /* FALLTHROUGH */ 1002 case SIOCSIFFLAGS: 1003 if (ifp->if_flags & IFF_UP) { 1004 if (ifp->if_flags & IFF_RUNNING) { 1005 /* 1006 * To avoid rescanning another access point, 1007 * do not call ath_init() here. Instead, 1008 * only reflect promisc mode settings. 1009 */ 1010 ath_mode_init(sc); 1011 } else { 1012 /* 1013 * Beware of being called during detach to 1014 * reset promiscuous mode. In that case we 1015 * will still be marked UP but not RUNNING. 1016 * However trying to re-init the interface 1017 * is the wrong thing to do as we've already 1018 * torn down much of our state. There's 1019 * probably a better way to deal with this. 1020 */ 1021 if (!sc->sc_invalid) 1022 ath_init(ifp); /* XXX lose error */ 1023 } 1024 } else 1025 ath_stop(ifp); 1026 break; 1027 case SIOCADDMULTI: 1028 case SIOCDELMULTI: 1029 #ifdef __FreeBSD__ 1030 /* 1031 * The upper layer has already installed/removed 1032 * the multicast address(es), just recalculate the 1033 * multicast filter for the card. 1034 */ 1035 if (ifp->if_flags & IFF_RUNNING) 1036 ath_mode_init(sc); 1037 #endif 1038 error = (cmd == SIOCADDMULTI) ? 
1039 ether_addmulti(ifr, &sc->sc_ic.ic_ac) : 1040 ether_delmulti(ifr, &sc->sc_ic.ic_ac); 1041 if (error == ENETRESET) { 1042 if (ifp->if_flags & IFF_RUNNING) 1043 ath_mode_init(sc); 1044 error = 0; 1045 } 1046 break; 1047 case SIOCGATHSTATS: 1048 error = copyout(&sc->sc_stats, 1049 ifr->ifr_data, sizeof (sc->sc_stats)); 1050 break; 1051 default: 1052 error = ieee80211_ioctl(ifp, cmd, data); 1053 if (error == ENETRESET) { 1054 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == 1055 (IFF_RUNNING|IFF_UP)) { 1056 if (ic->ic_opmode != IEEE80211_M_MONITOR) 1057 ath_init(ifp); /* XXX lose error */ 1058 else 1059 ath_reset(sc, 1); 1060 } 1061 error = 0; 1062 } 1063 break; 1064 } 1065 splx(s); 1066 return error; 1067 } 1068 1069 /* 1070 * Fill the hardware key cache with key entries. 1071 */ 1072 int 1073 ath_initkeytable(struct ath_softc *sc) 1074 { 1075 struct ieee80211com *ic = &sc->sc_ic; 1076 struct ath_hal *ah = sc->sc_ah; 1077 int i; 1078 1079 if (ath_softcrypto) { 1080 /* 1081 * Disable the hardware crypto engine and reset the key cache 1082 * to allow software crypto operation for WEP/RSN/WPA2 1083 */ 1084 if (ic->ic_flags & (IEEE80211_F_WEPON|IEEE80211_F_RSNON)) 1085 (void)ath_hal_softcrypto(ah, AH_TRUE); 1086 else 1087 (void)ath_hal_softcrypto(ah, AH_FALSE); 1088 return (0); 1089 } 1090 1091 /* WEP is disabled, we only support WEP in hardware yet */ 1092 if ((ic->ic_flags & IEEE80211_F_WEPON) == 0) 1093 return (0); 1094 1095 /* 1096 * Setup the hardware after reset: the key cache is filled as 1097 * needed and the receive engine is set going. Frame transmit 1098 * is handled entirely in the frame output path; there's nothing 1099 * to do here except setup the interrupt mask. 1100 */ 1101 1102 /* XXX maybe should reset all keys when !WEPON */ 1103 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 1104 struct ieee80211_key *k = &ic->ic_nw_keys[i]; 1105 if (k->k_len == 0) 1106 ath_hal_reset_key(ah, i); 1107 else { 1108 HAL_KEYVAL hk; 1109 1110 bzero(&hk, sizeof(hk)); 1111 /* 1112 * Pad the key to a supported key length. It 1113 * is always a good idea to use full-length 1114 * keys without padded zeros but this seems 1115 * to be the default behaviour used by many 1116 * implementations. 1117 */ 1118 if (k->k_cipher == IEEE80211_CIPHER_WEP40) 1119 hk.wk_len = AR5K_KEYVAL_LENGTH_40; 1120 else if (k->k_cipher == IEEE80211_CIPHER_WEP104) 1121 hk.wk_len = AR5K_KEYVAL_LENGTH_104; 1122 else 1123 return (EINVAL); 1124 bcopy(k->k_key, hk.wk_key, hk.wk_len); 1125 1126 if (ath_hal_set_key(ah, i, &hk) != AH_TRUE) 1127 return (EINVAL); 1128 } 1129 } 1130 1131 return (0); 1132 } 1133 1134 void 1135 ath_mcastfilter_accum(caddr_t dl, u_int32_t (*mfilt)[2]) 1136 { 1137 u_int32_t val; 1138 u_int8_t pos; 1139 1140 val = LE_READ_4(dl + 0); 1141 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 1142 val = LE_READ_4(dl + 3); 1143 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 1144 pos &= 0x3f; 1145 (*mfilt)[pos / 32] |= (1 << (pos % 32)); 1146 } 1147 1148 void 1149 ath_mcastfilter_compute(struct ath_softc *sc, u_int32_t (*mfilt)[2]) 1150 { 1151 struct ifnet *ifp = &sc->sc_ic.ic_if; 1152 struct ether_multi *enm; 1153 struct ether_multistep estep; 1154 1155 ETHER_FIRST_MULTI(estep, &sc->sc_ic.ic_ac, enm); 1156 while (enm != NULL) { 1157 /* XXX Punt on ranges. 
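 * A range of multicast addresses cannot be hashed individually, so in that case both hash words are set to all ones and IFF_ALLMULTI is flagged on the interface.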
*/ 1158 if (!IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) { 1159 (*mfilt)[0] = (*mfilt)[1] = ~((u_int32_t)0); 1160 ifp->if_flags |= IFF_ALLMULTI; 1161 return; 1162 } 1163 ath_mcastfilter_accum(enm->enm_addrlo, mfilt); 1164 ETHER_NEXT_MULTI(estep, enm); 1165 } 1166 ifp->if_flags &= ~IFF_ALLMULTI; 1167 } 1168 1169 /* 1170 * Calculate the receive filter according to the 1171 * operating mode and state: 1172 * 1173 * o always accept unicast, broadcast, and multicast traffic 1174 * o maintain current state of phy error reception 1175 * o probe request frames are accepted only when operating in 1176 * hostap, adhoc, or monitor modes 1177 * o enable promiscuous mode according to the interface state 1178 * o accept beacons: 1179 * - when operating in adhoc mode so the 802.11 layer creates 1180 * node table entries for peers, 1181 * - when operating in station mode for collecting rssi data when 1182 * the station is otherwise quiet, or 1183 * - when scanning 1184 */ 1185 u_int32_t 1186 ath_calcrxfilter(struct ath_softc *sc) 1187 { 1188 struct ieee80211com *ic = &sc->sc_ic; 1189 struct ath_hal *ah = sc->sc_ah; 1190 struct ifnet *ifp = &ic->ic_if; 1191 u_int32_t rfilt; 1192 1193 rfilt = (ath_hal_get_rx_filter(ah) & HAL_RX_FILTER_PHYERR) 1194 | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST; 1195 if (ic->ic_opmode != IEEE80211_M_STA) 1196 rfilt |= HAL_RX_FILTER_PROBEREQ; 1197 #ifndef IEEE80211_STA_ONLY 1198 if (ic->ic_opmode != IEEE80211_M_AHDEMO) 1199 #endif 1200 rfilt |= HAL_RX_FILTER_BEACON; 1201 if (ifp->if_flags & IFF_PROMISC) 1202 rfilt |= HAL_RX_FILTER_PROM; 1203 return rfilt; 1204 } 1205 1206 void 1207 ath_mode_init(struct ath_softc *sc) 1208 { 1209 struct ath_hal *ah = sc->sc_ah; 1210 u_int32_t rfilt, mfilt[2]; 1211 1212 /* configure rx filter */ 1213 rfilt = ath_calcrxfilter(sc); 1214 ath_hal_set_rx_filter(ah, rfilt); 1215 1216 /* configure operational mode */ 1217 ath_hal_set_opmode(ah); 1218 1219 /* calculate and install multicast filter */ 1220 mfilt[0] = mfilt[1] = 0; 1221 ath_mcastfilter_compute(sc, &mfilt); 1222 ath_hal_set_mcast_filter(ah, mfilt[0], mfilt[1]); 1223 DPRINTF(ATH_DEBUG_MODE, ("%s: RX filter 0x%x, MC filter %08x:%08x\n", 1224 __func__, rfilt, mfilt[0], mfilt[1])); 1225 } 1226 1227 struct mbuf * 1228 ath_getmbuf(int flags, int type, u_int pktlen) 1229 { 1230 struct mbuf *m; 1231 1232 KASSERT(pktlen <= MCLBYTES, ("802.11 packet too large: %u", pktlen)); 1233 #ifdef __FreeBSD__ 1234 if (pktlen <= MHLEN) { 1235 MGETHDR(m, flags, type); 1236 } else { 1237 m = m_getcl(flags, type, M_PKTHDR); 1238 } 1239 #else 1240 MGETHDR(m, flags, type); 1241 if (m != NULL && pktlen > MHLEN) { 1242 MCLGET(m, flags); 1243 if ((m->m_flags & M_EXT) == 0) { 1244 m_free(m); 1245 m = NULL; 1246 } 1247 } 1248 #endif 1249 return m; 1250 } 1251 1252 #ifndef IEEE80211_STA_ONLY 1253 int 1254 ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni) 1255 { 1256 struct ieee80211com *ic = &sc->sc_ic; 1257 struct ath_hal *ah = sc->sc_ah; 1258 struct ath_buf *bf; 1259 struct ath_desc *ds; 1260 struct mbuf *m; 1261 int error; 1262 u_int8_t rate; 1263 const HAL_RATE_TABLE *rt; 1264 u_int flags = 0; 1265 1266 bf = sc->sc_bcbuf; 1267 if (bf->bf_m != NULL) { 1268 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1269 m_freem(bf->bf_m); 1270 bf->bf_m = NULL; 1271 bf->bf_node = NULL; 1272 } 1273 /* 1274 * NB: the beacon data buffer must be 32-bit aligned; 1275 * we assume the mbuf routines will return us something 1276 * with this alignment (perhaps should assert). 
1277 */ 1278 m = ieee80211_beacon_alloc(ic, ni); 1279 if (m == NULL) { 1280 DPRINTF(ATH_DEBUG_BEACON, ("%s: cannot get mbuf/cluster\n", 1281 __func__)); 1282 sc->sc_stats.ast_be_nombuf++; 1283 return ENOMEM; 1284 } 1285 1286 DPRINTF(ATH_DEBUG_BEACON, ("%s: m %p len %u\n", __func__, m, m->m_len)); 1287 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m, 1288 BUS_DMA_NOWAIT); 1289 if (error != 0) { 1290 m_freem(m); 1291 return error; 1292 } 1293 KASSERT(bf->bf_nseg == 1, 1294 ("%s: multi-segment packet; nseg %u", __func__, bf->bf_nseg)); 1295 bf->bf_m = m; 1296 1297 /* setup descriptors */ 1298 ds = bf->bf_desc; 1299 bzero(ds, sizeof(struct ath_desc)); 1300 1301 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol) { 1302 ds->ds_link = bf->bf_daddr; /* link to self */ 1303 flags |= HAL_TXDESC_VEOL; 1304 } else { 1305 ds->ds_link = 0; 1306 } 1307 ds->ds_data = bf->bf_segs[0].ds_addr; 1308 1309 DPRINTF(ATH_DEBUG_ANY, ("%s: segaddr %p seglen %u\n", __func__, 1310 (caddr_t)bf->bf_segs[0].ds_addr, (u_int)bf->bf_segs[0].ds_len)); 1311 1312 /* 1313 * Calculate rate code. 1314 * XXX everything at min xmit rate 1315 */ 1316 rt = sc->sc_currates; 1317 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 1318 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) { 1319 rate = rt->info[0].rateCode | rt->info[0].shortPreamble; 1320 } else { 1321 rate = rt->info[0].rateCode; 1322 } 1323 1324 flags = HAL_TXDESC_NOACK; 1325 if (ic->ic_opmode == IEEE80211_M_IBSS) 1326 flags |= HAL_TXDESC_VEOL; 1327 1328 if (!ath_hal_setup_tx_desc(ah, ds 1329 , m->m_pkthdr.len + IEEE80211_CRC_LEN /* packet length */ 1330 , sizeof(struct ieee80211_frame) /* header length */ 1331 , HAL_PKT_TYPE_BEACON /* Atheros packet type */ 1332 , 60 /* txpower XXX */ 1333 , rate, 1 /* series 0 rate/tries */ 1334 , HAL_TXKEYIX_INVALID /* no encryption */ 1335 , 0 /* antenna mode */ 1336 , flags /* no ack for beacons */ 1337 , 0 /* rts/cts rate */ 1338 , 0 /* rts/cts duration */ 1339 )) { 1340 printf("%s: ath_hal_setup_tx_desc failed\n", __func__); 1341 return -1; 1342 } 1343 /* NB: beacon's BufLen must be a multiple of 4 bytes */ 1344 /* XXX verify mbuf data area covers this roundup */ 1345 if (!ath_hal_fill_tx_desc(ah, ds 1346 , roundup(bf->bf_segs[0].ds_len, 4) /* buffer length */ 1347 , AH_TRUE /* first segment */ 1348 , AH_TRUE /* last segment */ 1349 )) { 1350 printf("%s: ath_hal_fill_tx_desc failed\n", __func__); 1351 return -1; 1352 } 1353 1354 /* XXX it is not appropriate to bus_dmamap_sync? -dcy */ 1355 1356 return 0; 1357 } 1358 1359 void 1360 ath_beacon_proc(void *arg, int pending) 1361 { 1362 struct ath_softc *sc = arg; 1363 struct ieee80211com *ic = &sc->sc_ic; 1364 struct ath_buf *bf = sc->sc_bcbuf; 1365 struct ath_hal *ah = sc->sc_ah; 1366 1367 DPRINTF(ATH_DEBUG_BEACON_PROC, ("%s: pending %u\n", __func__, pending)); 1368 if (ic->ic_opmode == IEEE80211_M_STA || 1369 bf == NULL || bf->bf_m == NULL) { 1370 DPRINTF(ATH_DEBUG_ANY, ("%s: ic_flags=%x bf=%p bf_m=%p\n", 1371 __func__, ic->ic_flags, bf, bf ? 
bf->bf_m : NULL)); 1372 return; 1373 } 1374 /* TODO: update beacon to reflect PS poll state */ 1375 if (!ath_hal_stop_tx_dma(ah, sc->sc_bhalq)) { 1376 DPRINTF(ATH_DEBUG_ANY, ("%s: beacon queue %u did not stop?\n", 1377 __func__, sc->sc_bhalq)); 1378 } 1379 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 1380 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1381 1382 ath_hal_put_tx_buf(ah, sc->sc_bhalq, bf->bf_daddr); 1383 ath_hal_tx_start(ah, sc->sc_bhalq); 1384 DPRINTF(ATH_DEBUG_BEACON_PROC, 1385 ("%s: TXDP%u = %p (%p)\n", __func__, 1386 sc->sc_bhalq, (caddr_t)bf->bf_daddr, bf->bf_desc)); 1387 } 1388 1389 void 1390 ath_beacon_free(struct ath_softc *sc) 1391 { 1392 struct ath_buf *bf = sc->sc_bcbuf; 1393 1394 if (bf->bf_m != NULL) { 1395 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1396 m_freem(bf->bf_m); 1397 bf->bf_m = NULL; 1398 bf->bf_node = NULL; 1399 } 1400 } 1401 #endif /* IEEE80211_STA_ONLY */ 1402 1403 /* 1404 * Configure the beacon and sleep timers. 1405 * 1406 * When operating as an AP this resets the TSF and sets 1407 * up the hardware to notify us when we need to issue beacons. 1408 * 1409 * When operating in station mode this sets up the beacon 1410 * timers according to the timestamp of the last received 1411 * beacon and the current TSF, configures PCF and DTIM 1412 * handling, programs the sleep registers so the hardware 1413 * will wakeup in time to receive beacons, and configures 1414 * the beacon miss handling so we'll receive a BMISS 1415 * interrupt when we stop seeing beacons from the AP 1416 * we've associated with. 1417 */ 1418 void 1419 ath_beacon_config(struct ath_softc *sc) 1420 { 1421 #define MS_TO_TU(x) (((x) * 1000) / 1024) 1422 struct ath_hal *ah = sc->sc_ah; 1423 struct ieee80211com *ic = &sc->sc_ic; 1424 struct ieee80211_node *ni = ic->ic_bss; 1425 u_int32_t nexttbtt, intval; 1426 1427 nexttbtt = (LE_READ_4(ni->ni_tstamp + 4) << 22) | 1428 (LE_READ_4(ni->ni_tstamp) >> 10); 1429 intval = MAX(1, ni->ni_intval) & HAL_BEACON_PERIOD; 1430 if (nexttbtt == 0) { /* e.g. for ap mode */ 1431 nexttbtt = intval; 1432 } else if (intval) { 1433 nexttbtt = roundup(nexttbtt, intval); 1434 } 1435 DPRINTF(ATH_DEBUG_BEACON, ("%s: intval %u nexttbtt %u\n", 1436 __func__, ni->ni_intval, nexttbtt)); 1437 if (ic->ic_opmode == IEEE80211_M_STA) { 1438 HAL_BEACON_STATE bs; 1439 u_int32_t bmisstime; 1440 1441 /* NB: no PCF support right now */ 1442 bzero(&bs, sizeof(bs)); 1443 bs.bs_intval = intval; 1444 bs.bs_nexttbtt = nexttbtt; 1445 bs.bs_dtimperiod = bs.bs_intval; 1446 bs.bs_nextdtim = nexttbtt; 1447 /* 1448 * Calculate the number of consecutive beacons to miss 1449 * before taking a BMISS interrupt. The configuration 1450 * is specified in ms, so we need to convert that to 1451 * TU's and then calculate based on the beacon interval. 1452 * Note that we clamp the result to at most 7 beacons. 1453 */ 1454 bmisstime = MAX(7, ic->ic_bmisstimeout); 1455 bs.bs_bmissthreshold = howmany(bmisstime, intval); 1456 if (bs.bs_bmissthreshold > 7) { 1457 bs.bs_bmissthreshold = 7; 1458 } else if (bs.bs_bmissthreshold <= 0) { 1459 bs.bs_bmissthreshold = 1; 1460 } 1461 1462 /* 1463 * Calculate sleep duration. The configuration is 1464 * given in ms. We insure a multiple of the beacon 1465 * period is used. Also, if the sleep duration is 1466 * greater than the DTIM period then it makes sense 1467 * to make it a multiple of that.
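 * (MS_TO_TU() above converts milliseconds to 802.11 time units; one TU is 1024 microseconds.)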
 1468 * 1469 * XXX fixed at 100ms 1470 */ 1471 bs.bs_sleepduration = 1472 roundup(MS_TO_TU(100), bs.bs_intval); 1473 if (bs.bs_sleepduration > bs.bs_dtimperiod) { 1474 bs.bs_sleepduration = 1475 roundup(bs.bs_sleepduration, bs.bs_dtimperiod); 1476 } 1477 1478 DPRINTF(ATH_DEBUG_BEACON, 1479 ("%s: intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u" 1480 " sleep %u\n" 1481 , __func__ 1482 , bs.bs_intval 1483 , bs.bs_nexttbtt 1484 , bs.bs_dtimperiod 1485 , bs.bs_nextdtim 1486 , bs.bs_bmissthreshold 1487 , bs.bs_sleepduration 1488 )); 1489 ath_hal_set_intr(ah, 0); 1490 ath_hal_set_beacon_timers(ah, &bs, 0/*XXX*/, 0, 0); 1491 sc->sc_imask |= HAL_INT_BMISS; 1492 ath_hal_set_intr(ah, sc->sc_imask); 1493 } 1494 #ifndef IEEE80211_STA_ONLY 1495 else { 1496 ath_hal_set_intr(ah, 0); 1497 if (nexttbtt == intval) 1498 intval |= HAL_BEACON_RESET_TSF; 1499 if (ic->ic_opmode == IEEE80211_M_IBSS) { 1500 /* 1501 * In IBSS mode enable the beacon timers but only 1502 * enable SWBA interrupts if we need to manually 1503 * prepare beacon frames. Otherwise we use a 1504 * self-linked tx descriptor and let the hardware 1505 * deal with things. 1506 */ 1507 intval |= HAL_BEACON_ENA; 1508 if (!sc->sc_veol) 1509 sc->sc_imask |= HAL_INT_SWBA; 1510 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) { 1511 /* 1512 * In AP mode we enable the beacon timers and 1513 * SWBA interrupts to prepare beacon frames. 1514 */ 1515 intval |= HAL_BEACON_ENA; 1516 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ 1517 } 1518 ath_hal_init_beacon(ah, nexttbtt, intval); 1519 ath_hal_set_intr(ah, sc->sc_imask); 1520 /* 1521 * When using a self-linked beacon descriptor in IBSS 1522 * mode load it once here. 1523 */ 1524 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol) 1525 ath_beacon_proc(sc, 0); 1526 } 1527 #endif 1528 } 1529 1530 int 1531 ath_desc_alloc(struct ath_softc *sc) 1532 { 1533 int i, bsize, error = -1; 1534 struct ath_desc *ds; 1535 struct ath_buf *bf; 1536 1537 /* allocate descriptors */ 1538 sc->sc_desc_len = sizeof(struct ath_desc) * 1539 (ATH_TXBUF * ATH_TXDESC + ATH_RXBUF + 1); 1540 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_desc_len, PAGE_SIZE, 1541 0, &sc->sc_dseg, 1, &sc->sc_dnseg, 0)) != 0) { 1542 printf("%s: unable to allocate control data, error = %d\n", 1543 sc->sc_dev.dv_xname, error); 1544 goto fail0; 1545 } 1546 1547 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg, 1548 sc->sc_desc_len, (caddr_t *)&sc->sc_desc, BUS_DMA_COHERENT)) != 0) { 1549 printf("%s: unable to map control data, error = %d\n", 1550 sc->sc_dev.dv_xname, error); 1551 goto fail1; 1552 } 1553 1554 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_desc_len, 1, 1555 sc->sc_desc_len, 0, 0, &sc->sc_ddmamap)) != 0) { 1556 printf("%s: unable to create control data DMA map, " 1557 "error = %d\n", sc->sc_dev.dv_xname, error); 1558 goto fail2; 1559 } 1560 1561 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_ddmamap, sc->sc_desc, 1562 sc->sc_desc_len, NULL, 0)) != 0) { 1563 printf("%s: unable to load control data DMA map, error = %d\n", 1564 sc->sc_dev.dv_xname, error); 1565 goto fail3; 1566 } 1567 1568 ds = sc->sc_desc; 1569 sc->sc_desc_paddr = sc->sc_ddmamap->dm_segs[0].ds_addr; 1570 1571 DPRINTF(ATH_DEBUG_XMIT_DESC|ATH_DEBUG_RECV_DESC, 1572 ("ath_desc_alloc: DMA map: %p (%lu) -> %p (%lu)\n", 1573 ds, (u_long)sc->sc_desc_len, 1574 (caddr_t) sc->sc_desc_paddr, /*XXX*/ (u_long) sc->sc_desc_len)); 1575 1576 /* allocate buffers */ 1577 bsize = sizeof(struct ath_buf) * (ATH_TXBUF + ATH_RXBUF + 1); 1578 bf = malloc(bsize, M_DEVBUF,
M_NOWAIT | M_ZERO); 1579 if (bf == NULL) { 1580 printf("%s: unable to allocate Tx/Rx buffers\n", 1581 sc->sc_dev.dv_xname); 1582 error = ENOMEM; 1583 goto fail3; 1584 } 1585 sc->sc_bufptr = bf; 1586 1587 TAILQ_INIT(&sc->sc_rxbuf); 1588 for (i = 0; i < ATH_RXBUF; i++, bf++, ds++) { 1589 bf->bf_desc = ds; 1590 bf->bf_daddr = sc->sc_desc_paddr + 1591 ((caddr_t)ds - (caddr_t)sc->sc_desc); 1592 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 1593 MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) { 1594 printf("%s: unable to create Rx dmamap, error = %d\n", 1595 sc->sc_dev.dv_xname, error); 1596 goto fail4; 1597 } 1598 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 1599 } 1600 1601 TAILQ_INIT(&sc->sc_txbuf); 1602 for (i = 0; i < ATH_TXBUF; i++, bf++, ds += ATH_TXDESC) { 1603 bf->bf_desc = ds; 1604 bf->bf_daddr = sc->sc_desc_paddr + 1605 ((caddr_t)ds - (caddr_t)sc->sc_desc); 1606 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1607 ATH_TXDESC, MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) { 1608 printf("%s: unable to create Tx dmamap, error = %d\n", 1609 sc->sc_dev.dv_xname, error); 1610 goto fail5; 1611 } 1612 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 1613 } 1614 TAILQ_INIT(&sc->sc_txq); 1615 1616 /* beacon buffer */ 1617 bf->bf_desc = ds; 1618 bf->bf_daddr = sc->sc_desc_paddr + ((caddr_t)ds - (caddr_t)sc->sc_desc); 1619 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 0, 1620 &bf->bf_dmamap)) != 0) { 1621 printf("%s: unable to create beacon dmamap, error = %d\n", 1622 sc->sc_dev.dv_xname, error); 1623 goto fail5; 1624 } 1625 sc->sc_bcbuf = bf; 1626 return 0; 1627 1628 fail5: 1629 for (i = ATH_RXBUF; i < ATH_RXBUF + ATH_TXBUF; i++) { 1630 if (sc->sc_bufptr[i].bf_dmamap == NULL) 1631 continue; 1632 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap); 1633 } 1634 fail4: 1635 for (i = 0; i < ATH_RXBUF; i++) { 1636 if (sc->sc_bufptr[i].bf_dmamap == NULL) 1637 continue; 1638 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap); 1639 } 1640 fail3: 1641 bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap); 1642 fail2: 1643 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap); 1644 sc->sc_ddmamap = NULL; 1645 fail1: 1646 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_desc, sc->sc_desc_len); 1647 fail0: 1648 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg); 1649 return error; 1650 } 1651 1652 void 1653 ath_desc_free(struct ath_softc *sc) 1654 { 1655 struct ath_buf *bf; 1656 1657 bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap); 1658 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap); 1659 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg); 1660 1661 TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) { 1662 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1663 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 1664 m_freem(bf->bf_m); 1665 } 1666 TAILQ_FOREACH(bf, &sc->sc_txbuf, bf_list) 1667 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 1668 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 1669 if (bf->bf_m) { 1670 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1671 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 1672 m_freem(bf->bf_m); 1673 bf->bf_m = NULL; 1674 } 1675 } 1676 if (sc->sc_bcbuf != NULL) { 1677 bus_dmamap_unload(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap); 1678 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap); 1679 sc->sc_bcbuf = NULL; 1680 } 1681 1682 TAILQ_INIT(&sc->sc_rxbuf); 1683 TAILQ_INIT(&sc->sc_txbuf); 1684 TAILQ_INIT(&sc->sc_txq); 1685 free(sc->sc_bufptr, M_DEVBUF); 1686 sc->sc_bufptr = NULL; 1687 } 1688 1689 struct ieee80211_node * 1690 ath_node_alloc(struct 
ieee80211com *ic) 1691 { 1692 struct ath_node *an; 1693 1694 an = malloc(sizeof(*an), M_DEVBUF, M_NOWAIT | M_ZERO); 1695 if (an) { 1696 int i; 1697 for (i = 0; i < ATH_RHIST_SIZE; i++) 1698 an->an_rx_hist[i].arh_ticks = ATH_RHIST_NOTIME; 1699 an->an_rx_hist_next = ATH_RHIST_SIZE-1; 1700 return &an->an_node; 1701 } else 1702 return NULL; 1703 } 1704 1705 void 1706 ath_node_free(struct ieee80211com *ic, struct ieee80211_node *ni) 1707 { 1708 struct ath_softc *sc = ic->ic_if.if_softc; 1709 struct ath_buf *bf; 1710 1711 TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) { 1712 if (bf->bf_node == ni) 1713 bf->bf_node = NULL; 1714 } 1715 (*sc->sc_node_free)(ic, ni); 1716 } 1717 1718 void 1719 ath_node_copy(struct ieee80211com *ic, 1720 struct ieee80211_node *dst, const struct ieee80211_node *src) 1721 { 1722 struct ath_softc *sc = ic->ic_if.if_softc; 1723 1724 bcopy(&src[1], &dst[1], 1725 sizeof(struct ath_node) - sizeof(struct ieee80211_node)); 1726 (*sc->sc_node_copy)(ic, dst, src); 1727 } 1728 1729 u_int8_t 1730 ath_node_getrssi(struct ieee80211com *ic, const struct ieee80211_node *ni) 1731 { 1732 const struct ath_node *an = ATH_NODE(ni); 1733 int i, now, nsamples, rssi; 1734 1735 /* 1736 * Calculate the average over the last second of sampled data. 1737 */ 1738 now = ATH_TICKS(); 1739 nsamples = 0; 1740 rssi = 0; 1741 i = an->an_rx_hist_next; 1742 do { 1743 const struct ath_recv_hist *rh = &an->an_rx_hist[i]; 1744 if (rh->arh_ticks == ATH_RHIST_NOTIME) 1745 goto done; 1746 if (now - rh->arh_ticks > hz) 1747 goto done; 1748 rssi += rh->arh_rssi; 1749 nsamples++; 1750 if (i == 0) { 1751 i = ATH_RHIST_SIZE-1; 1752 } else { 1753 i--; 1754 } 1755 } while (i != an->an_rx_hist_next); 1756 done: 1757 /* 1758 * Return either the average or the last known 1759 * value if there is no recent data. 1760 */ 1761 return (nsamples ? rssi / nsamples : an->an_rx_hist[i].arh_rssi); 1762 } 1763 1764 int 1765 ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) 1766 { 1767 struct ath_hal *ah = sc->sc_ah; 1768 int error; 1769 struct mbuf *m; 1770 struct ath_desc *ds; 1771 1772 m = bf->bf_m; 1773 if (m == NULL) { 1774 /* 1775 * NB: by assigning a page to the rx dma buffer we 1776 * implicitly satisfy the Atheros requirement that 1777 * this buffer be cache-line-aligned and sized to be 1778 * multiple of the cache line size. Not doing this 1779 * causes weird stuff to happen (for the 5210 at least). 1780 */ 1781 m = ath_getmbuf(M_DONTWAIT, MT_DATA, MCLBYTES); 1782 if (m == NULL) { 1783 DPRINTF(ATH_DEBUG_ANY, 1784 ("%s: no mbuf/cluster\n", __func__)); 1785 sc->sc_stats.ast_rx_nombuf++; 1786 return ENOMEM; 1787 } 1788 bf->bf_m = m; 1789 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 1790 1791 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m, 1792 BUS_DMA_NOWAIT); 1793 if (error != 0) { 1794 DPRINTF(ATH_DEBUG_ANY, 1795 ("%s: ath_bus_dmamap_load_mbuf failed;" 1796 " error %d\n", __func__, error)); 1797 sc->sc_stats.ast_rx_busdma++; 1798 return error; 1799 } 1800 KASSERT(bf->bf_nseg == 1, 1801 ("ath_rxbuf_init: multi-segment packet; nseg %u", 1802 bf->bf_nseg)); 1803 } 1804 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 1805 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1806 1807 /* 1808 * Setup descriptors. For receive we always terminate 1809 * the descriptor list with a self-linked entry so we'll 1810 * not get overrun under high load (as can happen with a 1811 * 5212 when ANI processing enables PHY errors). 
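 * With a self-linked final descriptor the DMA engine rewrites that last buffer rather than running off the end of the list.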
1812 * 1813 * To insure the last descriptor is self-linked we create 1814 * each descriptor as self-linked and add it to the end. As 1815 * each additional descriptor is added the previous self-linked 1816 * entry is ``fixed'' naturally. This should be safe even 1817 * if DMA is happening. When processing RX interrupts we 1818 * never remove/process the last, self-linked, entry on the 1819 * descriptor list. This insures the hardware always has 1820 * someplace to write a new frame. 1821 */ 1822 ds = bf->bf_desc; 1823 bzero(ds, sizeof(struct ath_desc)); 1824 #ifndef IEEE80211_STA_ONLY 1825 if (sc->sc_ic.ic_opmode != IEEE80211_M_HOSTAP) 1826 ds->ds_link = bf->bf_daddr; /* link to self */ 1827 #endif 1828 ds->ds_data = bf->bf_segs[0].ds_addr; 1829 ath_hal_setup_rx_desc(ah, ds 1830 , m->m_len /* buffer size */ 1831 , 0 1832 ); 1833 1834 if (sc->sc_rxlink != NULL) 1835 *sc->sc_rxlink = bf->bf_daddr; 1836 sc->sc_rxlink = &ds->ds_link; 1837 return 0; 1838 } 1839 1840 void 1841 ath_rx_proc(void *arg, int npending) 1842 { 1843 #define PA2DESC(_sc, _pa) \ 1844 ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \ 1845 ((_pa) - (_sc)->sc_desc_paddr))) 1846 struct ath_softc *sc = arg; 1847 struct ath_buf *bf; 1848 struct ieee80211com *ic = &sc->sc_ic; 1849 struct ifnet *ifp = &ic->ic_if; 1850 struct ath_hal *ah = sc->sc_ah; 1851 struct ath_desc *ds; 1852 struct mbuf *m; 1853 struct ieee80211_frame *wh; 1854 struct ieee80211_frame whbuf; 1855 struct ieee80211_rxinfo rxi; 1856 struct ieee80211_node *ni; 1857 struct ath_node *an; 1858 struct ath_recv_hist *rh; 1859 int len; 1860 u_int phyerr; 1861 HAL_STATUS status; 1862 1863 DPRINTF(ATH_DEBUG_RX_PROC, ("%s: pending %u\n", __func__, npending)); 1864 do { 1865 bf = TAILQ_FIRST(&sc->sc_rxbuf); 1866 if (bf == NULL) { /* NB: shouldn't happen */ 1867 printf("%s: ath_rx_proc: no buffer!\n", ifp->if_xname); 1868 break; 1869 } 1870 ds = bf->bf_desc; 1871 if (ds->ds_link == bf->bf_daddr) { 1872 /* NB: never process the self-linked entry at the end */ 1873 break; 1874 } 1875 m = bf->bf_m; 1876 if (m == NULL) { /* NB: shouldn't happen */ 1877 printf("%s: ath_rx_proc: no mbuf!\n", ifp->if_xname); 1878 continue; 1879 } 1880 /* XXX sync descriptor memory */ 1881 /* 1882 * Must provide the virtual address of the current 1883 * descriptor, the physical address, and the virtual 1884 * address of the next descriptor in the h/w chain. 1885 * This allows the HAL to look ahead to see if the 1886 * hardware is done with a descriptor by checking the 1887 * done bit in the following descriptor and the address 1888 * of the current descriptor the DMA engine is working 1889 * on. All this is necessary because of our use of 1890 * a self-linked list to avoid rx overruns. 1891 */ 1892 status = ath_hal_proc_rx_desc(ah, ds, 1893 bf->bf_daddr, PA2DESC(sc, ds->ds_link)); 1894 #ifdef AR_DEBUG 1895 if (ath_debug & ATH_DEBUG_RECV_DESC) 1896 ath_printrxbuf(bf, status == HAL_OK); 1897 #endif 1898 if (status == HAL_EINPROGRESS) 1899 break; 1900 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 1901 1902 if (ds->ds_rxstat.rs_more) { 1903 /* 1904 * Frame spans multiple descriptors; this 1905 * cannot happen yet as we don't support 1906 * jumbograms. If not in monitor mode, 1907 * discard the frame. 1908 */ 1909 1910 /* 1911 * Enable this if you want to see error 1912 * frames in Monitor mode. 1913 */ 1914 #ifdef ERROR_FRAMES 1915 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 1916 /* XXX statistic */ 1917 goto rx_next; 1918 } 1919 #endif 1920 /* fall thru for monitor mode handling... 
*/ 1921 1922 } else if (ds->ds_rxstat.rs_status != 0) { 1923 if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC) 1924 sc->sc_stats.ast_rx_crcerr++; 1925 if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO) 1926 sc->sc_stats.ast_rx_fifoerr++; 1927 if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) 1928 sc->sc_stats.ast_rx_badcrypt++; 1929 if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) { 1930 sc->sc_stats.ast_rx_phyerr++; 1931 phyerr = ds->ds_rxstat.rs_phyerr & 0x1f; 1932 sc->sc_stats.ast_rx_phy[phyerr]++; 1933 } 1934 1935 /* 1936 * reject error frames, we normally don't want 1937 * to see them in monitor mode. 1938 */ 1939 if ((ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT ) || 1940 (ds->ds_rxstat.rs_status & HAL_RXERR_PHY)) 1941 goto rx_next; 1942 1943 /* 1944 * In monitor mode, allow through packets that 1945 * cannot be decrypted 1946 */ 1947 if ((ds->ds_rxstat.rs_status & ~HAL_RXERR_DECRYPT) || 1948 sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR) 1949 goto rx_next; 1950 } 1951 1952 len = ds->ds_rxstat.rs_datalen; 1953 if (len < IEEE80211_MIN_LEN) { 1954 DPRINTF(ATH_DEBUG_RECV, ("%s: short packet %d\n", 1955 __func__, len)); 1956 sc->sc_stats.ast_rx_tooshort++; 1957 goto rx_next; 1958 } 1959 1960 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 1961 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1962 1963 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1964 bf->bf_m = NULL; 1965 m->m_pkthdr.rcvif = ifp; 1966 m->m_pkthdr.len = m->m_len = len; 1967 1968 #if NBPFILTER > 0 1969 if (sc->sc_drvbpf) { 1970 struct mbuf mb; 1971 1972 sc->sc_rxtap.wr_flags = IEEE80211_RADIOTAP_F_FCS; 1973 sc->sc_rxtap.wr_rate = 1974 sc->sc_hwmap[ds->ds_rxstat.rs_rate] & 1975 IEEE80211_RATE_VAL; 1976 sc->sc_rxtap.wr_antenna = ds->ds_rxstat.rs_antenna; 1977 sc->sc_rxtap.wr_rssi = ds->ds_rxstat.rs_rssi; 1978 sc->sc_rxtap.wr_max_rssi = ic->ic_max_rssi; 1979 1980 mb.m_data = (caddr_t)&sc->sc_rxtap; 1981 mb.m_len = sc->sc_rxtap_len; 1982 mb.m_next = m; 1983 mb.m_nextpkt = NULL; 1984 mb.m_type = 0; 1985 mb.m_flags = 0; 1986 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN); 1987 } 1988 #endif 1989 m_adj(m, -IEEE80211_CRC_LEN); 1990 wh = mtod(m, struct ieee80211_frame *); 1991 rxi.rxi_flags = 0; 1992 if (!ath_softcrypto && (wh->i_fc[1] & IEEE80211_FC1_WEP)) { 1993 /* 1994 * WEP is decrypted by hardware. Clear WEP bit 1995 * and trim WEP header for ieee80211_input(). 1996 */ 1997 wh->i_fc[1] &= ~IEEE80211_FC1_WEP; 1998 bcopy(wh, &whbuf, sizeof(whbuf)); 1999 m_adj(m, IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN); 2000 wh = mtod(m, struct ieee80211_frame *); 2001 bcopy(&whbuf, wh, sizeof(whbuf)); 2002 /* 2003 * Also trim WEP ICV from the tail. 2004 */ 2005 m_adj(m, -IEEE80211_WEP_CRCLEN); 2006 /* 2007 * The header has probably moved. 2008 */ 2009 wh = mtod(m, struct ieee80211_frame *); 2010 2011 rxi.rxi_flags |= IEEE80211_RXI_HWDEC; 2012 } 2013 2014 /* 2015 * Locate the node for sender, track state, and 2016 * then pass this node (referenced) up to the 802.11 2017 * layer for its use. 2018 */ 2019 ni = ieee80211_find_rxnode(ic, wh); 2020 2021 /* 2022 * Record driver-specific state. 2023 */ 2024 an = ATH_NODE(ni); 2025 if (++(an->an_rx_hist_next) == ATH_RHIST_SIZE) 2026 an->an_rx_hist_next = 0; 2027 rh = &an->an_rx_hist[an->an_rx_hist_next]; 2028 rh->arh_ticks = ATH_TICKS(); 2029 rh->arh_rssi = ds->ds_rxstat.rs_rssi; 2030 rh->arh_antenna = ds->ds_rxstat.rs_antenna; 2031 2032 /* 2033 * Send frame up for processing. 
2034 */ 2035 rxi.rxi_rssi = ds->ds_rxstat.rs_rssi; 2036 rxi.rxi_tstamp = ds->ds_rxstat.rs_tstamp; 2037 ieee80211_input(ifp, m, ni, &rxi); 2038 2039 /* Handle the rate adaption */ 2040 ieee80211_rssadapt_input(ic, ni, &an->an_rssadapt, 2041 ds->ds_rxstat.rs_rssi); 2042 2043 /* 2044 * The frame may have caused the node to be marked for 2045 * reclamation (e.g. in response to a DEAUTH message) 2046 * so use release_node here instead of unref_node. 2047 */ 2048 ieee80211_release_node(ic, ni); 2049 2050 rx_next: 2051 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 2052 } while (ath_rxbuf_init(sc, bf) == 0); 2053 2054 ath_hal_set_rx_signal(ah); /* rx signal state monitoring */ 2055 ath_hal_start_rx(ah); /* in case of RXEOL */ 2056 #undef PA2DESC 2057 } 2058 2059 /* 2060 * XXX Size of an ACK control frame in bytes. 2061 */ 2062 #define IEEE80211_ACK_SIZE (2+2+IEEE80211_ADDR_LEN+4) 2063 2064 int 2065 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, 2066 struct ath_buf *bf, struct mbuf *m0) 2067 { 2068 struct ieee80211com *ic = &sc->sc_ic; 2069 struct ath_hal *ah = sc->sc_ah; 2070 struct ifnet *ifp = &sc->sc_ic.ic_if; 2071 int i, error, iswep, hdrlen, pktlen, len, s; 2072 u_int8_t rix, cix, txrate, ctsrate; 2073 struct ath_desc *ds; 2074 struct ieee80211_frame *wh; 2075 struct ieee80211_key *k; 2076 u_int32_t iv; 2077 u_int8_t *ivp; 2078 u_int8_t hdrbuf[sizeof(struct ieee80211_frame) + 2079 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN]; 2080 u_int subtype, flags, ctsduration, antenna; 2081 HAL_PKT_TYPE atype; 2082 const HAL_RATE_TABLE *rt; 2083 HAL_BOOL shortPreamble; 2084 struct ath_node *an; 2085 u_int8_t hwqueue = HAL_TX_QUEUE_ID_DATA_MIN; 2086 2087 wh = mtod(m0, struct ieee80211_frame *); 2088 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; 2089 hdrlen = sizeof(struct ieee80211_frame); 2090 pktlen = m0->m_pkthdr.len; 2091 2092 if (ath_softcrypto && iswep) { 2093 k = ieee80211_get_txkey(ic, wh, ni); 2094 if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL) 2095 return ENOMEM; 2096 wh = mtod(m0, struct ieee80211_frame *); 2097 2098 /* reset len in case we got a new mbuf */ 2099 pktlen = m0->m_pkthdr.len; 2100 } else if (!ath_softcrypto && iswep) { 2101 bcopy(mtod(m0, caddr_t), hdrbuf, hdrlen); 2102 m_adj(m0, hdrlen); 2103 M_PREPEND(m0, sizeof(hdrbuf), M_DONTWAIT); 2104 if (m0 == NULL) { 2105 sc->sc_stats.ast_tx_nombuf++; 2106 return ENOMEM; 2107 } 2108 ivp = hdrbuf + hdrlen; 2109 wh = mtod(m0, struct ieee80211_frame *); 2110 /* 2111 * XXX 2112 * IV must not duplicate during the lifetime of the key. 2113 * But no mechanism to renew keys is defined in IEEE 802.11 2114 * for WEP. And the IV may be duplicated at other stations 2115 * because the session key itself is shared. So we use a 2116 * pseudo random IV for now, though it is not the right way. 2117 * 2118 * NB: Rather than use a strictly random IV we select a 2119 * random one to start and then increment the value for 2120 * each frame. This is an explicit tradeoff between 2121 * overhead and security. Given the basic insecurity of 2122 * WEP this seems worthwhile. 2123 */ 2124 2125 /* 2126 * Skip 'bad' IVs from Fluhrer/Mantin/Shamir: 2127 * (B, 255, N) with 3 <= B < 16 and 0 <= N <= 255 2128 */ 2129 iv = ic->ic_iv; 2130 if ((iv & 0xff00) == 0xff00) { 2131 int B = (iv & 0xff0000) >> 16; 2132 if (3 <= B && B < 16) 2133 iv = (B+1) << 16; 2134 } 2135 ic->ic_iv = iv + 1; 2136 2137 /* 2138 * NB: Preserve byte order of IV for packet 2139 * sniffers; it doesn't matter otherwise. 
2140 */ 2141 #if AH_BYTE_ORDER == AH_BIG_ENDIAN 2142 ivp[0] = iv >> 0; 2143 ivp[1] = iv >> 8; 2144 ivp[2] = iv >> 16; 2145 #else 2146 ivp[2] = iv >> 0; 2147 ivp[1] = iv >> 8; 2148 ivp[0] = iv >> 16; 2149 #endif 2150 ivp[3] = ic->ic_wep_txkey << 6; /* Key ID and pad */ 2151 bcopy(hdrbuf, mtod(m0, caddr_t), sizeof(hdrbuf)); 2152 /* 2153 * The length of hdrlen and pktlen must be increased for WEP 2154 */ 2155 len = IEEE80211_WEP_IVLEN + 2156 IEEE80211_WEP_KIDLEN + 2157 IEEE80211_WEP_CRCLEN; 2158 hdrlen += len; 2159 pktlen += len; 2160 } 2161 pktlen += IEEE80211_CRC_LEN; 2162 2163 /* 2164 * Load the DMA map so any coalescing is done. This 2165 * also calculates the number of descriptors we need. 2166 */ 2167 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, 2168 BUS_DMA_NOWAIT); 2169 /* 2170 * Discard null packets and check for packets that 2171 * require too many TX descriptors. We try to convert 2172 * the latter to a cluster. 2173 */ 2174 if (error == EFBIG) { /* too many desc's, linearize */ 2175 sc->sc_stats.ast_tx_linear++; 2176 if (m_defrag(m0, M_DONTWAIT)) { 2177 sc->sc_stats.ast_tx_nomcl++; 2178 m_freem(m0); 2179 return ENOMEM; 2180 } 2181 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, 2182 BUS_DMA_NOWAIT); 2183 if (error != 0) { 2184 sc->sc_stats.ast_tx_busdma++; 2185 m_freem(m0); 2186 return error; 2187 } 2188 KASSERT(bf->bf_nseg == 1, 2189 ("ath_tx_start: packet not one segment; nseg %u", 2190 bf->bf_nseg)); 2191 } else if (error != 0) { 2192 sc->sc_stats.ast_tx_busdma++; 2193 m_freem(m0); 2194 return error; 2195 } else if (bf->bf_nseg == 0) { /* null packet, discard */ 2196 sc->sc_stats.ast_tx_nodata++; 2197 m_freem(m0); 2198 return EIO; 2199 } 2200 DPRINTF(ATH_DEBUG_XMIT, ("%s: m %p len %u\n", __func__, m0, pktlen)); 2201 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 2202 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 2203 bf->bf_m = m0; 2204 bf->bf_node = ni; /* NB: held reference */ 2205 an = ATH_NODE(ni); 2206 2207 /* setup descriptors */ 2208 ds = bf->bf_desc; 2209 rt = sc->sc_currates; 2210 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 2211 2212 /* 2213 * Calculate Atheros packet type from IEEE80211 packet header 2214 * and setup for rate calculations. 
2215 */ 2216 bf->bf_id.id_node = NULL; 2217 atype = HAL_PKT_TYPE_NORMAL; /* default */ 2218 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 2219 case IEEE80211_FC0_TYPE_MGT: 2220 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2221 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) { 2222 atype = HAL_PKT_TYPE_BEACON; 2223 } else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) { 2224 atype = HAL_PKT_TYPE_PROBE_RESP; 2225 } else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) { 2226 atype = HAL_PKT_TYPE_ATIM; 2227 } 2228 rix = 0; /* XXX lowest rate */ 2229 break; 2230 case IEEE80211_FC0_TYPE_CTL: 2231 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2232 if (subtype == IEEE80211_FC0_SUBTYPE_PS_POLL) 2233 atype = HAL_PKT_TYPE_PSPOLL; 2234 rix = 0; /* XXX lowest rate */ 2235 break; 2236 default: 2237 /* remember link conditions for rate adaptation algorithm */ 2238 if (ic->ic_fixed_rate == -1) { 2239 bf->bf_id.id_len = m0->m_pkthdr.len; 2240 bf->bf_id.id_rateidx = ni->ni_txrate; 2241 bf->bf_id.id_node = ni; 2242 bf->bf_id.id_rssi = ath_node_getrssi(ic, ni); 2243 } 2244 ni->ni_txrate = ieee80211_rssadapt_choose(&an->an_rssadapt, 2245 &ni->ni_rates, wh, m0->m_pkthdr.len, ic->ic_fixed_rate, 2246 ifp->if_xname, 0); 2247 rix = sc->sc_rixmap[ni->ni_rates.rs_rates[ni->ni_txrate] & 2248 IEEE80211_RATE_VAL]; 2249 if (rix == 0xff) { 2250 printf("%s: bogus xmit rate 0x%x (idx 0x%x)\n", 2251 ifp->if_xname, ni->ni_rates.rs_rates[ni->ni_txrate], 2252 ni->ni_txrate); 2253 sc->sc_stats.ast_tx_badrate++; 2254 m_freem(m0); 2255 return EIO; 2256 } 2257 break; 2258 } 2259 2260 /* 2261 * NB: the 802.11 layer marks whether or not we should 2262 * use short preamble based on the current mode and 2263 * negotiated parameters. 2264 */ 2265 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 2266 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 2267 txrate = rt->info[rix].rateCode | rt->info[rix].shortPreamble; 2268 shortPreamble = AH_TRUE; 2269 sc->sc_stats.ast_tx_shortpre++; 2270 } else { 2271 txrate = rt->info[rix].rateCode; 2272 shortPreamble = AH_FALSE; 2273 } 2274 2275 /* 2276 * Calculate miscellaneous flags. 2277 */ 2278 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for wep errors */ 2279 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2280 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ 2281 sc->sc_stats.ast_tx_noack++; 2282 } else if (pktlen > ic->ic_rtsthreshold) { 2283 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ 2284 sc->sc_stats.ast_tx_rts++; 2285 } 2286 2287 /* 2288 * Calculate duration. This logically belongs in the 802.11 2289 * layer but it lacks sufficient information to calculate it. 2290 */ 2291 if ((flags & HAL_TXDESC_NOACK) == 0 && 2292 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 2293 u_int16_t dur; 2294 /* 2295 * XXX not right with fragmentation. 2296 */ 2297 dur = ath_hal_computetxtime(ah, rt, IEEE80211_ACK_SIZE, 2298 rix, shortPreamble); 2299 *((u_int16_t*) wh->i_dur) = htole16(dur); 2300 } 2301 2302 /* 2303 * Calculate RTS/CTS rate and duration if needed. 2304 */ 2305 ctsduration = 0; 2306 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) { 2307 /* 2308 * CTS transmit rate is derived from the transmit rate 2309 * by looking in the h/w rate table. We must also factor 2310 * in whether or not a short preamble is to be used. 
2311 */ 2312 cix = rt->info[rix].controlRate; 2313 ctsrate = rt->info[cix].rateCode; 2314 if (shortPreamble) 2315 ctsrate |= rt->info[cix].shortPreamble; 2316 /* 2317 * Compute the transmit duration based on the size 2318 * of an ACK frame. We call into the HAL to do the 2319 * computation since it depends on the characteristics 2320 * of the actual PHY being used. 2321 */ 2322 if (flags & HAL_TXDESC_RTSENA) { /* SIFS + CTS */ 2323 ctsduration += ath_hal_computetxtime(ah, 2324 rt, IEEE80211_ACK_SIZE, cix, shortPreamble); 2325 } 2326 /* SIFS + data */ 2327 ctsduration += ath_hal_computetxtime(ah, 2328 rt, pktlen, rix, shortPreamble); 2329 if ((flags & HAL_TXDESC_NOACK) == 0) { /* SIFS + ACK */ 2330 ctsduration += ath_hal_computetxtime(ah, 2331 rt, IEEE80211_ACK_SIZE, cix, shortPreamble); 2332 } 2333 } else 2334 ctsrate = 0; 2335 2336 /* 2337 * For now use the antenna on which the last good 2338 * frame was received on. We assume this field is 2339 * initialized to 0 which gives us ``auto'' or the 2340 * ``default'' antenna. 2341 */ 2342 if (an->an_tx_antenna) { 2343 antenna = an->an_tx_antenna; 2344 } else { 2345 antenna = an->an_rx_hist[an->an_rx_hist_next].arh_antenna; 2346 } 2347 2348 #if NBPFILTER > 0 2349 if (ic->ic_rawbpf) 2350 bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT); 2351 2352 if (sc->sc_drvbpf) { 2353 struct mbuf mb; 2354 2355 sc->sc_txtap.wt_flags = 0; 2356 if (shortPreamble) 2357 sc->sc_txtap.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2358 if (!ath_softcrypto && iswep) 2359 sc->sc_txtap.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2360 sc->sc_txtap.wt_rate = ni->ni_rates.rs_rates[ni->ni_txrate] & 2361 IEEE80211_RATE_VAL; 2362 sc->sc_txtap.wt_txpower = 30; 2363 sc->sc_txtap.wt_antenna = antenna; 2364 sc->sc_txtap.wt_hwqueue = hwqueue; 2365 2366 mb.m_data = (caddr_t)&sc->sc_txtap; 2367 mb.m_len = sc->sc_txtap_len; 2368 mb.m_next = m0; 2369 mb.m_nextpkt = NULL; 2370 mb.m_type = 0; 2371 mb.m_flags = 0; 2372 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT); 2373 } 2374 #endif 2375 2376 /* 2377 * Formulate first tx descriptor with tx controls. 2378 */ 2379 /* XXX check return value? */ 2380 ath_hal_setup_tx_desc(ah, ds 2381 , pktlen /* packet length */ 2382 , hdrlen /* header length */ 2383 , atype /* Atheros packet type */ 2384 , 60 /* txpower XXX */ 2385 , txrate, 1+10 /* series 0 rate/tries */ 2386 , iswep ? sc->sc_ic.ic_wep_txkey : HAL_TXKEYIX_INVALID 2387 , antenna /* antenna mode */ 2388 , flags /* flags */ 2389 , ctsrate /* rts/cts rate */ 2390 , ctsduration /* rts/cts duration */ 2391 ); 2392 #ifdef notyet 2393 ath_hal_setup_xtx_desc(ah, ds 2394 , AH_FALSE /* short preamble */ 2395 , 0, 0 /* series 1 rate/tries */ 2396 , 0, 0 /* series 2 rate/tries */ 2397 , 0, 0 /* series 3 rate/tries */ 2398 ); 2399 #endif 2400 /* 2401 * Fillin the remainder of the descriptor info. 2402 */ 2403 for (i = 0; i < bf->bf_nseg; i++, ds++) { 2404 ds->ds_data = bf->bf_segs[i].ds_addr; 2405 if (i == bf->bf_nseg - 1) { 2406 ds->ds_link = 0; 2407 } else { 2408 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1); 2409 } 2410 ath_hal_fill_tx_desc(ah, ds 2411 , bf->bf_segs[i].ds_len /* segment length */ 2412 , i == 0 /* first segment */ 2413 , i == bf->bf_nseg - 1 /* last segment */ 2414 ); 2415 DPRINTF(ATH_DEBUG_XMIT, 2416 ("%s: %d: %08x %08x %08x %08x %08x %08x\n", 2417 __func__, i, ds->ds_link, ds->ds_data, 2418 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1])); 2419 } 2420 2421 /* 2422 * Insert the frame on the outbound list and 2423 * pass it on to the hardware. 
2424 */ 2425 s = splnet(); 2426 TAILQ_INSERT_TAIL(&sc->sc_txq, bf, bf_list); 2427 if (sc->sc_txlink == NULL) { 2428 ath_hal_put_tx_buf(ah, sc->sc_txhalq[hwqueue], bf->bf_daddr); 2429 DPRINTF(ATH_DEBUG_XMIT, ("%s: TXDP0 = %p (%p)\n", __func__, 2430 (caddr_t)bf->bf_daddr, bf->bf_desc)); 2431 } else { 2432 *sc->sc_txlink = bf->bf_daddr; 2433 DPRINTF(ATH_DEBUG_XMIT, ("%s: link(%p)=%p (%p)\n", __func__, 2434 sc->sc_txlink, (caddr_t)bf->bf_daddr, bf->bf_desc)); 2435 } 2436 sc->sc_txlink = &bf->bf_desc[bf->bf_nseg - 1].ds_link; 2437 splx(s); 2438 2439 ath_hal_tx_start(ah, sc->sc_txhalq[hwqueue]); 2440 return 0; 2441 } 2442 2443 void 2444 ath_tx_proc(void *arg, int npending) 2445 { 2446 struct ath_softc *sc = arg; 2447 struct ath_hal *ah = sc->sc_ah; 2448 struct ath_buf *bf; 2449 struct ieee80211com *ic = &sc->sc_ic; 2450 struct ifnet *ifp = &ic->ic_if; 2451 struct ath_desc *ds; 2452 struct ieee80211_node *ni; 2453 struct ath_node *an; 2454 int sr, lr, s; 2455 HAL_STATUS status; 2456 2457 for (;;) { 2458 s = splnet(); 2459 bf = TAILQ_FIRST(&sc->sc_txq); 2460 if (bf == NULL) { 2461 sc->sc_txlink = NULL; 2462 splx(s); 2463 break; 2464 } 2465 /* only the last descriptor is needed */ 2466 ds = &bf->bf_desc[bf->bf_nseg - 1]; 2467 status = ath_hal_proc_tx_desc(ah, ds); 2468 #ifdef AR_DEBUG 2469 if (ath_debug & ATH_DEBUG_XMIT_DESC) 2470 ath_printtxbuf(bf, status == HAL_OK); 2471 #endif 2472 if (status == HAL_EINPROGRESS) { 2473 splx(s); 2474 break; 2475 } 2476 TAILQ_REMOVE(&sc->sc_txq, bf, bf_list); 2477 splx(s); 2478 2479 ni = bf->bf_node; 2480 if (ni != NULL) { 2481 an = (struct ath_node *) ni; 2482 if (ds->ds_txstat.ts_status == 0) { 2483 if (bf->bf_id.id_node != NULL) 2484 ieee80211_rssadapt_raise_rate(ic, 2485 &an->an_rssadapt, &bf->bf_id); 2486 an->an_tx_antenna = ds->ds_txstat.ts_antenna; 2487 } else { 2488 if (bf->bf_id.id_node != NULL) 2489 ieee80211_rssadapt_lower_rate(ic, ni, 2490 &an->an_rssadapt, &bf->bf_id); 2491 if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY) 2492 sc->sc_stats.ast_tx_xretries++; 2493 if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO) 2494 sc->sc_stats.ast_tx_fifoerr++; 2495 if (ds->ds_txstat.ts_status & HAL_TXERR_FILT) 2496 sc->sc_stats.ast_tx_filtered++; 2497 an->an_tx_antenna = 0; /* invalidate */ 2498 } 2499 sr = ds->ds_txstat.ts_shortretry; 2500 lr = ds->ds_txstat.ts_longretry; 2501 sc->sc_stats.ast_tx_shortretry += sr; 2502 sc->sc_stats.ast_tx_longretry += lr; 2503 /* 2504 * Reclaim reference to node. 2505 * 2506 * NB: the node may be reclaimed here if, for example 2507 * this is a DEAUTH message that was sent and the 2508 * node was timed out due to inactivity. 2509 */ 2510 ieee80211_release_node(ic, ni); 2511 } 2512 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 2513 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2514 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2515 m_freem(bf->bf_m); 2516 bf->bf_m = NULL; 2517 bf->bf_node = NULL; 2518 2519 s = splnet(); 2520 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 2521 splx(s); 2522 } 2523 ifp->if_flags &= ~IFF_OACTIVE; 2524 sc->sc_tx_timer = 0; 2525 2526 ath_start(ifp); 2527 } 2528 2529 /* 2530 * Drain the transmit queue and reclaim resources. 
2531 */ 2532 void 2533 ath_draintxq(struct ath_softc *sc) 2534 { 2535 struct ath_hal *ah = sc->sc_ah; 2536 struct ieee80211com *ic = &sc->sc_ic; 2537 struct ifnet *ifp = &ic->ic_if; 2538 struct ieee80211_node *ni; 2539 struct ath_buf *bf; 2540 int s, i; 2541 2542 /* XXX return value */ 2543 if (!sc->sc_invalid) { 2544 for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) { 2545 /* don't touch the hardware if marked invalid */ 2546 (void) ath_hal_stop_tx_dma(ah, sc->sc_txhalq[i]); 2547 DPRINTF(ATH_DEBUG_RESET, 2548 ("%s: tx queue %d (%p), link %p\n", __func__, i, 2549 (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah, 2550 sc->sc_txhalq[i]), sc->sc_txlink)); 2551 } 2552 (void) ath_hal_stop_tx_dma(ah, sc->sc_bhalq); 2553 DPRINTF(ATH_DEBUG_RESET, 2554 ("%s: beacon queue (%p)\n", __func__, 2555 (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah, sc->sc_bhalq))); 2556 } 2557 for (;;) { 2558 s = splnet(); 2559 bf = TAILQ_FIRST(&sc->sc_txq); 2560 if (bf == NULL) { 2561 sc->sc_txlink = NULL; 2562 splx(s); 2563 break; 2564 } 2565 TAILQ_REMOVE(&sc->sc_txq, bf, bf_list); 2566 splx(s); 2567 #ifdef AR_DEBUG 2568 if (ath_debug & ATH_DEBUG_RESET) { 2569 ath_printtxbuf(bf, 2570 ath_hal_proc_tx_desc(ah, bf->bf_desc) == HAL_OK); 2571 } 2572 #endif /* AR_DEBUG */ 2573 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2574 m_freem(bf->bf_m); 2575 bf->bf_m = NULL; 2576 ni = bf->bf_node; 2577 bf->bf_node = NULL; 2578 s = splnet(); 2579 if (ni != NULL) { 2580 /* 2581 * Reclaim node reference. 2582 */ 2583 ieee80211_release_node(ic, ni); 2584 } 2585 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 2586 splx(s); 2587 } 2588 ifp->if_flags &= ~IFF_OACTIVE; 2589 sc->sc_tx_timer = 0; 2590 } 2591 2592 /* 2593 * Disable the receive h/w in preparation for a reset. 2594 */ 2595 void 2596 ath_stoprecv(struct ath_softc *sc) 2597 { 2598 #define PA2DESC(_sc, _pa) \ 2599 ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \ 2600 ((_pa) - (_sc)->sc_desc_paddr))) 2601 struct ath_hal *ah = sc->sc_ah; 2602 2603 ath_hal_stop_pcu_recv(ah); /* disable PCU */ 2604 ath_hal_set_rx_filter(ah, 0); /* clear recv filter */ 2605 ath_hal_stop_rx_dma(ah); /* disable DMA engine */ 2606 #ifdef AR_DEBUG 2607 if (ath_debug & ATH_DEBUG_RESET) { 2608 struct ath_buf *bf; 2609 2610 printf("%s: rx queue %p, link %p\n", __func__, 2611 (caddr_t)(u_intptr_t)ath_hal_get_rx_buf(ah), sc->sc_rxlink); 2612 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 2613 struct ath_desc *ds = bf->bf_desc; 2614 if (ath_hal_proc_rx_desc(ah, ds, bf->bf_daddr, 2615 PA2DESC(sc, ds->ds_link)) == HAL_OK) 2616 ath_printrxbuf(bf, 1); 2617 } 2618 } 2619 #endif 2620 sc->sc_rxlink = NULL; /* just in case */ 2621 #undef PA2DESC 2622 } 2623 2624 /* 2625 * Enable the receive h/w following a reset. 2626 */ 2627 int 2628 ath_startrecv(struct ath_softc *sc) 2629 { 2630 struct ath_hal *ah = sc->sc_ah; 2631 struct ath_buf *bf; 2632 2633 sc->sc_rxlink = NULL; 2634 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 2635 int error = ath_rxbuf_init(sc, bf); 2636 if (error != 0) { 2637 DPRINTF(ATH_DEBUG_RECV, 2638 ("%s: ath_rxbuf_init failed %d\n", 2639 __func__, error)); 2640 return error; 2641 } 2642 } 2643 2644 bf = TAILQ_FIRST(&sc->sc_rxbuf); 2645 ath_hal_put_rx_buf(ah, bf->bf_daddr); 2646 ath_hal_start_rx(ah); /* enable recv descriptors */ 2647 ath_mode_init(sc); /* set filters, etc. */ 2648 ath_hal_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ 2649 return 0; 2650 } 2651 2652 /* 2653 * Set/change channels. If the channel is really being changed, 2654 * it's done by resetting the chip. 
To accomplish this we must 2655 * first cleanup any pending DMA, then restart stuff after a la 2656 * ath_init. 2657 */ 2658 int 2659 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 2660 { 2661 struct ath_hal *ah = sc->sc_ah; 2662 struct ieee80211com *ic = &sc->sc_ic; 2663 struct ifnet *ifp = &ic->ic_if; 2664 2665 DPRINTF(ATH_DEBUG_ANY, ("%s: %u (%u MHz) -> %u (%u MHz)\n", __func__, 2666 ieee80211_chan2ieee(ic, ic->ic_ibss_chan), 2667 ic->ic_ibss_chan->ic_freq, 2668 ieee80211_chan2ieee(ic, chan), chan->ic_freq)); 2669 if (chan != ic->ic_ibss_chan) { 2670 HAL_STATUS status; 2671 HAL_CHANNEL hchan; 2672 enum ieee80211_phymode mode; 2673 2674 /* 2675 * To switch channels clear any pending DMA operations; 2676 * wait long enough for the RX fifo to drain, reset the 2677 * hardware at the new frequency, and then re-enable 2678 * the relevant bits of the h/w. 2679 */ 2680 ath_hal_set_intr(ah, 0); /* disable interrupts */ 2681 ath_draintxq(sc); /* clear pending tx frames */ 2682 ath_stoprecv(sc); /* turn off frame recv */ 2683 /* 2684 * Convert to a HAL channel description with 2685 * the flags constrained to reflect the current 2686 * operating mode. 2687 */ 2688 hchan.channel = chan->ic_freq; 2689 hchan.channelFlags = ath_chan2flags(ic, chan); 2690 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, 2691 &status)) { 2692 printf("%s: ath_chan_set: unable to reset " 2693 "channel %u (%u MHz)\n", ifp->if_xname, 2694 ieee80211_chan2ieee(ic, chan), chan->ic_freq); 2695 return EIO; 2696 } 2697 ath_set_slot_time(sc); 2698 /* 2699 * Re-enable rx framework. 2700 */ 2701 if (ath_startrecv(sc) != 0) { 2702 printf("%s: ath_chan_set: unable to restart recv " 2703 "logic\n", ifp->if_xname); 2704 return EIO; 2705 } 2706 2707 #if NBPFILTER > 0 2708 /* 2709 * Update BPF state. 2710 */ 2711 sc->sc_txtap.wt_chan_freq = sc->sc_rxtap.wr_chan_freq = 2712 htole16(chan->ic_freq); 2713 sc->sc_txtap.wt_chan_flags = sc->sc_rxtap.wr_chan_flags = 2714 htole16(chan->ic_flags); 2715 #endif 2716 2717 /* 2718 * Change channels and update the h/w rate map 2719 * if we're switching; e.g. 11a to 11b/g. 2720 */ 2721 ic->ic_ibss_chan = chan; 2722 mode = ieee80211_chan2mode(ic, chan); 2723 if (mode != sc->sc_curmode) 2724 ath_setcurmode(sc, mode); 2725 2726 /* 2727 * Re-enable interrupts. 2728 */ 2729 ath_hal_set_intr(ah, sc->sc_imask); 2730 } 2731 return 0; 2732 } 2733 2734 void 2735 ath_next_scan(void *arg) 2736 { 2737 struct ath_softc *sc = arg; 2738 struct ieee80211com *ic = &sc->sc_ic; 2739 struct ifnet *ifp = &ic->ic_if; 2740 int s; 2741 2742 /* don't call ath_start w/o network interrupts blocked */ 2743 s = splnet(); 2744 2745 if (ic->ic_state == IEEE80211_S_SCAN) 2746 ieee80211_next_scan(ifp); 2747 splx(s); 2748 } 2749 2750 int 2751 ath_set_slot_time(struct ath_softc *sc) 2752 { 2753 struct ath_hal *ah = sc->sc_ah; 2754 struct ieee80211com *ic = &sc->sc_ic; 2755 2756 if (ic->ic_flags & IEEE80211_F_SHSLOT) 2757 return (ath_hal_set_slot_time(ah, HAL_SLOT_TIME_9)); 2758 2759 return (0); 2760 } 2761 2762 /* 2763 * Periodically recalibrate the PHY to account 2764 * for temperature/environment changes. 2765 */ 2766 void 2767 ath_calibrate(void *arg) 2768 { 2769 struct ath_softc *sc = arg; 2770 struct ath_hal *ah = sc->sc_ah; 2771 struct ieee80211com *ic = &sc->sc_ic; 2772 struct ieee80211_channel *c; 2773 HAL_CHANNEL hchan; 2774 int s; 2775 2776 sc->sc_stats.ast_per_cal++; 2777 2778 /* 2779 * Convert to a HAL channel description with the flags 2780 * constrained to reflect the current operating mode. 
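 * (This mirrors the conversion done in ath_chan_set(): the frequency
 * is copied as-is and the flags come from ath_chan2flags().)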
2781 */ 2782 c = ic->ic_ibss_chan; 2783 hchan.channel = c->ic_freq; 2784 hchan.channelFlags = ath_chan2flags(ic, c); 2785 2786 s = splnet(); 2787 DPRINTF(ATH_DEBUG_CALIBRATE, 2788 ("%s: channel %u/%x\n", __func__, c->ic_freq, c->ic_flags)); 2789 2790 if (ath_hal_get_rf_gain(ah) == HAL_RFGAIN_NEED_CHANGE) { 2791 /* 2792 * Rfgain is out of bounds, reset the chip 2793 * to load new gain values. 2794 */ 2795 sc->sc_stats.ast_per_rfgain++; 2796 ath_reset(sc, 1); 2797 } 2798 if (!ath_hal_calibrate(ah, &hchan)) { 2799 DPRINTF(ATH_DEBUG_ANY, 2800 ("%s: calibration of channel %u failed\n", 2801 __func__, c->ic_freq)); 2802 sc->sc_stats.ast_per_calfail++; 2803 } 2804 timeout_add_sec(&sc->sc_cal_to, ath_calinterval); 2805 splx(s); 2806 } 2807 2808 void 2809 ath_ledstate(struct ath_softc *sc, enum ieee80211_state state) 2810 { 2811 HAL_LED_STATE led = HAL_LED_INIT; 2812 u_int32_t softled = AR5K_SOFTLED_OFF; 2813 2814 switch (state) { 2815 case IEEE80211_S_INIT: 2816 break; 2817 case IEEE80211_S_SCAN: 2818 led = HAL_LED_SCAN; 2819 break; 2820 case IEEE80211_S_AUTH: 2821 led = HAL_LED_AUTH; 2822 break; 2823 case IEEE80211_S_ASSOC: 2824 led = HAL_LED_ASSOC; 2825 softled = AR5K_SOFTLED_ON; 2826 break; 2827 case IEEE80211_S_RUN: 2828 led = HAL_LED_RUN; 2829 softled = AR5K_SOFTLED_ON; 2830 break; 2831 } 2832 2833 ath_hal_set_ledstate(sc->sc_ah, led); 2834 if (sc->sc_softled) { 2835 ath_hal_set_gpio_output(sc->sc_ah, AR5K_SOFTLED_PIN); 2836 ath_hal_set_gpio(sc->sc_ah, AR5K_SOFTLED_PIN, softled); 2837 } 2838 } 2839 2840 int 2841 ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 2842 { 2843 struct ifnet *ifp = &ic->ic_if; 2844 struct ath_softc *sc = ifp->if_softc; 2845 struct ath_hal *ah = sc->sc_ah; 2846 struct ieee80211_node *ni; 2847 const u_int8_t *bssid; 2848 int error, i; 2849 2850 u_int32_t rfilt; 2851 2852 DPRINTF(ATH_DEBUG_ANY, ("%s: %s -> %s\n", __func__, 2853 ieee80211_state_name[ic->ic_state], 2854 ieee80211_state_name[nstate])); 2855 2856 timeout_del(&sc->sc_scan_to); 2857 timeout_del(&sc->sc_cal_to); 2858 ath_ledstate(sc, nstate); 2859 2860 if (nstate == IEEE80211_S_INIT) { 2861 timeout_del(&sc->sc_rssadapt_to); 2862 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 2863 ath_hal_set_intr(ah, sc->sc_imask); 2864 return (*sc->sc_newstate)(ic, nstate, arg); 2865 } 2866 ni = ic->ic_bss; 2867 error = ath_chan_set(sc, ni->ni_chan); 2868 if (error != 0) 2869 goto bad; 2870 rfilt = ath_calcrxfilter(sc); 2871 if (nstate == IEEE80211_S_SCAN || 2872 ic->ic_opmode == IEEE80211_M_MONITOR) { 2873 bssid = sc->sc_broadcast_addr; 2874 } else { 2875 bssid = ni->ni_bssid; 2876 } 2877 ath_hal_set_rx_filter(ah, rfilt); 2878 DPRINTF(ATH_DEBUG_ANY, ("%s: RX filter 0x%x bssid %s\n", 2879 __func__, rfilt, ether_sprintf((u_char*)bssid))); 2880 2881 if (nstate == IEEE80211_S_RUN && ic->ic_opmode == IEEE80211_M_STA) { 2882 ath_hal_set_associd(ah, bssid, ni->ni_associd); 2883 } else { 2884 ath_hal_set_associd(ah, bssid, 0); 2885 } 2886 2887 if (!ath_softcrypto && (ic->ic_flags & IEEE80211_F_WEPON)) { 2888 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 2889 if (ath_hal_is_key_valid(ah, i)) 2890 ath_hal_set_key_lladdr(ah, i, bssid); 2891 } 2892 } 2893 2894 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 2895 /* nothing to do */ 2896 } else if (nstate == IEEE80211_S_RUN) { 2897 DPRINTF(ATH_DEBUG_ANY, ("%s(RUN): " 2898 "ic_flags=0x%08x iv=%d bssid=%s " 2899 "capinfo=0x%04x chan=%d\n", 2900 __func__, 2901 ic->ic_flags, 2902 ni->ni_intval, 2903 ether_sprintf(ni->ni_bssid), 2904 ni->ni_capinfo, 2905 
ieee80211_chan2ieee(ic, ni->ni_chan))); 2906 2907 /* 2908 * Allocate and setup the beacon frame for AP or adhoc mode. 2909 */ 2910 #ifndef IEEE80211_STA_ONLY 2911 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 2912 ic->ic_opmode == IEEE80211_M_IBSS) { 2913 error = ath_beacon_alloc(sc, ni); 2914 if (error != 0) 2915 goto bad; 2916 } 2917 #endif 2918 /* 2919 * Configure the beacon and sleep timers. 2920 */ 2921 ath_beacon_config(sc); 2922 } else { 2923 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 2924 ath_hal_set_intr(ah, sc->sc_imask); 2925 } 2926 2927 /* 2928 * Invoke the parent method to complete the work. 2929 */ 2930 error = (*sc->sc_newstate)(ic, nstate, arg); 2931 2932 if (nstate == IEEE80211_S_RUN) { 2933 /* start periodic recalibration timer */ 2934 timeout_add_sec(&sc->sc_cal_to, ath_calinterval); 2935 2936 if (ic->ic_opmode != IEEE80211_M_MONITOR) 2937 timeout_add_msec(&sc->sc_rssadapt_to, 100); 2938 } else if (nstate == IEEE80211_S_SCAN) { 2939 /* start ap/neighbor scan timer */ 2940 timeout_add_msec(&sc->sc_scan_to, ath_dwelltime); 2941 } 2942 bad: 2943 return error; 2944 } 2945 2946 #ifndef IEEE80211_STA_ONLY 2947 void 2948 ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m, 2949 struct ieee80211_node *ni, struct ieee80211_rxinfo *rxi, int subtype) 2950 { 2951 struct ath_softc *sc = (struct ath_softc*)ic->ic_softc; 2952 struct ath_hal *ah = sc->sc_ah; 2953 2954 (*sc->sc_recv_mgmt)(ic, m, ni, rxi, subtype); 2955 2956 switch (subtype) { 2957 case IEEE80211_FC0_SUBTYPE_PROBE_RESP: 2958 case IEEE80211_FC0_SUBTYPE_BEACON: 2959 if (ic->ic_opmode != IEEE80211_M_IBSS || 2960 ic->ic_state != IEEE80211_S_RUN) 2961 break; 2962 if (ieee80211_ibss_merge(ic, ni, ath_hal_get_tsf64(ah)) == 2963 ENETRESET) 2964 ath_hal_set_associd(ah, ic->ic_bss->ni_bssid, 0); 2965 break; 2966 default: 2967 break; 2968 } 2969 return; 2970 } 2971 #endif 2972 2973 /* 2974 * Setup driver-specific state for a newly associated node. 2975 * Note that we're called also on a re-associate, the isnew 2976 * param tells us if this is the first time or not. 2977 */ 2978 void 2979 ath_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew) 2980 { 2981 if (ic->ic_opmode == IEEE80211_M_MONITOR) 2982 return; 2983 } 2984 2985 int 2986 ath_getchannels(struct ath_softc *sc, HAL_BOOL outdoor, HAL_BOOL xchanmode) 2987 { 2988 struct ieee80211com *ic = &sc->sc_ic; 2989 struct ifnet *ifp = &ic->ic_if; 2990 struct ath_hal *ah = sc->sc_ah; 2991 HAL_CHANNEL *chans; 2992 int i, ix, nchan; 2993 2994 sc->sc_nchan = 0; 2995 chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL), 2996 M_TEMP, M_NOWAIT); 2997 if (chans == NULL) { 2998 printf("%s: unable to allocate channel table\n", ifp->if_xname); 2999 return ENOMEM; 3000 } 3001 if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan, 3002 HAL_MODE_ALL, outdoor, xchanmode)) { 3003 printf("%s: unable to collect channel list from hal\n", 3004 ifp->if_xname); 3005 free(chans, M_TEMP); 3006 return EINVAL; 3007 } 3008 3009 /* 3010 * Convert HAL channels to ieee80211 ones and insert 3011 * them in the table according to their channel number. 
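 * (When several HAL channels map to the same IEEE channel number,
 * e.g. 11b and 11g on the same frequency, their channelFlags are OR'd
 * into the one entry.)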
3012 */ 3013 for (i = 0; i < nchan; i++) { 3014 HAL_CHANNEL *c = &chans[i]; 3015 ix = ieee80211_mhz2ieee(c->channel, c->channelFlags); 3016 if (ix > IEEE80211_CHAN_MAX) { 3017 printf("%s: bad hal channel %u (%u/%x) ignored\n", 3018 ifp->if_xname, ix, c->channel, c->channelFlags); 3019 continue; 3020 } 3021 DPRINTF(ATH_DEBUG_ANY, 3022 ("%s: HAL channel %d/%d freq %d flags %#04x idx %d\n", 3023 sc->sc_dev.dv_xname, i, nchan, c->channel, c->channelFlags, 3024 ix)); 3025 /* NB: flags are known to be compatible */ 3026 if (ic->ic_channels[ix].ic_freq == 0) { 3027 ic->ic_channels[ix].ic_freq = c->channel; 3028 ic->ic_channels[ix].ic_flags = c->channelFlags; 3029 } else { 3030 /* channels overlap; e.g. 11g and 11b */ 3031 ic->ic_channels[ix].ic_flags |= c->channelFlags; 3032 } 3033 /* count valid channels */ 3034 sc->sc_nchan++; 3035 } 3036 free(chans, M_TEMP); 3037 3038 if (sc->sc_nchan < 1) { 3039 printf("%s: no valid channels for regdomain %s(%u)\n", 3040 ifp->if_xname, ieee80211_regdomain2name(ah->ah_regdomain), 3041 ah->ah_regdomain); 3042 return ENOENT; 3043 } 3044 3045 /* set an initial channel */ 3046 ic->ic_ibss_chan = &ic->ic_channels[0]; 3047 3048 return 0; 3049 } 3050 3051 int 3052 ath_rate_setup(struct ath_softc *sc, u_int mode) 3053 { 3054 struct ath_hal *ah = sc->sc_ah; 3055 struct ieee80211com *ic = &sc->sc_ic; 3056 const HAL_RATE_TABLE *rt; 3057 struct ieee80211_rateset *rs; 3058 int i, maxrates; 3059 3060 switch (mode) { 3061 case IEEE80211_MODE_11A: 3062 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11A); 3063 break; 3064 case IEEE80211_MODE_11B: 3065 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11B); 3066 break; 3067 case IEEE80211_MODE_11G: 3068 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11G); 3069 break; 3070 case IEEE80211_MODE_TURBO: 3071 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_TURBO); 3072 break; 3073 default: 3074 DPRINTF(ATH_DEBUG_ANY, 3075 ("%s: invalid mode %u\n", __func__, mode)); 3076 return 0; 3077 } 3078 rt = sc->sc_rates[mode]; 3079 if (rt == NULL) 3080 return 0; 3081 if (rt->rateCount > IEEE80211_RATE_MAXSIZE) { 3082 DPRINTF(ATH_DEBUG_ANY, 3083 ("%s: rate table too small (%u > %u)\n", 3084 __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE)); 3085 maxrates = IEEE80211_RATE_MAXSIZE; 3086 } else { 3087 maxrates = rt->rateCount; 3088 } 3089 rs = &ic->ic_sup_rates[mode]; 3090 for (i = 0; i < maxrates; i++) 3091 rs->rs_rates[i] = rt->info[i].dot11Rate; 3092 rs->rs_nrates = maxrates; 3093 return 1; 3094 } 3095 3096 void 3097 ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 3098 { 3099 const HAL_RATE_TABLE *rt; 3100 int i; 3101 3102 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 3103 rt = sc->sc_rates[mode]; 3104 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 3105 for (i = 0; i < rt->rateCount; i++) 3106 sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i; 3107 bzero(sc->sc_hwmap, sizeof(sc->sc_hwmap)); 3108 for (i = 0; i < 32; i++) 3109 sc->sc_hwmap[i] = rt->info[rt->rateCodeToIndex[i]].dot11Rate; 3110 sc->sc_currates = rt; 3111 sc->sc_curmode = mode; 3112 } 3113 3114 void 3115 ath_rssadapt_updatenode(void *arg, struct ieee80211_node *ni) 3116 { 3117 struct ath_node *an = ATH_NODE(ni); 3118 3119 ieee80211_rssadapt_updatestats(&an->an_rssadapt); 3120 } 3121 3122 void 3123 ath_rssadapt_updatestats(void *arg) 3124 { 3125 struct ath_softc *sc = (struct ath_softc *)arg; 3126 struct ieee80211com *ic = &sc->sc_ic; 3127 3128 if (ic->ic_opmode == IEEE80211_M_STA) { 3129 
ath_rssadapt_updatenode(arg, ic->ic_bss); 3130 } else { 3131 ieee80211_iterate_nodes(ic, ath_rssadapt_updatenode, arg); 3132 } 3133 3134 timeout_add_msec(&sc->sc_rssadapt_to, 100); 3135 } 3136 3137 #ifdef AR_DEBUG 3138 void 3139 ath_printrxbuf(struct ath_buf *bf, int done) 3140 { 3141 struct ath_desc *ds; 3142 int i; 3143 3144 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { 3145 printf("R%d (%p %p) %08x %08x %08x %08x %08x %08x %c\n", 3146 i, ds, (struct ath_desc *)bf->bf_daddr + i, 3147 ds->ds_link, ds->ds_data, 3148 ds->ds_ctl0, ds->ds_ctl1, 3149 ds->ds_hw[0], ds->ds_hw[1], 3150 !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!'); 3151 } 3152 } 3153 3154 void 3155 ath_printtxbuf(struct ath_buf *bf, int done) 3156 { 3157 struct ath_desc *ds; 3158 int i; 3159 3160 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { 3161 printf("T%d (%p %p) " 3162 "%08x %08x %08x %08x %08x %08x %08x %08x %c\n", 3163 i, ds, (struct ath_desc *)bf->bf_daddr + i, 3164 ds->ds_link, ds->ds_data, 3165 ds->ds_ctl0, ds->ds_ctl1, 3166 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3], 3167 !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!'); 3168 } 3169 } 3170 #endif /* AR_DEBUG */ 3171 3172 int 3173 ath_gpio_attach(struct ath_softc *sc, u_int16_t devid) 3174 { 3175 struct ath_hal *ah = sc->sc_ah; 3176 struct gpiobus_attach_args gba; 3177 int i; 3178 3179 if (ah->ah_gpio_npins < 1) 3180 return 0; 3181 3182 /* Initialize gpio pins array */ 3183 for (i = 0; i < ah->ah_gpio_npins && i < AR5K_MAX_GPIO; i++) { 3184 sc->sc_gpio_pins[i].pin_num = i; 3185 sc->sc_gpio_pins[i].pin_caps = GPIO_PIN_INPUT | 3186 GPIO_PIN_OUTPUT; 3187 3188 /* Set pin mode to input */ 3189 ath_hal_set_gpio_input(ah, i); 3190 sc->sc_gpio_pins[i].pin_flags = GPIO_PIN_INPUT; 3191 3192 /* Get pin input */ 3193 sc->sc_gpio_pins[i].pin_state = ath_hal_get_gpio(ah, i) ? 3194 GPIO_PIN_HIGH : GPIO_PIN_LOW; 3195 } 3196 3197 /* Enable GPIO-controlled software LED if available */ 3198 if ((ah->ah_version == AR5K_AR5211) || 3199 (devid == PCI_PRODUCT_ATHEROS_AR5212_IBM)) { 3200 sc->sc_softled = 1; 3201 ath_hal_set_gpio_output(ah, AR5K_SOFTLED_PIN); 3202 ath_hal_set_gpio(ah, AR5K_SOFTLED_PIN, AR5K_SOFTLED_OFF); 3203 } 3204 3205 /* Create gpio controller tag */ 3206 sc->sc_gpio_gc.gp_cookie = sc; 3207 sc->sc_gpio_gc.gp_pin_read = ath_gpio_pin_read; 3208 sc->sc_gpio_gc.gp_pin_write = ath_gpio_pin_write; 3209 sc->sc_gpio_gc.gp_pin_ctl = ath_gpio_pin_ctl; 3210 3211 gba.gba_name = "gpio"; 3212 gba.gba_gc = &sc->sc_gpio_gc; 3213 gba.gba_pins = sc->sc_gpio_pins; 3214 gba.gba_npins = ah->ah_gpio_npins; 3215 3216 #ifdef notyet 3217 #if NGPIO > 0 3218 if (config_found(&sc->sc_dev, &gba, gpiobus_print) == NULL) 3219 return (ENODEV); 3220 #endif 3221 #endif 3222 3223 return (0); 3224 } 3225 3226 int 3227 ath_gpio_pin_read(void *arg, int pin) 3228 { 3229 struct ath_softc *sc = arg; 3230 struct ath_hal *ah = sc->sc_ah; 3231 return (ath_hal_get_gpio(ah, pin) ? GPIO_PIN_HIGH : GPIO_PIN_LOW); 3232 } 3233 3234 void 3235 ath_gpio_pin_write(void *arg, int pin, int value) 3236 { 3237 struct ath_softc *sc = arg; 3238 struct ath_hal *ah = sc->sc_ah; 3239 ath_hal_set_gpio(ah, pin, value ? GPIO_PIN_HIGH : GPIO_PIN_LOW); 3240 } 3241 3242 void 3243 ath_gpio_pin_ctl(void *arg, int pin, int flags) 3244 { 3245 struct ath_softc *sc = arg; 3246 struct ath_hal *ah = sc->sc_ah; 3247 3248 if (flags & GPIO_PIN_INPUT) { 3249 ath_hal_set_gpio_input(ah, pin); 3250 } else if (flags & GPIO_PIN_OUTPUT) { 3251 ath_hal_set_gpio_output(ah, pin); 3252 } 3253 } 3254