/*	$OpenBSD: ath.c,v 1.112 2017/01/22 10:17:37 dlg Exp $	*/
/*	$NetBSD: ath.c,v 1.37 2004/08/18 21:59:39 dyoung Exp $	*/

/*-
 * Copyright (c) 2002-2004 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated. It has been modified for OpenBSD to use an
 * open source HAL instead of the original binary-only HAL.
42 */ 43 44 #include "bpfilter.h" 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/mbuf.h> 49 #include <sys/malloc.h> 50 #include <sys/lock.h> 51 #include <sys/kernel.h> 52 #include <sys/socket.h> 53 #include <sys/sockio.h> 54 #include <sys/device.h> 55 #include <sys/errno.h> 56 #include <sys/timeout.h> 57 #include <sys/gpio.h> 58 #include <sys/endian.h> 59 60 #include <machine/bus.h> 61 62 #include <net/if.h> 63 #include <net/if_dl.h> 64 #include <net/if_media.h> 65 #if NBPFILTER > 0 66 #include <net/bpf.h> 67 #endif 68 #include <netinet/in.h> 69 #include <netinet/if_ether.h> 70 71 #include <net80211/ieee80211_var.h> 72 #include <net80211/ieee80211_rssadapt.h> 73 74 #include <dev/pci/pcidevs.h> 75 #include <dev/gpio/gpiovar.h> 76 77 #include <dev/ic/athvar.h> 78 79 int ath_init(struct ifnet *); 80 int ath_init1(struct ath_softc *); 81 int ath_intr1(struct ath_softc *); 82 void ath_stop(struct ifnet *); 83 void ath_start(struct ifnet *); 84 void ath_reset(struct ath_softc *, int); 85 int ath_media_change(struct ifnet *); 86 void ath_watchdog(struct ifnet *); 87 int ath_ioctl(struct ifnet *, u_long, caddr_t); 88 void ath_fatal_proc(void *, int); 89 void ath_rxorn_proc(void *, int); 90 void ath_bmiss_proc(void *, int); 91 u_int ath_chan2flags(struct ieee80211com *, struct ieee80211_channel *); 92 int ath_initkeytable(struct ath_softc *); 93 void ath_mcastfilter_accum(caddr_t, u_int32_t (*)[2]); 94 void ath_mcastfilter_compute(struct ath_softc *, u_int32_t (*)[2]); 95 u_int32_t ath_calcrxfilter(struct ath_softc *); 96 void ath_mode_init(struct ath_softc *); 97 #ifndef IEEE80211_STA_ONLY 98 int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); 99 void ath_beacon_proc(void *, int); 100 void ath_beacon_free(struct ath_softc *); 101 #endif 102 void ath_beacon_config(struct ath_softc *); 103 int ath_desc_alloc(struct ath_softc *); 104 void ath_desc_free(struct ath_softc *); 105 struct ieee80211_node *ath_node_alloc(struct ieee80211com *); 106 struct mbuf *ath_getmbuf(int, int, u_int); 107 void ath_node_free(struct ieee80211com *, struct ieee80211_node *); 108 void ath_node_copy(struct ieee80211com *, 109 struct ieee80211_node *, const struct ieee80211_node *); 110 u_int8_t ath_node_getrssi(struct ieee80211com *, 111 const struct ieee80211_node *); 112 int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); 113 void ath_rx_proc(void *, int); 114 int ath_tx_start(struct ath_softc *, struct ieee80211_node *, 115 struct ath_buf *, struct mbuf *); 116 void ath_tx_proc(void *, int); 117 int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 118 void ath_draintxq(struct ath_softc *); 119 void ath_stoprecv(struct ath_softc *); 120 int ath_startrecv(struct ath_softc *); 121 void ath_next_scan(void *); 122 int ath_set_slot_time(struct ath_softc *); 123 void ath_calibrate(void *); 124 void ath_ledstate(struct ath_softc *, enum ieee80211_state); 125 int ath_newstate(struct ieee80211com *, enum ieee80211_state, int); 126 void ath_newassoc(struct ieee80211com *, 127 struct ieee80211_node *, int); 128 int ath_getchannels(struct ath_softc *, HAL_BOOL outdoor, 129 HAL_BOOL xchanmode); 130 int ath_rate_setup(struct ath_softc *sc, u_int mode); 131 void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 132 void ath_rssadapt_updatenode(void *, struct ieee80211_node *); 133 void ath_rssadapt_updatestats(void *); 134 #ifndef IEEE80211_STA_ONLY 135 void ath_recv_mgmt(struct ieee80211com *, struct mbuf *, 136 struct ieee80211_node *, struct ieee80211_rxinfo *, 
int); 137 #endif 138 void ath_disable(struct ath_softc *); 139 140 int ath_gpio_attach(struct ath_softc *, u_int16_t); 141 int ath_gpio_pin_read(void *, int); 142 void ath_gpio_pin_write(void *, int, int); 143 void ath_gpio_pin_ctl(void *, int, int); 144 145 #ifdef AR_DEBUG 146 void ath_printrxbuf(struct ath_buf *, int); 147 void ath_printtxbuf(struct ath_buf *, int); 148 int ath_debug = 0; 149 #endif 150 151 int ath_dwelltime = 200; /* 5 channels/second */ 152 int ath_calinterval = 30; /* calibrate every 30 secs */ 153 int ath_outdoor = AH_TRUE; /* outdoor operation */ 154 int ath_xchanmode = AH_TRUE; /* enable extended channels */ 155 int ath_softcrypto = 1; /* 1=enable software crypto */ 156 157 struct cfdriver ath_cd = { 158 NULL, "ath", DV_IFNET 159 }; 160 161 int 162 ath_activate(struct device *self, int act) 163 { 164 struct ath_softc *sc = (struct ath_softc *)self; 165 struct ifnet *ifp = &sc->sc_ic.ic_if; 166 167 switch (act) { 168 case DVACT_SUSPEND: 169 if (ifp->if_flags & IFF_RUNNING) { 170 ath_stop(ifp); 171 if (sc->sc_power != NULL) 172 (*sc->sc_power)(sc, act); 173 } 174 break; 175 case DVACT_RESUME: 176 if (ifp->if_flags & IFF_UP) { 177 ath_init(ifp); 178 if (ifp->if_flags & IFF_RUNNING) 179 ath_start(ifp); 180 } 181 break; 182 } 183 return 0; 184 } 185 186 int 187 ath_enable(struct ath_softc *sc) 188 { 189 if (ATH_IS_ENABLED(sc) == 0) { 190 if (sc->sc_enable != NULL && (*sc->sc_enable)(sc) != 0) { 191 printf("%s: device enable failed\n", 192 sc->sc_dev.dv_xname); 193 return (EIO); 194 } 195 sc->sc_flags |= ATH_ENABLED; 196 } 197 return (0); 198 } 199 200 void 201 ath_disable(struct ath_softc *sc) 202 { 203 if (!ATH_IS_ENABLED(sc)) 204 return; 205 if (sc->sc_disable != NULL) 206 (*sc->sc_disable)(sc); 207 sc->sc_flags &= ~ATH_ENABLED; 208 } 209 210 int 211 ath_attach(u_int16_t devid, struct ath_softc *sc) 212 { 213 struct ieee80211com *ic = &sc->sc_ic; 214 struct ifnet *ifp = &ic->ic_if; 215 struct ath_hal *ah; 216 HAL_STATUS status; 217 HAL_TXQ_INFO qinfo; 218 int error = 0, i; 219 220 DPRINTF(ATH_DEBUG_ANY, ("%s: devid 0x%x\n", __func__, devid)); 221 222 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 223 sc->sc_flags &= ~ATH_ATTACHED; /* make sure that it's not attached */ 224 225 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, 226 sc->sc_pcie, &status); 227 if (ah == NULL) { 228 printf("%s: unable to attach hardware; HAL status %d\n", 229 ifp->if_xname, status); 230 error = ENXIO; 231 goto bad; 232 } 233 if (ah->ah_abi != HAL_ABI_VERSION) { 234 printf("%s: HAL ABI mismatch detected (0x%x != 0x%x)\n", 235 ifp->if_xname, ah->ah_abi, HAL_ABI_VERSION); 236 error = ENXIO; 237 goto bad; 238 } 239 240 if (ah->ah_single_chip == AH_TRUE) { 241 printf("%s: AR%s %u.%u phy %u.%u rf %u.%u", ifp->if_xname, 242 ar5k_printver(AR5K_VERSION_DEV, devid), 243 ah->ah_mac_version, ah->ah_mac_revision, 244 ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf, 245 ah->ah_radio_5ghz_revision >> 4, 246 ah->ah_radio_5ghz_revision & 0xf); 247 } else { 248 printf("%s: AR%s %u.%u phy %u.%u", ifp->if_xname, 249 ar5k_printver(AR5K_VERSION_VER, ah->ah_mac_srev), 250 ah->ah_mac_version, ah->ah_mac_revision, 251 ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf); 252 printf(" rf%s %u.%u", 253 ar5k_printver(AR5K_VERSION_RAD, ah->ah_radio_5ghz_revision), 254 ah->ah_radio_5ghz_revision >> 4, 255 ah->ah_radio_5ghz_revision & 0xf); 256 if (ah->ah_radio_2ghz_revision != 0) { 257 printf(" rf%s %u.%u", 258 ar5k_printver(AR5K_VERSION_RAD, 259 ah->ah_radio_2ghz_revision), 260 ah->ah_radio_2ghz_revision 
>> 4, 261 ah->ah_radio_2ghz_revision & 0xf); 262 } 263 } 264 if (ah->ah_ee_version == AR5K_EEPROM_VERSION_4_7) 265 printf(" eeprom 4.7"); 266 else 267 printf(" eeprom %1x.%1x", ah->ah_ee_version >> 12, 268 ah->ah_ee_version & 0xff); 269 270 #if 0 271 if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_UNSUPP || 272 ah->ah_radio_2ghz_revision >= AR5K_SREV_RAD_UNSUPP) { 273 printf(": RF radio not supported\n"); 274 error = EOPNOTSUPP; 275 goto bad; 276 } 277 #endif 278 279 sc->sc_ah = ah; 280 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ 281 282 /* 283 * Get regulation domain either stored in the EEPROM or defined 284 * as the default value. Some devices are known to have broken 285 * regulation domain values in their EEPROM. 286 */ 287 ath_hal_get_regdomain(ah, &ah->ah_regdomain); 288 289 /* 290 * Construct channel list based on the current regulation domain. 291 */ 292 error = ath_getchannels(sc, ath_outdoor, ath_xchanmode); 293 if (error != 0) 294 goto bad; 295 296 /* 297 * Setup rate tables for all potential media types. 298 */ 299 ath_rate_setup(sc, IEEE80211_MODE_11A); 300 ath_rate_setup(sc, IEEE80211_MODE_11B); 301 ath_rate_setup(sc, IEEE80211_MODE_11G); 302 303 error = ath_desc_alloc(sc); 304 if (error != 0) { 305 printf(": failed to allocate descriptors: %d\n", error); 306 goto bad; 307 } 308 timeout_set(&sc->sc_scan_to, ath_next_scan, sc); 309 timeout_set(&sc->sc_cal_to, ath_calibrate, sc); 310 timeout_set(&sc->sc_rssadapt_to, ath_rssadapt_updatestats, sc); 311 312 #ifdef __FreeBSD__ 313 ATH_TXBUF_LOCK_INIT(sc); 314 ATH_TXQ_LOCK_INIT(sc); 315 #endif 316 317 ATH_TASK_INIT(&sc->sc_txtask, ath_tx_proc, sc); 318 ATH_TASK_INIT(&sc->sc_rxtask, ath_rx_proc, sc); 319 ATH_TASK_INIT(&sc->sc_rxorntask, ath_rxorn_proc, sc); 320 ATH_TASK_INIT(&sc->sc_fataltask, ath_fatal_proc, sc); 321 ATH_TASK_INIT(&sc->sc_bmisstask, ath_bmiss_proc, sc); 322 #ifndef IEEE80211_STA_ONLY 323 ATH_TASK_INIT(&sc->sc_swbatask, ath_beacon_proc, sc); 324 #endif 325 326 /* 327 * For now just pre-allocate one data queue and one 328 * beacon queue. Note that the HAL handles resetting 329 * them at the needed time. Eventually we'll want to 330 * allocate more tx queues for splitting management 331 * frames and for QOS support. 
332 */ 333 sc->sc_bhalq = ath_hal_setup_tx_queue(ah, HAL_TX_QUEUE_BEACON, NULL); 334 if (sc->sc_bhalq == (u_int) -1) { 335 printf(": unable to setup a beacon xmit queue!\n"); 336 goto bad2; 337 } 338 339 for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) { 340 bzero(&qinfo, sizeof(qinfo)); 341 qinfo.tqi_type = HAL_TX_QUEUE_DATA; 342 qinfo.tqi_subtype = i; /* should be mapped to WME types */ 343 sc->sc_txhalq[i] = ath_hal_setup_tx_queue(ah, 344 HAL_TX_QUEUE_DATA, &qinfo); 345 if (sc->sc_txhalq[i] == (u_int) -1) { 346 printf(": unable to setup a data xmit queue %u!\n", i); 347 goto bad2; 348 } 349 } 350 351 ifp->if_softc = sc; 352 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; 353 ifp->if_start = ath_start; 354 ifp->if_watchdog = ath_watchdog; 355 ifp->if_ioctl = ath_ioctl; 356 #ifndef __OpenBSD__ 357 ifp->if_stop = ath_stop; /* XXX */ 358 #endif 359 IFQ_SET_MAXLEN(&ifp->if_snd, ATH_TXBUF * ATH_TXDESC); 360 361 ic->ic_softc = sc; 362 ic->ic_newassoc = ath_newassoc; 363 /* XXX not right but it's not used anywhere important */ 364 ic->ic_phytype = IEEE80211_T_OFDM; 365 ic->ic_opmode = IEEE80211_M_STA; 366 ic->ic_caps = IEEE80211_C_WEP /* wep supported */ 367 | IEEE80211_C_PMGT /* power management */ 368 #ifndef IEEE80211_STA_ONLY 369 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ 370 | IEEE80211_C_HOSTAP /* hostap mode */ 371 #endif 372 | IEEE80211_C_MONITOR /* monitor mode */ 373 | IEEE80211_C_SHSLOT /* short slot time supported */ 374 | IEEE80211_C_SHPREAMBLE; /* short preamble supported */ 375 if (ath_softcrypto) 376 ic->ic_caps |= IEEE80211_C_RSN; /* wpa/rsn supported */ 377 378 /* 379 * Not all chips have the VEOL support we want to use with 380 * IBSS beacon; check here for it. 381 */ 382 sc->sc_veol = ath_hal_has_veol(ah); 383 384 /* get mac address from hardware */ 385 ath_hal_get_lladdr(ah, ic->ic_myaddr); 386 387 if_attach(ifp); 388 389 /* call MI attach routine. */ 390 ieee80211_ifattach(ifp); 391 392 /* override default methods */ 393 ic->ic_node_alloc = ath_node_alloc; 394 sc->sc_node_free = ic->ic_node_free; 395 ic->ic_node_free = ath_node_free; 396 sc->sc_node_copy = ic->ic_node_copy; 397 ic->ic_node_copy = ath_node_copy; 398 ic->ic_node_getrssi = ath_node_getrssi; 399 sc->sc_newstate = ic->ic_newstate; 400 ic->ic_newstate = ath_newstate; 401 #ifndef IEEE80211_STA_ONLY 402 sc->sc_recv_mgmt = ic->ic_recv_mgmt; 403 ic->ic_recv_mgmt = ath_recv_mgmt; 404 #endif 405 ic->ic_max_rssi = AR5K_MAX_RSSI; 406 bcopy(etherbroadcastaddr, sc->sc_broadcast_addr, IEEE80211_ADDR_LEN); 407 408 /* complete initialization */ 409 ieee80211_media_init(ifp, ath_media_change, ieee80211_media_status); 410 411 #if NBPFILTER > 0 412 bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO, 413 sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN); 414 415 sc->sc_rxtap_len = sizeof(sc->sc_rxtapu); 416 bzero(&sc->sc_rxtapu, sc->sc_rxtap_len); 417 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); 418 sc->sc_rxtap.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT); 419 420 sc->sc_txtap_len = sizeof(sc->sc_txtapu); 421 bzero(&sc->sc_txtapu, sc->sc_txtap_len); 422 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); 423 sc->sc_txtap.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT); 424 #endif 425 426 sc->sc_flags |= ATH_ATTACHED; 427 428 /* 429 * Print regulation domain and the mac address. The regulation domain 430 * will be marked with a * if the EEPROM value has been overwritten. 
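 *
 * Illustrative aside, not part of the original source: the radiotap
 * headers initialized further above are prepended to frames handed to
 * bpf(4) via DLT_IEEE802_11_RADIO.  The radiotap format requires it_len
 * and it_present to be little-endian regardless of host byte order,
 * which is why the code uses htole16()/htole32().  A minimal sketch,
 * assuming the standard struct ieee80211_radiotap_header from
 * <net80211/ieee80211_radiotap.h>; the helper name is hypothetical:
 *
 *	static void
 *	example_radiotap_hdr_init(struct ieee80211_radiotap_header *rh,
 *	    u_int16_t len, u_int32_t present)
 *	{
 *		rh->it_version = 0;		// always 0 for radiotap
 *		rh->it_pad = 0;
 *		rh->it_len = htole16(len);	// header plus fields, LE
 *		rh->it_present = htole32(present); // field bitmap, LE
 *	}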
431 */ 432 printf(", %s%s, address %s\n", 433 ieee80211_regdomain2name(ah->ah_regdomain), 434 ah->ah_regdomain != ah->ah_regdomain_hw ? "*" : "", 435 ether_sprintf(ic->ic_myaddr)); 436 437 if (ath_gpio_attach(sc, devid) == 0) 438 sc->sc_flags |= ATH_GPIO; 439 440 return 0; 441 bad2: 442 ath_desc_free(sc); 443 bad: 444 if (ah) 445 ath_hal_detach(ah); 446 sc->sc_invalid = 1; 447 return error; 448 } 449 450 int 451 ath_detach(struct ath_softc *sc, int flags) 452 { 453 struct ifnet *ifp = &sc->sc_ic.ic_if; 454 int s; 455 456 if ((sc->sc_flags & ATH_ATTACHED) == 0) 457 return (0); 458 459 config_detach_children(&sc->sc_dev, flags); 460 461 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags %x\n", __func__, ifp->if_flags)); 462 463 timeout_del(&sc->sc_scan_to); 464 timeout_del(&sc->sc_cal_to); 465 timeout_del(&sc->sc_rssadapt_to); 466 467 s = splnet(); 468 ath_stop(ifp); 469 ath_desc_free(sc); 470 ath_hal_detach(sc->sc_ah); 471 472 ieee80211_ifdetach(ifp); 473 if_detach(ifp); 474 475 splx(s); 476 #ifdef __FreeBSD__ 477 ATH_TXBUF_LOCK_DESTROY(sc); 478 ATH_TXQ_LOCK_DESTROY(sc); 479 #endif 480 481 return 0; 482 } 483 484 int 485 ath_intr(void *arg) 486 { 487 return ath_intr1((struct ath_softc *)arg); 488 } 489 490 int 491 ath_intr1(struct ath_softc *sc) 492 { 493 struct ieee80211com *ic = &sc->sc_ic; 494 struct ifnet *ifp = &ic->ic_if; 495 struct ath_hal *ah = sc->sc_ah; 496 HAL_INT status; 497 498 if (sc->sc_invalid) { 499 /* 500 * The hardware is not ready/present, don't touch anything. 501 * Note this can happen early on if the IRQ is shared. 502 */ 503 DPRINTF(ATH_DEBUG_ANY, ("%s: invalid; ignored\n", __func__)); 504 return 0; 505 } 506 if (!ath_hal_is_intr_pending(ah)) /* shared irq, not for us */ 507 return 0; 508 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) != (IFF_RUNNING|IFF_UP)) { 509 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n", 510 __func__, ifp->if_flags)); 511 ath_hal_get_isr(ah, &status); /* clear ISR */ 512 ath_hal_set_intr(ah, 0); /* disable further intr's */ 513 return 1; /* XXX */ 514 } 515 ath_hal_get_isr(ah, &status); /* NB: clears ISR too */ 516 DPRINTF(ATH_DEBUG_INTR, ("%s: status 0x%x\n", __func__, status)); 517 status &= sc->sc_imask; /* discard unasked for bits */ 518 if (status & HAL_INT_FATAL) { 519 sc->sc_stats.ast_hardware++; 520 ath_hal_set_intr(ah, 0); /* disable intr's until reset */ 521 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_fataltask); 522 } else if (status & HAL_INT_RXORN) { 523 sc->sc_stats.ast_rxorn++; 524 ath_hal_set_intr(ah, 0); /* disable intr's until reset */ 525 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxorntask); 526 } else if (status & HAL_INT_MIB) { 527 DPRINTF(ATH_DEBUG_INTR, 528 ("%s: resetting MIB counters\n", __func__)); 529 sc->sc_stats.ast_mib++; 530 ath_hal_update_mib_counters(ah, &sc->sc_mib_stats); 531 } else { 532 if (status & HAL_INT_RXEOL) { 533 /* 534 * NB: the hardware should re-read the link when 535 * RXE bit is written, but it doesn't work at 536 * least on older hardware revs. 
537 */ 538 sc->sc_stats.ast_rxeol++; 539 sc->sc_rxlink = NULL; 540 } 541 if (status & HAL_INT_TXURN) { 542 sc->sc_stats.ast_txurn++; 543 /* bump tx trigger level */ 544 ath_hal_update_tx_triglevel(ah, AH_TRUE); 545 } 546 if (status & HAL_INT_RX) 547 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxtask); 548 if (status & HAL_INT_TX) 549 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_txtask); 550 if (status & HAL_INT_SWBA) 551 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_swbatask); 552 if (status & HAL_INT_BMISS) { 553 sc->sc_stats.ast_bmiss++; 554 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_bmisstask); 555 } 556 } 557 return 1; 558 } 559 560 void 561 ath_fatal_proc(void *arg, int pending) 562 { 563 struct ath_softc *sc = arg; 564 struct ieee80211com *ic = &sc->sc_ic; 565 struct ifnet *ifp = &ic->ic_if; 566 567 if (ifp->if_flags & IFF_DEBUG) 568 printf("%s: hardware error; resetting\n", ifp->if_xname); 569 ath_reset(sc, 1); 570 } 571 572 void 573 ath_rxorn_proc(void *arg, int pending) 574 { 575 struct ath_softc *sc = arg; 576 struct ieee80211com *ic = &sc->sc_ic; 577 struct ifnet *ifp = &ic->ic_if; 578 579 if (ifp->if_flags & IFF_DEBUG) 580 printf("%s: rx FIFO overrun; resetting\n", ifp->if_xname); 581 ath_reset(sc, 1); 582 } 583 584 void 585 ath_bmiss_proc(void *arg, int pending) 586 { 587 struct ath_softc *sc = arg; 588 struct ieee80211com *ic = &sc->sc_ic; 589 590 DPRINTF(ATH_DEBUG_ANY, ("%s: pending %u\n", __func__, pending)); 591 if (ic->ic_opmode != IEEE80211_M_STA) 592 return; 593 if (ic->ic_state == IEEE80211_S_RUN) { 594 /* 595 * Rather than go directly to scan state, try to 596 * reassociate first. If that fails then the state 597 * machine will drop us into scanning after timing 598 * out waiting for a probe response. 599 */ 600 ieee80211_new_state(ic, IEEE80211_S_ASSOC, -1); 601 } 602 } 603 604 u_int 605 ath_chan2flags(struct ieee80211com *ic, struct ieee80211_channel *chan) 606 { 607 enum ieee80211_phymode mode = ieee80211_chan2mode(ic, chan); 608 609 switch (mode) { 610 case IEEE80211_MODE_AUTO: 611 return 0; 612 case IEEE80211_MODE_11A: 613 return CHANNEL_A; 614 case IEEE80211_MODE_11B: 615 return CHANNEL_B; 616 case IEEE80211_MODE_11G: 617 return CHANNEL_G; 618 default: 619 panic("%s: unsupported mode %d", __func__, mode); 620 return 0; 621 } 622 } 623 624 int 625 ath_init(struct ifnet *ifp) 626 { 627 return ath_init1((struct ath_softc *)ifp->if_softc); 628 } 629 630 int 631 ath_init1(struct ath_softc *sc) 632 { 633 struct ieee80211com *ic = &sc->sc_ic; 634 struct ifnet *ifp = &ic->ic_if; 635 struct ieee80211_node *ni; 636 enum ieee80211_phymode mode; 637 struct ath_hal *ah = sc->sc_ah; 638 HAL_STATUS status; 639 HAL_CHANNEL hchan; 640 int error = 0, s; 641 642 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n", 643 __func__, ifp->if_flags)); 644 645 if ((error = ath_enable(sc)) != 0) 646 return error; 647 648 s = splnet(); 649 /* 650 * Stop anything previously setup. This is safe 651 * whether this is the first time through or not. 652 */ 653 ath_stop(ifp); 654 655 /* 656 * Reset the link layer address to the latest value. 657 */ 658 IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl)); 659 ath_hal_set_lladdr(ah, ic->ic_myaddr); 660 661 /* 662 * The basic interface to setting the hardware in a good 663 * state is ``reset''. On return the hardware is known to 664 * be powered up and with interrupts disabled. This must 665 * be followed by initialization of the appropriate bits 666 * and then setup of the interrupt mask. 
667 */ 668 hchan.channel = ic->ic_ibss_chan->ic_freq; 669 hchan.channelFlags = ath_chan2flags(ic, ic->ic_ibss_chan); 670 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, &status)) { 671 printf("%s: unable to reset hardware; hal status %u\n", 672 ifp->if_xname, status); 673 error = EIO; 674 goto done; 675 } 676 ath_set_slot_time(sc); 677 678 if ((error = ath_initkeytable(sc)) != 0) { 679 printf("%s: unable to reset the key cache\n", 680 ifp->if_xname); 681 goto done; 682 } 683 684 if ((error = ath_startrecv(sc)) != 0) { 685 printf("%s: unable to start recv logic\n", ifp->if_xname); 686 goto done; 687 } 688 689 /* 690 * Enable interrupts. 691 */ 692 sc->sc_imask = HAL_INT_RX | HAL_INT_TX 693 | HAL_INT_RXEOL | HAL_INT_RXORN 694 | HAL_INT_FATAL | HAL_INT_GLOBAL; 695 #ifndef IEEE80211_STA_ONLY 696 if (ic->ic_opmode == IEEE80211_M_HOSTAP) 697 sc->sc_imask |= HAL_INT_MIB; 698 #endif 699 ath_hal_set_intr(ah, sc->sc_imask); 700 701 ifp->if_flags |= IFF_RUNNING; 702 ic->ic_state = IEEE80211_S_INIT; 703 704 /* 705 * The hardware should be ready to go now so it's safe 706 * to kick the 802.11 state machine as it's likely to 707 * immediately call back to us to send mgmt frames. 708 */ 709 ni = ic->ic_bss; 710 ni->ni_chan = ic->ic_ibss_chan; 711 mode = ieee80211_chan2mode(ic, ni->ni_chan); 712 if (mode != sc->sc_curmode) 713 ath_setcurmode(sc, mode); 714 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 715 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1); 716 } else { 717 ieee80211_new_state(ic, IEEE80211_S_RUN, -1); 718 } 719 done: 720 splx(s); 721 return error; 722 } 723 724 void 725 ath_stop(struct ifnet *ifp) 726 { 727 struct ieee80211com *ic = (struct ieee80211com *) ifp; 728 struct ath_softc *sc = ifp->if_softc; 729 struct ath_hal *ah = sc->sc_ah; 730 int s; 731 732 DPRINTF(ATH_DEBUG_ANY, ("%s: invalid %u if_flags 0x%x\n", 733 __func__, sc->sc_invalid, ifp->if_flags)); 734 735 s = splnet(); 736 if (ifp->if_flags & IFF_RUNNING) { 737 /* 738 * Shutdown the hardware and driver: 739 * disable interrupts 740 * turn off timers 741 * clear transmit machinery 742 * clear receive machinery 743 * drain and release tx queues 744 * reclaim beacon resources 745 * reset 802.11 state machine 746 * power down hardware 747 * 748 * Note that some of this work is not possible if the 749 * hardware is gone (invalid). 750 */ 751 ifp->if_flags &= ~IFF_RUNNING; 752 ifp->if_timer = 0; 753 if (!sc->sc_invalid) 754 ath_hal_set_intr(ah, 0); 755 ath_draintxq(sc); 756 if (!sc->sc_invalid) { 757 ath_stoprecv(sc); 758 } else { 759 sc->sc_rxlink = NULL; 760 } 761 IFQ_PURGE(&ifp->if_snd); 762 #ifndef IEEE80211_STA_ONLY 763 ath_beacon_free(sc); 764 #endif 765 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 766 if (!sc->sc_invalid) { 767 ath_hal_set_power(ah, HAL_PM_FULL_SLEEP, 0); 768 } 769 ath_disable(sc); 770 } 771 splx(s); 772 } 773 774 /* 775 * Reset the hardware w/o losing operational state. This is 776 * basically a more efficient way of doing ath_stop, ath_init, 777 * followed by state transitions to the current 802.11 778 * operational state. Used to recover from errors rx overrun 779 * and to reset the hardware when rf gain settings must be reset. 
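 *
 * Concretely, the sequence below is: disable interrupts, drain the
 * transmit queues, stop the receive engine, perform the HAL reset on
 * the current channel, restore the slot time, re-enable the interrupt
 * mask, restart receive, restart transmit, and reprogram the beacon
 * timers if the 802.11 state machine is in RUN state.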
780 */ 781 void 782 ath_reset(struct ath_softc *sc, int full) 783 { 784 struct ieee80211com *ic = &sc->sc_ic; 785 struct ifnet *ifp = &ic->ic_if; 786 struct ath_hal *ah = sc->sc_ah; 787 struct ieee80211_channel *c; 788 HAL_STATUS status; 789 HAL_CHANNEL hchan; 790 791 /* 792 * Convert to a HAL channel description with the flags 793 * constrained to reflect the current operating mode. 794 */ 795 c = ic->ic_ibss_chan; 796 hchan.channel = c->ic_freq; 797 hchan.channelFlags = ath_chan2flags(ic, c); 798 799 ath_hal_set_intr(ah, 0); /* disable interrupts */ 800 ath_draintxq(sc); /* stop xmit side */ 801 ath_stoprecv(sc); /* stop recv side */ 802 /* NB: indicate channel change so we do a full reset */ 803 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, 804 full ? AH_TRUE : AH_FALSE, &status)) { 805 printf("%s: %s: unable to reset hardware; hal status %u\n", 806 ifp->if_xname, __func__, status); 807 } 808 ath_set_slot_time(sc); 809 /* In case channel changed, save as a node channel */ 810 ic->ic_bss->ni_chan = ic->ic_ibss_chan; 811 ath_hal_set_intr(ah, sc->sc_imask); 812 if (ath_startrecv(sc) != 0) /* restart recv */ 813 printf("%s: %s: unable to start recv logic\n", ifp->if_xname, 814 __func__); 815 ath_start(ifp); /* restart xmit */ 816 if (ic->ic_state == IEEE80211_S_RUN) 817 ath_beacon_config(sc); /* restart beacons */ 818 } 819 820 void 821 ath_start(struct ifnet *ifp) 822 { 823 struct ath_softc *sc = ifp->if_softc; 824 struct ath_hal *ah = sc->sc_ah; 825 struct ieee80211com *ic = &sc->sc_ic; 826 struct ieee80211_node *ni; 827 struct ath_buf *bf; 828 struct mbuf *m; 829 struct ieee80211_frame *wh; 830 int s; 831 832 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd) || 833 sc->sc_invalid) 834 return; 835 for (;;) { 836 /* 837 * Grab a TX buffer and associated resources. 838 */ 839 s = splnet(); 840 bf = TAILQ_FIRST(&sc->sc_txbuf); 841 if (bf != NULL) 842 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); 843 splx(s); 844 if (bf == NULL) { 845 DPRINTF(ATH_DEBUG_ANY, ("%s: out of xmit buffers\n", 846 __func__)); 847 sc->sc_stats.ast_tx_qstop++; 848 ifq_set_oactive(&ifp->if_snd); 849 break; 850 } 851 /* 852 * Poll the management queue for frames; they 853 * have priority over normal data frames. 854 */ 855 m = mq_dequeue(&ic->ic_mgtq); 856 if (m == NULL) { 857 /* 858 * No data frames go out unless we're associated. 859 */ 860 if (ic->ic_state != IEEE80211_S_RUN) { 861 DPRINTF(ATH_DEBUG_ANY, 862 ("%s: ignore data packet, state %u\n", 863 __func__, ic->ic_state)); 864 sc->sc_stats.ast_tx_discard++; 865 s = splnet(); 866 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 867 splx(s); 868 break; 869 } 870 IFQ_DEQUEUE(&ifp->if_snd, m); 871 if (m == NULL) { 872 s = splnet(); 873 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 874 splx(s); 875 break; 876 } 877 878 #if NBPFILTER > 0 879 if (ifp->if_bpf) 880 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 881 #endif 882 883 /* 884 * Encapsulate the packet in prep for transmission. 
885 */ 886 m = ieee80211_encap(ifp, m, &ni); 887 if (m == NULL) { 888 DPRINTF(ATH_DEBUG_ANY, 889 ("%s: encapsulation failure\n", 890 __func__)); 891 sc->sc_stats.ast_tx_encap++; 892 goto bad; 893 } 894 wh = mtod(m, struct ieee80211_frame *); 895 } else { 896 ni = m->m_pkthdr.ph_cookie; 897 898 wh = mtod(m, struct ieee80211_frame *); 899 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 900 IEEE80211_FC0_SUBTYPE_PROBE_RESP) { 901 /* fill time stamp */ 902 u_int64_t tsf; 903 u_int32_t *tstamp; 904 905 tsf = ath_hal_get_tsf64(ah); 906 /* XXX: adjust 100us delay to xmit */ 907 tsf += 100; 908 tstamp = (u_int32_t *)&wh[1]; 909 tstamp[0] = htole32(tsf & 0xffffffff); 910 tstamp[1] = htole32(tsf >> 32); 911 } 912 sc->sc_stats.ast_tx_mgmt++; 913 } 914 915 if (ath_tx_start(sc, ni, bf, m)) { 916 bad: 917 s = splnet(); 918 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 919 splx(s); 920 ifp->if_oerrors++; 921 if (ni != NULL) 922 ieee80211_release_node(ic, ni); 923 continue; 924 } 925 926 sc->sc_tx_timer = 5; 927 ifp->if_timer = 1; 928 } 929 } 930 931 int 932 ath_media_change(struct ifnet *ifp) 933 { 934 int error; 935 936 error = ieee80211_media_change(ifp); 937 if (error == ENETRESET) { 938 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == 939 (IFF_RUNNING|IFF_UP)) 940 ath_init(ifp); /* XXX lose error */ 941 error = 0; 942 } 943 return error; 944 } 945 946 void 947 ath_watchdog(struct ifnet *ifp) 948 { 949 struct ath_softc *sc = ifp->if_softc; 950 951 ifp->if_timer = 0; 952 if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) 953 return; 954 if (sc->sc_tx_timer) { 955 if (--sc->sc_tx_timer == 0) { 956 printf("%s: device timeout\n", ifp->if_xname); 957 ath_reset(sc, 1); 958 ifp->if_oerrors++; 959 sc->sc_stats.ast_watchdog++; 960 return; 961 } 962 ifp->if_timer = 1; 963 } 964 965 ieee80211_watchdog(ifp); 966 } 967 968 int 969 ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 970 { 971 struct ath_softc *sc = ifp->if_softc; 972 struct ieee80211com *ic = &sc->sc_ic; 973 struct ifreq *ifr = (struct ifreq *)data; 974 int error = 0, s; 975 976 s = splnet(); 977 switch (cmd) { 978 case SIOCSIFADDR: 979 ifp->if_flags |= IFF_UP; 980 /* FALLTHROUGH */ 981 case SIOCSIFFLAGS: 982 if (ifp->if_flags & IFF_UP) { 983 if (ifp->if_flags & IFF_RUNNING) { 984 /* 985 * To avoid rescanning another access point, 986 * do not call ath_init() here. Instead, 987 * only reflect promisc mode settings. 988 */ 989 ath_mode_init(sc); 990 } else { 991 /* 992 * Beware of being called during detach to 993 * reset promiscuous mode. In that case we 994 * will still be marked UP but not RUNNING. 995 * However trying to re-init the interface 996 * is the wrong thing to do as we've already 997 * torn down much of our state. There's 998 * probably a better way to deal with this. 999 */ 1000 if (!sc->sc_invalid) 1001 ath_init(ifp); /* XXX lose error */ 1002 } 1003 } else 1004 ath_stop(ifp); 1005 break; 1006 case SIOCADDMULTI: 1007 case SIOCDELMULTI: 1008 #ifdef __FreeBSD__ 1009 /* 1010 * The upper layer has already installed/removed 1011 * the multicast address(es), just recalculate the 1012 * multicast filter for the card. 1013 */ 1014 if (ifp->if_flags & IFF_RUNNING) 1015 ath_mode_init(sc); 1016 #endif 1017 error = (cmd == SIOCADDMULTI) ? 
1018 ether_addmulti(ifr, &sc->sc_ic.ic_ac) : 1019 ether_delmulti(ifr, &sc->sc_ic.ic_ac); 1020 if (error == ENETRESET) { 1021 if (ifp->if_flags & IFF_RUNNING) 1022 ath_mode_init(sc); 1023 error = 0; 1024 } 1025 break; 1026 case SIOCGATHSTATS: 1027 error = copyout(&sc->sc_stats, 1028 ifr->ifr_data, sizeof (sc->sc_stats)); 1029 break; 1030 default: 1031 error = ieee80211_ioctl(ifp, cmd, data); 1032 if (error == ENETRESET) { 1033 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == 1034 (IFF_RUNNING|IFF_UP)) { 1035 if (ic->ic_opmode != IEEE80211_M_MONITOR) 1036 ath_init(ifp); /* XXX lose error */ 1037 else 1038 ath_reset(sc, 1); 1039 } 1040 error = 0; 1041 } 1042 break; 1043 } 1044 splx(s); 1045 return error; 1046 } 1047 1048 /* 1049 * Fill the hardware key cache with key entries. 1050 */ 1051 int 1052 ath_initkeytable(struct ath_softc *sc) 1053 { 1054 struct ieee80211com *ic = &sc->sc_ic; 1055 struct ath_hal *ah = sc->sc_ah; 1056 int i; 1057 1058 if (ath_softcrypto) { 1059 /* 1060 * Disable the hardware crypto engine and reset the key cache 1061 * to allow software crypto operation for WEP/RSN/WPA2 1062 */ 1063 if (ic->ic_flags & (IEEE80211_F_WEPON|IEEE80211_F_RSNON)) 1064 (void)ath_hal_softcrypto(ah, AH_TRUE); 1065 else 1066 (void)ath_hal_softcrypto(ah, AH_FALSE); 1067 return (0); 1068 } 1069 1070 /* WEP is disabled, we only support WEP in hardware yet */ 1071 if ((ic->ic_flags & IEEE80211_F_WEPON) == 0) 1072 return (0); 1073 1074 /* 1075 * Setup the hardware after reset: the key cache is filled as 1076 * needed and the receive engine is set going. Frame transmit 1077 * is handled entirely in the frame output path; there's nothing 1078 * to do here except setup the interrupt mask. 1079 */ 1080 1081 /* XXX maybe should reset all keys when !WEPON */ 1082 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 1083 struct ieee80211_key *k = &ic->ic_nw_keys[i]; 1084 if (k->k_len == 0) 1085 ath_hal_reset_key(ah, i); 1086 else { 1087 HAL_KEYVAL hk; 1088 1089 bzero(&hk, sizeof(hk)); 1090 /* 1091 * Pad the key to a supported key length. It 1092 * is always a good idea to use full-length 1093 * keys without padded zeros but this seems 1094 * to be the default behaviour used by many 1095 * implementations. 1096 */ 1097 if (k->k_cipher == IEEE80211_CIPHER_WEP40) 1098 hk.wk_len = AR5K_KEYVAL_LENGTH_40; 1099 else if (k->k_cipher == IEEE80211_CIPHER_WEP104) 1100 hk.wk_len = AR5K_KEYVAL_LENGTH_104; 1101 else 1102 return (EINVAL); 1103 bcopy(k->k_key, hk.wk_key, hk.wk_len); 1104 1105 if (ath_hal_set_key(ah, i, &hk) != AH_TRUE) 1106 return (EINVAL); 1107 } 1108 } 1109 1110 return (0); 1111 } 1112 1113 void 1114 ath_mcastfilter_accum(caddr_t dl, u_int32_t (*mfilt)[2]) 1115 { 1116 u_int32_t val; 1117 u_int8_t pos; 1118 1119 val = LE_READ_4(dl + 0); 1120 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 1121 val = LE_READ_4(dl + 3); 1122 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 1123 pos &= 0x3f; 1124 (*mfilt)[pos / 32] |= (1 << (pos % 32)); 1125 } 1126 1127 void 1128 ath_mcastfilter_compute(struct ath_softc *sc, u_int32_t (*mfilt)[2]) 1129 { 1130 struct arpcom *ac = &sc->sc_ic.ic_ac; 1131 struct ifnet *ifp = &sc->sc_ic.ic_if; 1132 struct ether_multi *enm; 1133 struct ether_multistep estep; 1134 1135 if (ac->ac_multirangecnt > 0) { 1136 /* XXX Punt on ranges. 
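 *
 * Illustrative sketch, not part of the original driver: the filter
 * built by ath_mcastfilter_accum() above hashes each 48-bit group
 * address down to a 6-bit bit index into a 64-bit filter which the
 * HAL programs as two 32-bit words (mfilt[0] holds bits 0..31,
 * mfilt[1] bits 32..63).  Only the low 24 bits of each LE_READ_4()
 * can reach the final index once the & 0x3f mask is applied, so an
 * equivalent standalone form of the hash (hypothetical helper name)
 * is:
 *
 *	static u_int8_t
 *	example_mcast_hash(const u_int8_t *addr)	// 6-byte address
 *	{
 *		u_int32_t lo, hi, pos;
 *
 *		lo = addr[0] | (addr[1] << 8) | (addr[2] << 16);
 *		hi = addr[3] | (addr[4] << 8) | (addr[5] << 16);
 *		pos  = (lo >> 18) ^ (lo >> 12) ^ (lo >> 6) ^ lo;
 *		pos ^= (hi >> 18) ^ (hi >> 12) ^ (hi >> 6) ^ hi;
 *		return (pos & 0x3f);		// bit index 0..63
 *	}
 *
 * The caller then sets that bit with
 *	mfilt[pos / 32] |= (1 << (pos % 32));
 * as ath_mcastfilter_accum() does above.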
*/ 1137 (*mfilt)[0] = (*mfilt)[1] = ~((u_int32_t)0); 1138 ifp->if_flags |= IFF_ALLMULTI; 1139 return; 1140 } 1141 1142 ETHER_FIRST_MULTI(estep, ac, enm); 1143 while (enm != NULL) { 1144 ath_mcastfilter_accum(enm->enm_addrlo, mfilt); 1145 ETHER_NEXT_MULTI(estep, enm); 1146 } 1147 ifp->if_flags &= ~IFF_ALLMULTI; 1148 } 1149 1150 /* 1151 * Calculate the receive filter according to the 1152 * operating mode and state: 1153 * 1154 * o always accept unicast, broadcast, and multicast traffic 1155 * o maintain current state of phy error reception 1156 * o probe request frames are accepted only when operating in 1157 * hostap, adhoc, or monitor modes 1158 * o enable promiscuous mode according to the interface state 1159 * o accept beacons: 1160 * - when operating in adhoc mode so the 802.11 layer creates 1161 * node table entries for peers, 1162 * - when operating in station mode for collecting rssi data when 1163 * the station is otherwise quiet, or 1164 * - when scanning 1165 */ 1166 u_int32_t 1167 ath_calcrxfilter(struct ath_softc *sc) 1168 { 1169 struct ieee80211com *ic = &sc->sc_ic; 1170 struct ath_hal *ah = sc->sc_ah; 1171 struct ifnet *ifp = &ic->ic_if; 1172 u_int32_t rfilt; 1173 1174 rfilt = (ath_hal_get_rx_filter(ah) & HAL_RX_FILTER_PHYERR) 1175 | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST; 1176 if (ic->ic_opmode != IEEE80211_M_STA) 1177 rfilt |= HAL_RX_FILTER_PROBEREQ; 1178 #ifndef IEEE80211_STA_ONLY 1179 if (ic->ic_opmode != IEEE80211_M_AHDEMO) 1180 #endif 1181 rfilt |= HAL_RX_FILTER_BEACON; 1182 if (ifp->if_flags & IFF_PROMISC) 1183 rfilt |= HAL_RX_FILTER_PROM; 1184 return rfilt; 1185 } 1186 1187 void 1188 ath_mode_init(struct ath_softc *sc) 1189 { 1190 struct ath_hal *ah = sc->sc_ah; 1191 u_int32_t rfilt, mfilt[2]; 1192 1193 /* configure rx filter */ 1194 rfilt = ath_calcrxfilter(sc); 1195 ath_hal_set_rx_filter(ah, rfilt); 1196 1197 /* configure operational mode */ 1198 ath_hal_set_opmode(ah); 1199 1200 /* calculate and install multicast filter */ 1201 mfilt[0] = mfilt[1] = 0; 1202 ath_mcastfilter_compute(sc, &mfilt); 1203 ath_hal_set_mcast_filter(ah, mfilt[0], mfilt[1]); 1204 DPRINTF(ATH_DEBUG_MODE, ("%s: RX filter 0x%x, MC filter %08x:%08x\n", 1205 __func__, rfilt, mfilt[0], mfilt[1])); 1206 } 1207 1208 struct mbuf * 1209 ath_getmbuf(int flags, int type, u_int pktlen) 1210 { 1211 struct mbuf *m; 1212 1213 KASSERT(pktlen <= MCLBYTES, ("802.11 packet too large: %u", pktlen)); 1214 #ifdef __FreeBSD__ 1215 if (pktlen <= MHLEN) { 1216 MGETHDR(m, flags, type); 1217 } else { 1218 m = m_getcl(flags, type, M_PKTHDR); 1219 } 1220 #else 1221 MGETHDR(m, flags, type); 1222 if (m != NULL && pktlen > MHLEN) { 1223 MCLGET(m, flags); 1224 if ((m->m_flags & M_EXT) == 0) { 1225 m_free(m); 1226 m = NULL; 1227 } 1228 } 1229 #endif 1230 return m; 1231 } 1232 1233 #ifndef IEEE80211_STA_ONLY 1234 int 1235 ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni) 1236 { 1237 struct ieee80211com *ic = &sc->sc_ic; 1238 struct ath_hal *ah = sc->sc_ah; 1239 struct ath_buf *bf; 1240 struct ath_desc *ds; 1241 struct mbuf *m; 1242 int error; 1243 u_int8_t rate; 1244 const HAL_RATE_TABLE *rt; 1245 u_int flags = 0; 1246 1247 bf = sc->sc_bcbuf; 1248 if (bf->bf_m != NULL) { 1249 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1250 m_freem(bf->bf_m); 1251 bf->bf_m = NULL; 1252 bf->bf_node = NULL; 1253 } 1254 /* 1255 * NB: the beacon data buffer must be 32-bit aligned; 1256 * we assume the mbuf routines will return us something 1257 * with this alignment (perhaps should assert). 
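 *
 * A minimal sketch of such an assertion (not something the driver
 * currently performs), following the two-argument KASSERT() style
 * already used in this file and applied to the mbuf obtained just
 * below:
 *
 *	KASSERT(((unsigned long)mtod(m, caddr_t) & 0x3) == 0,
 *	    ("%s: beacon buffer not 32-bit aligned", __func__));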
1258 */ 1259 m = ieee80211_beacon_alloc(ic, ni); 1260 if (m == NULL) { 1261 DPRINTF(ATH_DEBUG_BEACON, ("%s: cannot get mbuf/cluster\n", 1262 __func__)); 1263 sc->sc_stats.ast_be_nombuf++; 1264 return ENOMEM; 1265 } 1266 1267 DPRINTF(ATH_DEBUG_BEACON, ("%s: m %p len %u\n", __func__, m, m->m_len)); 1268 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m, 1269 BUS_DMA_NOWAIT); 1270 if (error != 0) { 1271 m_freem(m); 1272 return error; 1273 } 1274 KASSERT(bf->bf_nseg == 1, 1275 ("%s: multi-segment packet; nseg %u", __func__, bf->bf_nseg)); 1276 bf->bf_m = m; 1277 1278 /* setup descriptors */ 1279 ds = bf->bf_desc; 1280 bzero(ds, sizeof(struct ath_desc)); 1281 1282 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol) { 1283 ds->ds_link = bf->bf_daddr; /* link to self */ 1284 flags |= HAL_TXDESC_VEOL; 1285 } else { 1286 ds->ds_link = 0; 1287 } 1288 ds->ds_data = bf->bf_segs[0].ds_addr; 1289 1290 DPRINTF(ATH_DEBUG_ANY, ("%s: segaddr %p seglen %u\n", __func__, 1291 (caddr_t)bf->bf_segs[0].ds_addr, (u_int)bf->bf_segs[0].ds_len)); 1292 1293 /* 1294 * Calculate rate code. 1295 * XXX everything at min xmit rate 1296 */ 1297 rt = sc->sc_currates; 1298 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 1299 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) { 1300 rate = rt->info[0].rateCode | rt->info[0].shortPreamble; 1301 } else { 1302 rate = rt->info[0].rateCode; 1303 } 1304 1305 flags = HAL_TXDESC_NOACK; 1306 if (ic->ic_opmode == IEEE80211_M_IBSS) 1307 flags |= HAL_TXDESC_VEOL; 1308 1309 if (!ath_hal_setup_tx_desc(ah, ds 1310 , m->m_pkthdr.len + IEEE80211_CRC_LEN /* packet length */ 1311 , sizeof(struct ieee80211_frame) /* header length */ 1312 , HAL_PKT_TYPE_BEACON /* Atheros packet type */ 1313 , 60 /* txpower XXX */ 1314 , rate, 1 /* series 0 rate/tries */ 1315 , HAL_TXKEYIX_INVALID /* no encryption */ 1316 , 0 /* antenna mode */ 1317 , flags /* no ack for beacons */ 1318 , 0 /* rts/cts rate */ 1319 , 0 /* rts/cts duration */ 1320 )) { 1321 printf("%s: ath_hal_setup_tx_desc failed\n", __func__); 1322 return -1; 1323 } 1324 /* NB: beacon's BufLen must be a multiple of 4 bytes */ 1325 /* XXX verify mbuf data area covers this roundup */ 1326 if (!ath_hal_fill_tx_desc(ah, ds 1327 , roundup(bf->bf_segs[0].ds_len, 4) /* buffer length */ 1328 , AH_TRUE /* first segment */ 1329 , AH_TRUE /* last segment */ 1330 )) { 1331 printf("%s: ath_hal_fill_tx_desc failed\n", __func__); 1332 return -1; 1333 } 1334 1335 /* XXX it is not appropriate to bus_dmamap_sync? -dcy */ 1336 1337 return 0; 1338 } 1339 1340 void 1341 ath_beacon_proc(void *arg, int pending) 1342 { 1343 struct ath_softc *sc = arg; 1344 struct ieee80211com *ic = &sc->sc_ic; 1345 struct ath_buf *bf = sc->sc_bcbuf; 1346 struct ath_hal *ah = sc->sc_ah; 1347 1348 DPRINTF(ATH_DEBUG_BEACON_PROC, ("%s: pending %u\n", __func__, pending)); 1349 if (ic->ic_opmode == IEEE80211_M_STA || 1350 bf == NULL || bf->bf_m == NULL) { 1351 DPRINTF(ATH_DEBUG_ANY, ("%s: ic_flags=%x bf=%p bf_m=%p\n", 1352 __func__, ic->ic_flags, bf, bf ? 
bf->bf_m : NULL)); 1353 return; 1354 } 1355 /* TODO: update beacon to reflect PS poll state */ 1356 if (!ath_hal_stop_tx_dma(ah, sc->sc_bhalq)) { 1357 DPRINTF(ATH_DEBUG_ANY, ("%s: beacon queue %u did not stop?\n", 1358 __func__, sc->sc_bhalq)); 1359 } 1360 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 1361 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1362 1363 ath_hal_put_tx_buf(ah, sc->sc_bhalq, bf->bf_daddr); 1364 ath_hal_tx_start(ah, sc->sc_bhalq); 1365 DPRINTF(ATH_DEBUG_BEACON_PROC, 1366 ("%s: TXDP%u = %p (%p)\n", __func__, 1367 sc->sc_bhalq, (caddr_t)bf->bf_daddr, bf->bf_desc)); 1368 } 1369 1370 void 1371 ath_beacon_free(struct ath_softc *sc) 1372 { 1373 struct ath_buf *bf = sc->sc_bcbuf; 1374 1375 if (bf->bf_m != NULL) { 1376 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1377 m_freem(bf->bf_m); 1378 bf->bf_m = NULL; 1379 bf->bf_node = NULL; 1380 } 1381 } 1382 #endif /* IEEE80211_STA_ONLY */ 1383 1384 /* 1385 * Configure the beacon and sleep timers. 1386 * 1387 * When operating as an AP this resets the TSF and sets 1388 * up the hardware to notify us when we need to issue beacons. 1389 * 1390 * When operating in station mode this sets up the beacon 1391 * timers according to the timestamp of the last received 1392 * beacon and the current TSF, configures PCF and DTIM 1393 * handling, programs the sleep registers so the hardware 1394 * will wakeup in time to receive beacons, and configures 1395 * the beacon miss handling so we'll receive a BMISS 1396 * interrupt when we stop seeing beacons from the AP 1397 * we've associated with. 1398 */ 1399 void 1400 ath_beacon_config(struct ath_softc *sc) 1401 { 1402 #define MS_TO_TU(x) (((x) * 1000) / 1024) 1403 struct ath_hal *ah = sc->sc_ah; 1404 struct ieee80211com *ic = &sc->sc_ic; 1405 struct ieee80211_node *ni = ic->ic_bss; 1406 u_int32_t nexttbtt, intval; 1407 1408 nexttbtt = (LE_READ_4(ni->ni_tstamp + 4) << 22) | 1409 (LE_READ_4(ni->ni_tstamp) >> 10); 1410 intval = MAX(1, ni->ni_intval) & HAL_BEACON_PERIOD; 1411 if (nexttbtt == 0) { /* e.g. for ap mode */ 1412 nexttbtt = intval; 1413 } else if (intval) { 1414 nexttbtt = roundup(nexttbtt, intval); 1415 } 1416 DPRINTF(ATH_DEBUG_BEACON, ("%s: intval %u nexttbtt %u\n", 1417 __func__, ni->ni_intval, nexttbtt)); 1418 if (ic->ic_opmode == IEEE80211_M_STA) { 1419 HAL_BEACON_STATE bs; 1420 u_int32_t bmisstime; 1421 1422 /* NB: no PCF support right now */ 1423 bzero(&bs, sizeof(bs)); 1424 bs.bs_intval = intval; 1425 bs.bs_nexttbtt = nexttbtt; 1426 bs.bs_dtimperiod = bs.bs_intval; 1427 bs.bs_nextdtim = nexttbtt; 1428 /* 1429 * Calculate the number of consecutive beacons to miss 1430 * before taking a BMISS interrupt. The configuration 1431 * is specified in ms, so we need to convert that to 1432 * TU's and then calculate based on the beacon interval. 1433 * Note that we clamp the result to at most 10 beacons. 1434 */ 1435 bmisstime = MAX(7, ic->ic_bmisstimeout); 1436 bs.bs_bmissthreshold = howmany(bmisstime, intval); 1437 if (bs.bs_bmissthreshold > 7) { 1438 bs.bs_bmissthreshold = 7; 1439 } else if (bs.bs_bmissthreshold <= 0) { 1440 bs.bs_bmissthreshold = 1; 1441 } 1442 1443 /* 1444 * Calculate sleep duration. The configuration is 1445 * given in ms. We insure a multiple of the beacon 1446 * period is used. Also, if the sleep duration is 1447 * greater than the DTIM period then it makes senses 1448 * to make it a multiple of that. 
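 *
 * Worked example (illustrative, not in the original source): with the
 * fixed 100 ms value used below, MS_TO_TU(100) = 100 * 1000 / 1024 =
 * 97 TU.  For a typical beacon interval of 100 TU this rounds up to
 * roundup(97, 100) = 100 TU, i.e. the station sleeps for exactly one
 * beacon interval; with a shorter interval of, say, 50 TU it becomes
 * roundup(97, 50) = 100 TU, which is already a multiple of the DTIM
 * period since bs_dtimperiod is set to bs_intval above.  Note also
 * that the checks above clamp bs_bmissthreshold to the range 1..7
 * beacon intervals.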
1449 * 1450 * XXX fixed at 100ms 1451 */ 1452 bs.bs_sleepduration = 1453 roundup(MS_TO_TU(100), bs.bs_intval); 1454 if (bs.bs_sleepduration > bs.bs_dtimperiod) { 1455 bs.bs_sleepduration = 1456 roundup(bs.bs_sleepduration, bs.bs_dtimperiod); 1457 } 1458 1459 DPRINTF(ATH_DEBUG_BEACON, 1460 ("%s: intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u" 1461 " sleep %u\n" 1462 , __func__ 1463 , bs.bs_intval 1464 , bs.bs_nexttbtt 1465 , bs.bs_dtimperiod 1466 , bs.bs_nextdtim 1467 , bs.bs_bmissthreshold 1468 , bs.bs_sleepduration 1469 )); 1470 ath_hal_set_intr(ah, 0); 1471 ath_hal_set_beacon_timers(ah, &bs, 0/*XXX*/, 0, 0); 1472 sc->sc_imask |= HAL_INT_BMISS; 1473 ath_hal_set_intr(ah, sc->sc_imask); 1474 } 1475 #ifndef IEEE80211_STA_ONLY 1476 else { 1477 ath_hal_set_intr(ah, 0); 1478 if (nexttbtt == intval) 1479 intval |= HAL_BEACON_RESET_TSF; 1480 if (ic->ic_opmode == IEEE80211_M_IBSS) { 1481 /* 1482 * In IBSS mode enable the beacon timers but only 1483 * enable SWBA interrupts if we need to manually 1484 * prepare beacon frames. Otherwise we use a 1485 * self-linked tx descriptor and let the hardware 1486 * deal with things. 1487 */ 1488 intval |= HAL_BEACON_ENA; 1489 if (!sc->sc_veol) 1490 sc->sc_imask |= HAL_INT_SWBA; 1491 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) { 1492 /* 1493 * In AP mode we enable the beacon timers and 1494 * SWBA interrupts to prepare beacon frames. 1495 */ 1496 intval |= HAL_BEACON_ENA; 1497 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ 1498 } 1499 ath_hal_init_beacon(ah, nexttbtt, intval); 1500 ath_hal_set_intr(ah, sc->sc_imask); 1501 /* 1502 * When using a self-linked beacon descriptor in IBBS 1503 * mode load it once here. 1504 */ 1505 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol) 1506 ath_beacon_proc(sc, 0); 1507 } 1508 #endif 1509 } 1510 1511 int 1512 ath_desc_alloc(struct ath_softc *sc) 1513 { 1514 int i, bsize, error = -1; 1515 struct ath_desc *ds; 1516 struct ath_buf *bf; 1517 1518 /* allocate descriptors */ 1519 sc->sc_desc_len = sizeof(struct ath_desc) * 1520 (ATH_TXBUF * ATH_TXDESC + ATH_RXBUF + 1); 1521 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_desc_len, PAGE_SIZE, 1522 0, &sc->sc_dseg, 1, &sc->sc_dnseg, 0)) != 0) { 1523 printf("%s: unable to allocate control data, error = %d\n", 1524 sc->sc_dev.dv_xname, error); 1525 goto fail0; 1526 } 1527 1528 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg, 1529 sc->sc_desc_len, (caddr_t *)&sc->sc_desc, BUS_DMA_COHERENT)) != 0) { 1530 printf("%s: unable to map control data, error = %d\n", 1531 sc->sc_dev.dv_xname, error); 1532 goto fail1; 1533 } 1534 1535 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_desc_len, 1, 1536 sc->sc_desc_len, 0, 0, &sc->sc_ddmamap)) != 0) { 1537 printf("%s: unable to create control data DMA map, " 1538 "error = %d\n", sc->sc_dev.dv_xname, error); 1539 goto fail2; 1540 } 1541 1542 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_ddmamap, sc->sc_desc, 1543 sc->sc_desc_len, NULL, 0)) != 0) { 1544 printf("%s: unable to load control data DMA map, error = %d\n", 1545 sc->sc_dev.dv_xname, error); 1546 goto fail3; 1547 } 1548 1549 ds = sc->sc_desc; 1550 sc->sc_desc_paddr = sc->sc_ddmamap->dm_segs[0].ds_addr; 1551 1552 DPRINTF(ATH_DEBUG_XMIT_DESC|ATH_DEBUG_RECV_DESC, 1553 ("ath_desc_alloc: DMA map: %p (%lu) -> %p (%lu)\n", 1554 ds, (u_long)sc->sc_desc_len, 1555 (caddr_t) sc->sc_desc_paddr, /*XXX*/ (u_long) sc->sc_desc_len)); 1556 1557 /* allocate buffers */ 1558 bsize = sizeof(struct ath_buf) * (ATH_TXBUF + ATH_RXBUF + 1); 1559 bf = malloc(bsize, M_DEVBUF, 
M_NOWAIT | M_ZERO); 1560 if (bf == NULL) { 1561 printf("%s: unable to allocate Tx/Rx buffers\n", 1562 sc->sc_dev.dv_xname); 1563 error = ENOMEM; 1564 goto fail3; 1565 } 1566 sc->sc_bufptr = bf; 1567 1568 TAILQ_INIT(&sc->sc_rxbuf); 1569 for (i = 0; i < ATH_RXBUF; i++, bf++, ds++) { 1570 bf->bf_desc = ds; 1571 bf->bf_daddr = sc->sc_desc_paddr + 1572 ((caddr_t)ds - (caddr_t)sc->sc_desc); 1573 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 1574 MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) { 1575 printf("%s: unable to create Rx dmamap, error = %d\n", 1576 sc->sc_dev.dv_xname, error); 1577 goto fail4; 1578 } 1579 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 1580 } 1581 1582 TAILQ_INIT(&sc->sc_txbuf); 1583 for (i = 0; i < ATH_TXBUF; i++, bf++, ds += ATH_TXDESC) { 1584 bf->bf_desc = ds; 1585 bf->bf_daddr = sc->sc_desc_paddr + 1586 ((caddr_t)ds - (caddr_t)sc->sc_desc); 1587 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1588 ATH_TXDESC, MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) { 1589 printf("%s: unable to create Tx dmamap, error = %d\n", 1590 sc->sc_dev.dv_xname, error); 1591 goto fail5; 1592 } 1593 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 1594 } 1595 TAILQ_INIT(&sc->sc_txq); 1596 1597 /* beacon buffer */ 1598 bf->bf_desc = ds; 1599 bf->bf_daddr = sc->sc_desc_paddr + ((caddr_t)ds - (caddr_t)sc->sc_desc); 1600 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 0, 1601 &bf->bf_dmamap)) != 0) { 1602 printf("%s: unable to create beacon dmamap, error = %d\n", 1603 sc->sc_dev.dv_xname, error); 1604 goto fail5; 1605 } 1606 sc->sc_bcbuf = bf; 1607 return 0; 1608 1609 fail5: 1610 for (i = ATH_RXBUF; i < ATH_RXBUF + ATH_TXBUF; i++) { 1611 if (sc->sc_bufptr[i].bf_dmamap == NULL) 1612 continue; 1613 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap); 1614 } 1615 fail4: 1616 for (i = 0; i < ATH_RXBUF; i++) { 1617 if (sc->sc_bufptr[i].bf_dmamap == NULL) 1618 continue; 1619 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap); 1620 } 1621 fail3: 1622 bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap); 1623 fail2: 1624 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap); 1625 sc->sc_ddmamap = NULL; 1626 fail1: 1627 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_desc, sc->sc_desc_len); 1628 fail0: 1629 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg); 1630 return error; 1631 } 1632 1633 void 1634 ath_desc_free(struct ath_softc *sc) 1635 { 1636 struct ath_buf *bf; 1637 1638 bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap); 1639 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap); 1640 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg); 1641 1642 TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) { 1643 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1644 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 1645 m_freem(bf->bf_m); 1646 } 1647 TAILQ_FOREACH(bf, &sc->sc_txbuf, bf_list) 1648 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 1649 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 1650 if (bf->bf_m) { 1651 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1652 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 1653 m_freem(bf->bf_m); 1654 bf->bf_m = NULL; 1655 } 1656 } 1657 if (sc->sc_bcbuf != NULL) { 1658 bus_dmamap_unload(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap); 1659 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap); 1660 sc->sc_bcbuf = NULL; 1661 } 1662 1663 TAILQ_INIT(&sc->sc_rxbuf); 1664 TAILQ_INIT(&sc->sc_txbuf); 1665 TAILQ_INIT(&sc->sc_txq); 1666 free(sc->sc_bufptr, M_DEVBUF, 0); 1667 sc->sc_bufptr = NULL; 1668 } 1669 1670 struct ieee80211_node * 1671 ath_node_alloc(struct 
ieee80211com *ic) 1672 { 1673 struct ath_node *an; 1674 1675 an = malloc(sizeof(*an), M_DEVBUF, M_NOWAIT | M_ZERO); 1676 if (an) { 1677 int i; 1678 for (i = 0; i < ATH_RHIST_SIZE; i++) 1679 an->an_rx_hist[i].arh_ticks = ATH_RHIST_NOTIME; 1680 an->an_rx_hist_next = ATH_RHIST_SIZE-1; 1681 return &an->an_node; 1682 } else 1683 return NULL; 1684 } 1685 1686 void 1687 ath_node_free(struct ieee80211com *ic, struct ieee80211_node *ni) 1688 { 1689 struct ath_softc *sc = ic->ic_if.if_softc; 1690 struct ath_buf *bf; 1691 1692 TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) { 1693 if (bf->bf_node == ni) 1694 bf->bf_node = NULL; 1695 } 1696 (*sc->sc_node_free)(ic, ni); 1697 } 1698 1699 void 1700 ath_node_copy(struct ieee80211com *ic, 1701 struct ieee80211_node *dst, const struct ieee80211_node *src) 1702 { 1703 struct ath_softc *sc = ic->ic_if.if_softc; 1704 1705 bcopy(&src[1], &dst[1], 1706 sizeof(struct ath_node) - sizeof(struct ieee80211_node)); 1707 (*sc->sc_node_copy)(ic, dst, src); 1708 } 1709 1710 u_int8_t 1711 ath_node_getrssi(struct ieee80211com *ic, const struct ieee80211_node *ni) 1712 { 1713 const struct ath_node *an = ATH_NODE(ni); 1714 int i, now, nsamples, rssi; 1715 1716 /* 1717 * Calculate the average over the last second of sampled data. 1718 */ 1719 now = ATH_TICKS(); 1720 nsamples = 0; 1721 rssi = 0; 1722 i = an->an_rx_hist_next; 1723 do { 1724 const struct ath_recv_hist *rh = &an->an_rx_hist[i]; 1725 if (rh->arh_ticks == ATH_RHIST_NOTIME) 1726 goto done; 1727 if (now - rh->arh_ticks > hz) 1728 goto done; 1729 rssi += rh->arh_rssi; 1730 nsamples++; 1731 if (i == 0) { 1732 i = ATH_RHIST_SIZE-1; 1733 } else { 1734 i--; 1735 } 1736 } while (i != an->an_rx_hist_next); 1737 done: 1738 /* 1739 * Return either the average or the last known 1740 * value if there is no recent data. 1741 */ 1742 return (nsamples ? rssi / nsamples : an->an_rx_hist[i].arh_rssi); 1743 } 1744 1745 int 1746 ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) 1747 { 1748 struct ath_hal *ah = sc->sc_ah; 1749 int error; 1750 struct mbuf *m; 1751 struct ath_desc *ds; 1752 1753 m = bf->bf_m; 1754 if (m == NULL) { 1755 /* 1756 * NB: by assigning a page to the rx dma buffer we 1757 * implicitly satisfy the Atheros requirement that 1758 * this buffer be cache-line-aligned and sized to be 1759 * multiple of the cache line size. Not doing this 1760 * causes weird stuff to happen (for the 5210 at least). 1761 */ 1762 m = ath_getmbuf(M_DONTWAIT, MT_DATA, MCLBYTES); 1763 if (m == NULL) { 1764 DPRINTF(ATH_DEBUG_ANY, 1765 ("%s: no mbuf/cluster\n", __func__)); 1766 sc->sc_stats.ast_rx_nombuf++; 1767 return ENOMEM; 1768 } 1769 bf->bf_m = m; 1770 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 1771 1772 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m, 1773 BUS_DMA_NOWAIT); 1774 if (error != 0) { 1775 DPRINTF(ATH_DEBUG_ANY, 1776 ("%s: ath_bus_dmamap_load_mbuf failed;" 1777 " error %d\n", __func__, error)); 1778 sc->sc_stats.ast_rx_busdma++; 1779 return error; 1780 } 1781 KASSERT(bf->bf_nseg == 1, 1782 ("ath_rxbuf_init: multi-segment packet; nseg %u", 1783 bf->bf_nseg)); 1784 } 1785 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 1786 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1787 1788 /* 1789 * Setup descriptors. For receive we always terminate 1790 * the descriptor list with a self-linked entry so we'll 1791 * not get overrun under high load (as can happen with a 1792 * 5212 when ANI processing enables PHY errors). 
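 *
 * Illustrative note, not part of the original source: because all
 * descriptors live in the single contiguous block allocated by
 * ath_desc_alloc(), the PA2DESC() macro defined in ath_rx_proc()
 * below can translate the physical address found in ds_link back to
 * a driver virtual address with plain pointer arithmetic:
 *
 *	va = (caddr_t)sc->sc_desc + (pa - sc->sc_desc_paddr);
 *
 * For example (hypothetical numbers), if sc_desc_paddr were
 * 0x01000000 and ds_link held 0x01000000 + 2 * sizeof(struct
 * ath_desc), PA2DESC() would yield &sc->sc_desc[2].  The final,
 * self-linked descriptor is recognized in ath_rx_proc() by ds_link
 * pointing at the buffer's own bf_daddr and is never processed, so
 * the hardware always has somewhere to DMA the next frame.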
1793 * 1794 * To insure the last descriptor is self-linked we create 1795 * each descriptor as self-linked and add it to the end. As 1796 * each additional descriptor is added the previous self-linked 1797 * entry is ``fixed'' naturally. This should be safe even 1798 * if DMA is happening. When processing RX interrupts we 1799 * never remove/process the last, self-linked, entry on the 1800 * descriptor list. This insures the hardware always has 1801 * someplace to write a new frame. 1802 */ 1803 ds = bf->bf_desc; 1804 bzero(ds, sizeof(struct ath_desc)); 1805 #ifndef IEEE80211_STA_ONLY 1806 if (sc->sc_ic.ic_opmode != IEEE80211_M_HOSTAP) 1807 ds->ds_link = bf->bf_daddr; /* link to self */ 1808 #endif 1809 ds->ds_data = bf->bf_segs[0].ds_addr; 1810 ath_hal_setup_rx_desc(ah, ds 1811 , m->m_len /* buffer size */ 1812 , 0 1813 ); 1814 1815 if (sc->sc_rxlink != NULL) 1816 *sc->sc_rxlink = bf->bf_daddr; 1817 sc->sc_rxlink = &ds->ds_link; 1818 return 0; 1819 } 1820 1821 void 1822 ath_rx_proc(void *arg, int npending) 1823 { 1824 #define PA2DESC(_sc, _pa) \ 1825 ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \ 1826 ((_pa) - (_sc)->sc_desc_paddr))) 1827 struct ath_softc *sc = arg; 1828 struct ath_buf *bf; 1829 struct ieee80211com *ic = &sc->sc_ic; 1830 struct ifnet *ifp = &ic->ic_if; 1831 struct ath_hal *ah = sc->sc_ah; 1832 struct ath_desc *ds; 1833 struct mbuf *m; 1834 struct ieee80211_frame *wh; 1835 struct ieee80211_frame whbuf; 1836 struct ieee80211_rxinfo rxi; 1837 struct ieee80211_node *ni; 1838 struct ath_node *an; 1839 struct ath_recv_hist *rh; 1840 int len; 1841 u_int phyerr; 1842 HAL_STATUS status; 1843 1844 DPRINTF(ATH_DEBUG_RX_PROC, ("%s: pending %u\n", __func__, npending)); 1845 do { 1846 bf = TAILQ_FIRST(&sc->sc_rxbuf); 1847 if (bf == NULL) { /* NB: shouldn't happen */ 1848 printf("%s: ath_rx_proc: no buffer!\n", ifp->if_xname); 1849 break; 1850 } 1851 ds = bf->bf_desc; 1852 if (ds->ds_link == bf->bf_daddr) { 1853 /* NB: never process the self-linked entry at the end */ 1854 break; 1855 } 1856 m = bf->bf_m; 1857 if (m == NULL) { /* NB: shouldn't happen */ 1858 printf("%s: ath_rx_proc: no mbuf!\n", ifp->if_xname); 1859 continue; 1860 } 1861 /* XXX sync descriptor memory */ 1862 /* 1863 * Must provide the virtual address of the current 1864 * descriptor, the physical address, and the virtual 1865 * address of the next descriptor in the h/w chain. 1866 * This allows the HAL to look ahead to see if the 1867 * hardware is done with a descriptor by checking the 1868 * done bit in the following descriptor and the address 1869 * of the current descriptor the DMA engine is working 1870 * on. All this is necessary because of our use of 1871 * a self-linked list to avoid rx overruns. 1872 */ 1873 status = ath_hal_proc_rx_desc(ah, ds, 1874 bf->bf_daddr, PA2DESC(sc, ds->ds_link)); 1875 #ifdef AR_DEBUG 1876 if (ath_debug & ATH_DEBUG_RECV_DESC) 1877 ath_printrxbuf(bf, status == HAL_OK); 1878 #endif 1879 if (status == HAL_EINPROGRESS) 1880 break; 1881 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 1882 1883 if (ds->ds_rxstat.rs_more) { 1884 /* 1885 * Frame spans multiple descriptors; this 1886 * cannot happen yet as we don't support 1887 * jumbograms. If not in monitor mode, 1888 * discard the frame. 1889 */ 1890 1891 /* 1892 * Enable this if you want to see error 1893 * frames in Monitor mode. 1894 */ 1895 #ifdef ERROR_FRAMES 1896 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 1897 /* XXX statistic */ 1898 goto rx_next; 1899 } 1900 #endif 1901 /* fall thru for monitor mode handling... 

		} else if (ds->ds_rxstat.rs_status != 0) {
			if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC)
				sc->sc_stats.ast_rx_crcerr++;
			if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO)
				sc->sc_stats.ast_rx_fifoerr++;
			if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT)
				sc->sc_stats.ast_rx_badcrypt++;
			if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) {
				sc->sc_stats.ast_rx_phyerr++;
				phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
				sc->sc_stats.ast_rx_phy[phyerr]++;
			}

			/*
			 * Reject error frames; we normally don't want
			 * to see them in monitor mode.
			 */
			if ((ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) ||
			    (ds->ds_rxstat.rs_status & HAL_RXERR_PHY))
				goto rx_next;

			/*
			 * In monitor mode, allow through packets that
			 * cannot be decrypted.
			 */
			if ((ds->ds_rxstat.rs_status & ~HAL_RXERR_DECRYPT) ||
			    sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR)
				goto rx_next;
		}

		len = ds->ds_rxstat.rs_datalen;
		if (len < IEEE80211_MIN_LEN) {
			DPRINTF(ATH_DEBUG_RECV, ("%s: short packet %d\n",
			    __func__, len));
			sc->sc_stats.ast_rx_tooshort++;
			goto rx_next;
		}

		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
		    bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		bf->bf_m = NULL;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		if (sc->sc_drvbpf) {
			struct mbuf mb;

			sc->sc_rxtap.wr_flags = IEEE80211_RADIOTAP_F_FCS;
			sc->sc_rxtap.wr_rate =
			    sc->sc_hwmap[ds->ds_rxstat.rs_rate] &
			    IEEE80211_RATE_VAL;
			sc->sc_rxtap.wr_antenna = ds->ds_rxstat.rs_antenna;
			sc->sc_rxtap.wr_rssi = ds->ds_rxstat.rs_rssi;
			sc->sc_rxtap.wr_max_rssi = ic->ic_max_rssi;

			mb.m_data = (caddr_t)&sc->sc_rxtap;
			mb.m_len = sc->sc_rxtap_len;
			mb.m_next = m;
			mb.m_nextpkt = NULL;
			mb.m_type = 0;
			mb.m_flags = 0;
			bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
		}
#endif
		m_adj(m, -IEEE80211_CRC_LEN);
		wh = mtod(m, struct ieee80211_frame *);
		rxi.rxi_flags = 0;
		if (!ath_softcrypto && (wh->i_fc[1] & IEEE80211_FC1_WEP)) {
			/*
			 * WEP is decrypted by hardware.  Clear WEP bit
			 * and trim WEP header for ieee80211_input().
			 */
			wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
			bcopy(wh, &whbuf, sizeof(whbuf));
			m_adj(m, IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN);
			wh = mtod(m, struct ieee80211_frame *);
			bcopy(&whbuf, wh, sizeof(whbuf));
			/*
			 * Also trim WEP ICV from the tail.
			 */
			m_adj(m, -IEEE80211_WEP_CRCLEN);
			/*
			 * The header has probably moved.
			 */
			wh = mtod(m, struct ieee80211_frame *);

			rxi.rxi_flags |= IEEE80211_RXI_HWDEC;
		}

		/*
		 * Locate the node for sender, track state, and
		 * then pass this node (referenced) up to the 802.11
		 * layer for its use.
		 */
		ni = ieee80211_find_rxnode(ic, wh);

		/*
		 * Record driver-specific state.
		 */
		an = ATH_NODE(ni);
		if (++(an->an_rx_hist_next) == ATH_RHIST_SIZE)
			an->an_rx_hist_next = 0;
		rh = &an->an_rx_hist[an->an_rx_hist_next];
		rh->arh_ticks = ATH_TICKS();
		rh->arh_rssi = ds->ds_rxstat.rs_rssi;
		rh->arh_antenna = ds->ds_rxstat.rs_antenna;

		/*
		 * Send frame up for processing.
		 */
		rxi.rxi_rssi = ds->ds_rxstat.rs_rssi;
		rxi.rxi_tstamp = ds->ds_rxstat.rs_tstamp;
		ieee80211_input(ifp, m, ni, &rxi);

		/* Handle the rate adaptation */
		ieee80211_rssadapt_input(ic, ni, &an->an_rssadapt,
		    ds->ds_rxstat.rs_rssi);

		/*
		 * The frame may have caused the node to be marked for
		 * reclamation (e.g. in response to a DEAUTH message)
		 * so use release_node here instead of unref_node.
		 */
		ieee80211_release_node(ic, ni);

	rx_next:
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
	} while (ath_rxbuf_init(sc, bf) == 0);

	ath_hal_set_rx_signal(ah);	/* rx signal state monitoring */
	ath_hal_start_rx(ah);		/* in case of RXEOL */
#undef PA2DESC
}

/*
 * XXX Size of an ACK control frame in bytes.
 */
#define	IEEE80211_ACK_SIZE	(2+2+IEEE80211_ADDR_LEN+4)

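/*
 * Prepare a frame for transmission: apply WEP/crypto handling,
 * load the DMA map, build the transmit descriptors and hand the
 * frame to the appropriate hardware transmit queue.
 */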
int
ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int i, error, iswep, hdrlen, pktlen, len, s, tries;
	u_int8_t rix, cix, txrate, ctsrate;
	struct ath_desc *ds;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k;
	u_int32_t iv;
	u_int8_t *ivp;
	u_int8_t hdrbuf[sizeof(struct ieee80211_frame) +
	    IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN];
	u_int subtype, flags, ctsduration, antenna;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	u_int8_t hwqueue = HAL_TX_QUEUE_ID_DATA_MIN;

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	hdrlen = sizeof(struct ieee80211_frame);
	pktlen = m0->m_pkthdr.len;

	if (ath_softcrypto && iswep) {
		k = ieee80211_get_txkey(ic, wh, ni);
		if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL)
			return ENOMEM;
		wh = mtod(m0, struct ieee80211_frame *);

		/* reset len in case we got a new mbuf */
		pktlen = m0->m_pkthdr.len;
	} else if (!ath_softcrypto && iswep) {
		bcopy(mtod(m0, caddr_t), hdrbuf, hdrlen);
		m_adj(m0, hdrlen);
		M_PREPEND(m0, sizeof(hdrbuf), M_DONTWAIT);
		if (m0 == NULL) {
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		ivp = hdrbuf + hdrlen;
		wh = mtod(m0, struct ieee80211_frame *);
		/*
		 * XXX
		 * IV must not duplicate during the lifetime of the key.
		 * But no mechanism to renew keys is defined in IEEE 802.11
		 * for WEP.  And the IV may be duplicated at other stations
		 * because the session key itself is shared.  So we use a
		 * pseudo random IV for now, though it is not the right way.
		 *
		 * NB: Rather than use a strictly random IV we select a
		 * random one to start and then increment the value for
		 * each frame.  This is an explicit tradeoff between
		 * overhead and security.  Given the basic insecurity of
		 * WEP this seems worthwhile.
		 */

		/*
		 * Skip 'bad' IVs from Fluhrer/Mantin/Shamir:
		 * (B, 255, N) with 3 <= B < 16 and 0 <= N <= 255
		 */
		iv = ic->ic_iv;
		if ((iv & 0xff00) == 0xff00) {
			int B = (iv & 0xff0000) >> 16;
			if (3 <= B && B < 16)
				iv = (B+1) << 16;
		}
		ic->ic_iv = iv + 1;

		/*
		 * NB: Preserve byte order of IV for packet
		 * sniffers; it doesn't matter otherwise.
		 */
#if BYTE_ORDER == BIG_ENDIAN
		ivp[0] = iv >> 0;
		ivp[1] = iv >> 8;
		ivp[2] = iv >> 16;
#else
		ivp[2] = iv >> 0;
		ivp[1] = iv >> 8;
		ivp[0] = iv >> 16;
#endif
		ivp[3] = ic->ic_wep_txkey << 6;	/* Key ID and pad */
		bcopy(hdrbuf, mtod(m0, caddr_t), sizeof(hdrbuf));
		/*
		 * hdrlen and pktlen must be increased to account for WEP.
		 */
		len = IEEE80211_WEP_IVLEN +
		    IEEE80211_WEP_KIDLEN +
		    IEEE80211_WEP_CRCLEN;
		hdrlen += len;
		pktlen += len;
	}
	pktlen += IEEE80211_CRC_LEN;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0,
	    BUS_DMA_NOWAIT);
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		if (m_defrag(m0, M_DONTWAIT)) {
			sc->sc_stats.ast_tx_nomcl++;
			m_freem(m0);
			return ENOMEM;
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg == 1,
		    ("ath_tx_start: packet not one segment; nseg %u",
		    bf->bf_nseg));
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		m_freem(m0);
		return error;
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(ATH_DEBUG_XMIT, ("%s: m %p len %u\n", __func__, m0, pktlen));
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
	    bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;
	bf->bf_node = ni;	/* NB: held reference */
	an = ATH_NODE(ni);

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));

	/*
	 * Calculate Atheros packet type from IEEE80211 packet header
	 * and setup for rate calculations.
	 */
	bf->bf_id.id_node = NULL;
	atype = HAL_PKT_TYPE_NORMAL;	/* default */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) {
			atype = HAL_PKT_TYPE_BEACON;
		} else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
			atype = HAL_PKT_TYPE_PROBE_RESP;
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) {
			atype = HAL_PKT_TYPE_ATIM;
		}
		rix = 0;	/* XXX lowest rate */
		break;
	case IEEE80211_FC0_TYPE_CTL:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_PS_POLL)
			atype = HAL_PKT_TYPE_PSPOLL;
		rix = 0;	/* XXX lowest rate */
		break;
	default:
		/* remember link conditions for rate adaptation algorithm */
		if (ic->ic_fixed_rate == -1) {
			bf->bf_id.id_len = m0->m_pkthdr.len;
			bf->bf_id.id_rateidx = ni->ni_txrate;
			bf->bf_id.id_node = ni;
			bf->bf_id.id_rssi = ath_node_getrssi(ic, ni);
		}
		ni->ni_txrate = ieee80211_rssadapt_choose(&an->an_rssadapt,
		    &ni->ni_rates, wh, m0->m_pkthdr.len, ic->ic_fixed_rate,
		    ifp->if_xname, 0);
		rix = sc->sc_rixmap[ni->ni_rates.rs_rates[ni->ni_txrate] &
		    IEEE80211_RATE_VAL];
		if (rix == 0xff) {
			printf("%s: bogus xmit rate 0x%x (idx 0x%x)\n",
			    ifp->if_xname, ni->ni_rates.rs_rates[ni->ni_txrate],
			    ni->ni_txrate);
			sc->sc_stats.ast_tx_badrate++;
			m_freem(m0);
			return EIO;
		}
		break;
	}

	/*
	 * NB: the 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		txrate = rt->info[rix].rateCode | rt->info[rix].shortPreamble;
		shortPreamble = AH_TRUE;
		sc->sc_stats.ast_tx_shortpre++;
	} else {
		txrate = rt->info[rix].rateCode;
		shortPreamble = AH_FALSE;
	}

	/*
	 * Calculate miscellaneous flags.
	 */
	flags = HAL_TXDESC_CLRDMASK;	/* XXX needed for wep errors */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
		sc->sc_stats.ast_tx_noack++;
	} else if (pktlen > ic->ic_rtsthreshold) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		sc->sc_stats.ast_tx_rts++;
	}

	/*
	 * Calculate duration.  This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
		u_int16_t dur;
		/*
		 * XXX not right with fragmentation.
		 */
		dur = ath_hal_computetxtime(ah, rt, IEEE80211_ACK_SIZE,
		    rix, shortPreamble);
		*((u_int16_t*) wh->i_dur) = htole16(dur);
	}

	/*
	 * Calculate RTS/CTS rate and duration if needed.
	 */
	ctsduration = 0;
	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
		/*
		 * CTS transmit rate is derived from the transmit rate
		 * by looking in the h/w rate table.  We must also factor
		 * in whether or not a short preamble is to be used.
		 */
		cix = rt->info[rix].controlRate;
		ctsrate = rt->info[cix].rateCode;
		if (shortPreamble)
			ctsrate |= rt->info[cix].shortPreamble;
		/*
		 * Compute the transmit duration based on the size
		 * of an ACK frame.  We call into the HAL to do the
		 * computation since it depends on the characteristics
		 * of the actual PHY being used.
		 */
		if (flags & HAL_TXDESC_RTSENA) {	/* SIFS + CTS */
			ctsduration += ath_hal_computetxtime(ah,
			    rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
		}
		/* SIFS + data */
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, shortPreamble);
		if ((flags & HAL_TXDESC_NOACK) == 0) {	/* SIFS + ACK */
			ctsduration += ath_hal_computetxtime(ah,
			    rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
		}
	} else
		ctsrate = 0;

	/*
	 * For now use the antenna on which the last good
	 * frame was received.  We assume this field is
	 * initialized to 0 which gives us ``auto'' or the
	 * ``default'' antenna.
	 */
	if (an->an_tx_antenna) {
		antenna = an->an_tx_antenna;
	} else {
		antenna = an->an_rx_hist[an->an_rx_hist_next].arh_antenna;
	}

#if NBPFILTER > 0
	if (ic->ic_rawbpf)
		bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);

	if (sc->sc_drvbpf) {
		struct mbuf mb;

		sc->sc_txtap.wt_flags = 0;
		if (shortPreamble)
			sc->sc_txtap.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		if (!ath_softcrypto && iswep)
			sc->sc_txtap.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		sc->sc_txtap.wt_rate = ni->ni_rates.rs_rates[ni->ni_txrate] &
		    IEEE80211_RATE_VAL;
		sc->sc_txtap.wt_txpower = 30;
		sc->sc_txtap.wt_antenna = antenna;
		sc->sc_txtap.wt_hwqueue = hwqueue;

		mb.m_data = (caddr_t)&sc->sc_txtap;
		mb.m_len = sc->sc_txtap_len;
		mb.m_next = m0;
		mb.m_nextpkt = NULL;
		mb.m_type = 0;
		mb.m_flags = 0;
		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
	}
#endif

	/*
	 * Formulate first tx descriptor with tx controls.
	 */
	tries = IEEE80211_IS_MULTICAST(wh->i_addr1) ? 1 : 15;
	/* XXX check return value? */
	ath_hal_setup_tx_desc(ah, ds
	    , pktlen		/* packet length */
	    , hdrlen		/* header length */
	    , atype		/* Atheros packet type */
	    , 60		/* txpower XXX */
	    , txrate, tries	/* series 0 rate/tries */
	    , iswep ? sc->sc_ic.ic_wep_txkey : HAL_TXKEYIX_INVALID
	    , antenna		/* antenna mode */
	    , flags		/* flags */
	    , ctsrate		/* rts/cts rate */
	    , ctsduration	/* rts/cts duration */
	);
#ifdef notyet
	ath_hal_setup_xtx_desc(ah, ds
	    , AH_FALSE		/* short preamble */
	    , 0, 0		/* series 1 rate/tries */
	    , 0, 0		/* series 2 rate/tries */
	    , 0, 0		/* series 3 rate/tries */
	);
#endif
	/*
	 * Fill in the remainder of the descriptor info.
	 */
	for (i = 0; i < bf->bf_nseg; i++, ds++) {
		ds->ds_data = bf->bf_segs[i].ds_addr;
		if (i == bf->bf_nseg - 1) {
			ds->ds_link = 0;
		} else {
			ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1);
		}
		ath_hal_fill_tx_desc(ah, ds
		    , bf->bf_segs[i].ds_len	/* segment length */
		    , i == 0			/* first segment */
		    , i == bf->bf_nseg - 1	/* last segment */
		);
		DPRINTF(ATH_DEBUG_XMIT,
		    ("%s: %d: %08x %08x %08x %08x %08x %08x\n",
		    __func__, i, ds->ds_link, ds->ds_data,
		    ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]));
	}

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */
	s = splnet();
	TAILQ_INSERT_TAIL(&sc->sc_txq, bf, bf_list);
	if (sc->sc_txlink == NULL) {
		ath_hal_put_tx_buf(ah, sc->sc_txhalq[hwqueue], bf->bf_daddr);
		DPRINTF(ATH_DEBUG_XMIT, ("%s: TXDP0 = %p (%p)\n", __func__,
		    (caddr_t)bf->bf_daddr, bf->bf_desc));
	} else {
		*sc->sc_txlink = bf->bf_daddr;
		DPRINTF(ATH_DEBUG_XMIT, ("%s: link(%p)=%p (%p)\n", __func__,
		    sc->sc_txlink, (caddr_t)bf->bf_daddr, bf->bf_desc));
	}
	sc->sc_txlink = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
	splx(s);

	ath_hal_tx_start(ah, sc->sc_txhalq[hwqueue]);
	return 0;
}

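/*
 * Transmit completion processing: reap completed descriptors from
 * the transmit queue, update rate-adaptation state and statistics,
 * and return buffers to the free list.
 */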
void
ath_tx_proc(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ath_desc *ds;
	struct ieee80211_node *ni;
	struct ath_node *an;
	int sr, lr, s;
	HAL_STATUS status;

	for (;;) {
		s = splnet();
		bf = TAILQ_FIRST(&sc->sc_txq);
		if (bf == NULL) {
			sc->sc_txlink = NULL;
			splx(s);
			break;
		}
		/* only the last descriptor is needed */
		ds = &bf->bf_desc[bf->bf_nseg - 1];
		status = ath_hal_proc_tx_desc(ah, ds);
#ifdef AR_DEBUG
		if (ath_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(bf, status == HAL_OK);
#endif
		if (status == HAL_EINPROGRESS) {
			splx(s);
			break;
		}
		TAILQ_REMOVE(&sc->sc_txq, bf, bf_list);
		splx(s);

		ni = bf->bf_node;
		if (ni != NULL) {
			an = (struct ath_node *) ni;
			if (ds->ds_txstat.ts_status == 0) {
				if (bf->bf_id.id_node != NULL)
					ieee80211_rssadapt_raise_rate(ic,
					    &an->an_rssadapt, &bf->bf_id);
				an->an_tx_antenna = ds->ds_txstat.ts_antenna;
			} else {
				if (bf->bf_id.id_node != NULL)
					ieee80211_rssadapt_lower_rate(ic, ni,
					    &an->an_rssadapt, &bf->bf_id);
				if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY)
					sc->sc_stats.ast_tx_xretries++;
				if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO)
					sc->sc_stats.ast_tx_fifoerr++;
				if (ds->ds_txstat.ts_status & HAL_TXERR_FILT)
					sc->sc_stats.ast_tx_filtered++;
				an->an_tx_antenna = 0;	/* invalidate */
			}
			sr = ds->ds_txstat.ts_shortretry;
			lr = ds->ds_txstat.ts_longretry;
			sc->sc_stats.ast_tx_shortretry += sr;
			sc->sc_stats.ast_tx_longretry += lr;
			/*
			 * Reclaim reference to node.
			 *
			 * NB: the node may be reclaimed here if, for example
			 * this is a DEAUTH message that was sent and the
			 * node was timed out due to inactivity.
			 */
			ieee80211_release_node(ic, ni);
		}
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
		    bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		bf->bf_node = NULL;

		s = splnet();
		TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		splx(s);
	}
	ifq_clr_oactive(&ifp->if_snd);
	sc->sc_tx_timer = 0;

	ath_start(ifp);
}

/*
 * Drain the transmit queue and reclaim resources.
 */
void
ath_draintxq(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	int s, i;

	/* XXX return value */
	if (!sc->sc_invalid) {
		for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) {
			/* don't touch the hardware if marked invalid */
			(void) ath_hal_stop_tx_dma(ah, sc->sc_txhalq[i]);
			DPRINTF(ATH_DEBUG_RESET,
			    ("%s: tx queue %d (%p), link %p\n", __func__, i,
			    (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah,
			    sc->sc_txhalq[i]), sc->sc_txlink));
		}
		(void) ath_hal_stop_tx_dma(ah, sc->sc_bhalq);
		DPRINTF(ATH_DEBUG_RESET,
		    ("%s: beacon queue (%p)\n", __func__,
		    (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah, sc->sc_bhalq)));
	}
	for (;;) {
		s = splnet();
		bf = TAILQ_FIRST(&sc->sc_txq);
		if (bf == NULL) {
			sc->sc_txlink = NULL;
			splx(s);
			break;
		}
		TAILQ_REMOVE(&sc->sc_txq, bf, bf_list);
		splx(s);
#ifdef AR_DEBUG
		if (ath_debug & ATH_DEBUG_RESET) {
			ath_printtxbuf(bf,
			    ath_hal_proc_tx_desc(ah, bf->bf_desc) == HAL_OK);
		}
#endif /* AR_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		ni = bf->bf_node;
		bf->bf_node = NULL;
		s = splnet();
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_release_node(ic, ni);
		}
		TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		splx(s);
	}
	ifq_clr_oactive(&ifp->if_snd);
	sc->sc_tx_timer = 0;
}

/*
 * Disable the receive h/w in preparation for a reset.
 */
void
ath_stoprecv(struct ath_softc *sc)
{
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \
	((_pa) - (_sc)->sc_desc_paddr)))
	struct ath_hal *ah = sc->sc_ah;

	ath_hal_stop_pcu_recv(ah);	/* disable PCU */
	ath_hal_set_rx_filter(ah, 0);	/* clear recv filter */
	ath_hal_stop_rx_dma(ah);	/* disable DMA engine */
#ifdef AR_DEBUG
	if (ath_debug & ATH_DEBUG_RESET) {
		struct ath_buf *bf;

		printf("%s: rx queue %p, link %p\n", __func__,
		    (caddr_t)(u_intptr_t)ath_hal_get_rx_buf(ah), sc->sc_rxlink);
		TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			struct ath_desc *ds = bf->bf_desc;
			if (ath_hal_proc_rx_desc(ah, ds, bf->bf_daddr,
			    PA2DESC(sc, ds->ds_link)) == HAL_OK)
				ath_printrxbuf(bf, 1);
		}
	}
#endif
	sc->sc_rxlink = NULL;		/* just in case */
#undef PA2DESC
}

/*
 * Enable the receive h/w following a reset.
 */
int
ath_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;

	sc->sc_rxlink = NULL;
	TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
		int error = ath_rxbuf_init(sc, bf);
		if (error != 0) {
			DPRINTF(ATH_DEBUG_RECV,
			    ("%s: ath_rxbuf_init failed %d\n",
			    __func__, error));
			return error;
		}
	}

	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	ath_hal_put_rx_buf(ah, bf->bf_daddr);
	ath_hal_start_rx(ah);		/* enable recv descriptors */
	ath_mode_init(sc);		/* set filters, etc. */
	ath_hal_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */
	return 0;
}

/*
 * Set/change channels.  If the channel is really being changed,
 * it's done by resetting the chip.  To accomplish this we must
 * first clean up any pending DMA, then restart things as in
 * ath_init.
 */
int
ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;

	DPRINTF(ATH_DEBUG_ANY, ("%s: %u (%u MHz) -> %u (%u MHz)\n", __func__,
	    ieee80211_chan2ieee(ic, ic->ic_ibss_chan),
	    ic->ic_ibss_chan->ic_freq,
	    ieee80211_chan2ieee(ic, chan), chan->ic_freq));
	if (chan != ic->ic_ibss_chan) {
		HAL_STATUS status;
		HAL_CHANNEL hchan;
		enum ieee80211_phymode mode;

		/*
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		ath_hal_set_intr(ah, 0);	/* disable interrupts */
		ath_draintxq(sc);		/* clear pending tx frames */
		ath_stoprecv(sc);		/* turn off frame recv */
		/*
		 * Convert to a HAL channel description with
		 * the flags constrained to reflect the current
		 * operating mode.
		 */
		hchan.channel = chan->ic_freq;
		hchan.channelFlags = ath_chan2flags(ic, chan);
		if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE,
		    &status)) {
			printf("%s: ath_chan_set: unable to reset "
			    "channel %u (%u MHz)\n", ifp->if_xname,
			    ieee80211_chan2ieee(ic, chan), chan->ic_freq);
			return EIO;
		}
		ath_set_slot_time(sc);
		/*
		 * Re-enable rx framework.
		 */
		if (ath_startrecv(sc) != 0) {
			printf("%s: ath_chan_set: unable to restart recv "
			    "logic\n", ifp->if_xname);
			return EIO;
		}

#if NBPFILTER > 0
		/*
		 * Update BPF state.
		 */
		sc->sc_txtap.wt_chan_freq = sc->sc_rxtap.wr_chan_freq =
		    htole16(chan->ic_freq);
		sc->sc_txtap.wt_chan_flags = sc->sc_rxtap.wr_chan_flags =
		    htole16(chan->ic_flags);
#endif

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		ic->ic_ibss_chan = chan;
		mode = ieee80211_chan2mode(ic, chan);
		if (mode != sc->sc_curmode)
			ath_setcurmode(sc, mode);

		/*
		 * Re-enable interrupts.
		 */
		ath_hal_set_intr(ah, sc->sc_imask);
	}
	return 0;
}

void
ath_next_scan(void *arg)
{
	struct ath_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	int s;

	/* don't call ath_start w/o network interrupts blocked */
	s = splnet();

	if (ic->ic_state == IEEE80211_S_SCAN)
		ieee80211_next_scan(ifp);
	splx(s);
}

int
ath_set_slot_time(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;

	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		return (ath_hal_set_slot_time(ah, HAL_SLOT_TIME_9));

	return (0);
}

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
void
ath_calibrate(void *arg)
{
	struct ath_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	HAL_CHANNEL hchan;
	int s;

	sc->sc_stats.ast_per_cal++;

	/*
	 * Convert to a HAL channel description with the flags
	 * constrained to reflect the current operating mode.
	 */
	c = ic->ic_ibss_chan;
	hchan.channel = c->ic_freq;
	hchan.channelFlags = ath_chan2flags(ic, c);

	s = splnet();
	DPRINTF(ATH_DEBUG_CALIBRATE,
	    ("%s: channel %u/%x\n", __func__, c->ic_freq, c->ic_flags));

	if (ath_hal_get_rf_gain(ah) == HAL_RFGAIN_NEED_CHANGE) {
		/*
		 * Rfgain is out of bounds, reset the chip
		 * to load new gain values.
		 */
		sc->sc_stats.ast_per_rfgain++;
		ath_reset(sc, 1);
	}
	if (!ath_hal_calibrate(ah, &hchan)) {
		DPRINTF(ATH_DEBUG_ANY,
		    ("%s: calibration of channel %u failed\n",
		    __func__, c->ic_freq));
		sc->sc_stats.ast_per_calfail++;
	}
	timeout_add_sec(&sc->sc_cal_to, ath_calinterval);
	splx(s);
}

void
ath_ledstate(struct ath_softc *sc, enum ieee80211_state state)
{
	HAL_LED_STATE led = HAL_LED_INIT;
	u_int32_t softled = AR5K_SOFTLED_OFF;

	switch (state) {
	case IEEE80211_S_INIT:
		break;
	case IEEE80211_S_SCAN:
		led = HAL_LED_SCAN;
		break;
	case IEEE80211_S_AUTH:
		led = HAL_LED_AUTH;
		break;
	case IEEE80211_S_ASSOC:
		led = HAL_LED_ASSOC;
		softled = AR5K_SOFTLED_ON;
		break;
	case IEEE80211_S_RUN:
		led = HAL_LED_RUN;
		softled = AR5K_SOFTLED_ON;
		break;
	}

	ath_hal_set_ledstate(sc->sc_ah, led);
	if (sc->sc_softled) {
		ath_hal_set_gpio_output(sc->sc_ah, AR5K_SOFTLED_PIN);
		ath_hal_set_gpio(sc->sc_ah, AR5K_SOFTLED_PIN, softled);
	}
}

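/*
 * Handle 802.11 state transitions: program the BSSID, RX filter
 * and key state for the new state and (re)arm the calibration,
 * scan and rate-adaptation timers as appropriate.
 */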
int
ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ifnet *ifp = &ic->ic_if;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *ni;
	const u_int8_t *bssid;
	int error, i;

	u_int32_t rfilt;

	DPRINTF(ATH_DEBUG_ANY, ("%s: %s -> %s\n", __func__,
	    ieee80211_state_name[ic->ic_state],
	    ieee80211_state_name[nstate]));

	timeout_del(&sc->sc_scan_to);
	timeout_del(&sc->sc_cal_to);
	ath_ledstate(sc, nstate);

	if (nstate == IEEE80211_S_INIT) {
		timeout_del(&sc->sc_rssadapt_to);
		sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		ath_hal_set_intr(ah, sc->sc_imask);
		return (*sc->sc_newstate)(ic, nstate, arg);
	}
	ni = ic->ic_bss;
	error = ath_chan_set(sc, ni->ni_chan);
	if (error != 0)
		goto bad;
	rfilt = ath_calcrxfilter(sc);
	if (nstate == IEEE80211_S_SCAN ||
	    ic->ic_opmode == IEEE80211_M_MONITOR) {
		bssid = sc->sc_broadcast_addr;
	} else {
		bssid = ni->ni_bssid;
	}
	ath_hal_set_rx_filter(ah, rfilt);
	DPRINTF(ATH_DEBUG_ANY, ("%s: RX filter 0x%x bssid %s\n",
	    __func__, rfilt, ether_sprintf((u_char*)bssid)));

	if (nstate == IEEE80211_S_RUN && ic->ic_opmode == IEEE80211_M_STA) {
		ath_hal_set_associd(ah, bssid, ni->ni_associd);
	} else {
		ath_hal_set_associd(ah, bssid, 0);
	}

	if (!ath_softcrypto && (ic->ic_flags & IEEE80211_F_WEPON)) {
		for (i = 0; i < IEEE80211_WEP_NKID; i++) {
			if (ath_hal_is_key_valid(ah, i))
				ath_hal_set_key_lladdr(ah, i, bssid);
		}
	}

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* nothing to do */
	} else if (nstate == IEEE80211_S_RUN) {
		DPRINTF(ATH_DEBUG_ANY, ("%s(RUN): "
		    "ic_flags=0x%08x iv=%d bssid=%s "
		    "capinfo=0x%04x chan=%d\n",
		    __func__,
		    ic->ic_flags,
		    ni->ni_intval,
		    ether_sprintf(ni->ni_bssid),
		    ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ni->ni_chan)));

		/*
		 * Allocate and set up the beacon frame for AP or adhoc mode.
		 */
#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
		    ic->ic_opmode == IEEE80211_M_IBSS) {
			error = ath_beacon_alloc(sc, ni);
			if (error != 0)
				goto bad;
		}
#endif
		/*
		 * Configure the beacon and sleep timers.
		 */
		ath_beacon_config(sc);
	} else {
		sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		ath_hal_set_intr(ah, sc->sc_imask);
	}

	/*
	 * Invoke the parent method to complete the work.
	 */
	error = (*sc->sc_newstate)(ic, nstate, arg);

	if (nstate == IEEE80211_S_RUN) {
		/* start periodic recalibration timer */
		timeout_add_sec(&sc->sc_cal_to, ath_calinterval);

		if (ic->ic_opmode != IEEE80211_M_MONITOR)
			timeout_add_msec(&sc->sc_rssadapt_to, 100);
	} else if (nstate == IEEE80211_S_SCAN) {
		/* start ap/neighbor scan timer */
		timeout_add_msec(&sc->sc_scan_to, ath_dwelltime);
	}
bad:
	return error;
}

#ifndef IEEE80211_STA_ONLY
void
ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m,
    struct ieee80211_node *ni, struct ieee80211_rxinfo *rxi, int subtype)
{
	struct ath_softc *sc = (struct ath_softc*)ic->ic_softc;
	struct ath_hal *ah = sc->sc_ah;

	(*sc->sc_recv_mgmt)(ic, m, ni, rxi, subtype);

	switch (subtype) {
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
	case IEEE80211_FC0_SUBTYPE_BEACON:
		if (ic->ic_opmode != IEEE80211_M_IBSS ||
		    ic->ic_state != IEEE80211_S_RUN)
			break;
		if (ieee80211_ibss_merge(ic, ni, ath_hal_get_tsf64(ah)) ==
		    ENETRESET)
			ath_hal_set_associd(ah, ic->ic_bss->ni_bssid, 0);
		break;
	default:
		break;
	}
	return;
}
#endif

/*
 * Set up driver-specific state for a newly associated node.
 * Note that we're also called on re-associate; the isnew
 * parameter tells us whether this is the first association or not.
 */
void
ath_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
{
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		return;
}

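/*
 * Fetch the channel list from the HAL and merge it into the
 * net80211 channel table, indexed by IEEE channel number.
 */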
int
ath_getchannels(struct ath_softc *sc, HAL_BOOL outdoor, HAL_BOOL xchanmode)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ath_hal *ah = sc->sc_ah;
	HAL_CHANNEL *chans;
	int i, ix, nchan;

	sc->sc_nchan = 0;
	chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL),
	    M_TEMP, M_NOWAIT);
	if (chans == NULL) {
		printf("%s: unable to allocate channel table\n", ifp->if_xname);
		return ENOMEM;
	}
	if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan,
	    HAL_MODE_ALL, outdoor, xchanmode)) {
		printf("%s: unable to collect channel list from hal\n",
		    ifp->if_xname);
		free(chans, M_TEMP, 0);
		return EINVAL;
	}

	/*
	 * Convert HAL channels to ieee80211 ones and insert
	 * them in the table according to their channel number.
	 */
	for (i = 0; i < nchan; i++) {
		HAL_CHANNEL *c = &chans[i];
		ix = ieee80211_mhz2ieee(c->channel, c->channelFlags);
		if (ix > IEEE80211_CHAN_MAX) {
			printf("%s: bad hal channel %u (%u/%x) ignored\n",
			    ifp->if_xname, ix, c->channel, c->channelFlags);
			continue;
		}
		DPRINTF(ATH_DEBUG_ANY,
		    ("%s: HAL channel %d/%d freq %d flags %#04x idx %d\n",
		    sc->sc_dev.dv_xname, i, nchan, c->channel, c->channelFlags,
		    ix));
		/* NB: flags are known to be compatible */
		if (ic->ic_channels[ix].ic_freq == 0) {
			ic->ic_channels[ix].ic_freq = c->channel;
			ic->ic_channels[ix].ic_flags = c->channelFlags;
		} else {
			/* channels overlap; e.g. 11g and 11b */
			ic->ic_channels[ix].ic_flags |= c->channelFlags;
		}
		/* count valid channels */
		sc->sc_nchan++;
	}
	free(chans, M_TEMP, 0);

	if (sc->sc_nchan < 1) {
		printf("%s: no valid channels for regdomain %s(%u)\n",
		    ifp->if_xname, ieee80211_regdomain2name(ah->ah_regdomain),
		    ah->ah_regdomain);
		return ENOENT;
	}

	/* set an initial channel */
	ic->ic_ibss_chan = &ic->ic_channels[0];

	return 0;
}

int
ath_rate_setup(struct ath_softc *sc, u_int mode)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	const HAL_RATE_TABLE *rt;
	struct ieee80211_rateset *rs;
	int i, maxrates;

	switch (mode) {
	case IEEE80211_MODE_11A:
		sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11A);
		break;
	case IEEE80211_MODE_11B:
		sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11B);
		break;
	case IEEE80211_MODE_11G:
		sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11G);
		break;
	default:
		DPRINTF(ATH_DEBUG_ANY,
		    ("%s: invalid mode %u\n", __func__, mode));
		return 0;
	}
	rt = sc->sc_rates[mode];
	if (rt == NULL)
		return 0;
	if (rt->rateCount > IEEE80211_RATE_MAXSIZE) {
		DPRINTF(ATH_DEBUG_ANY,
		    ("%s: rate table too small (%u > %u)\n",
		    __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE));
		maxrates = IEEE80211_RATE_MAXSIZE;
	} else {
		maxrates = rt->rateCount;
	}
	rs = &ic->ic_sup_rates[mode];
	for (i = 0; i < maxrates; i++)
		rs->rs_rates[i] = rt->info[i].dot11Rate;
	rs->rs_nrates = maxrates;
	return 1;
}

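/*
 * Switch the current PHY mode: rebuild the rate-index and hardware
 * rate maps and install the corresponding rate table.
 */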
void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
	const HAL_RATE_TABLE *rt;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	int i;

	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = sc->sc_rates[mode];
	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
	for (i = 0; i < rt->rateCount; i++)
		sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i;
	bzero(sc->sc_hwmap, sizeof(sc->sc_hwmap));
	for (i = 0; i < 32; i++)
		sc->sc_hwmap[i] = rt->info[rt->rateCodeToIndex[i]].dot11Rate;
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	ni = ic->ic_bss;
	ni->ni_rates.rs_nrates = sc->sc_currates->rateCount;
	if (ni->ni_txrate >= ni->ni_rates.rs_nrates)
		ni->ni_txrate = 0;
}

void
ath_rssadapt_updatenode(void *arg, struct ieee80211_node *ni)
{
	struct ath_node *an = ATH_NODE(ni);

	ieee80211_rssadapt_updatestats(&an->an_rssadapt);
}

void
ath_rssadapt_updatestats(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *)arg;
	struct ieee80211com *ic = &sc->sc_ic;

	if (ic->ic_opmode == IEEE80211_M_STA) {
		ath_rssadapt_updatenode(arg, ic->ic_bss);
	} else {
		ieee80211_iterate_nodes(ic, ath_rssadapt_updatenode, arg);
	}

	timeout_add_msec(&sc->sc_rssadapt_to, 100);
}

#ifdef AR_DEBUG
void
ath_printrxbuf(struct ath_buf *bf, int done)
{
	struct ath_desc *ds;
	int i;

	for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
		printf("R%d (%p %p) %08x %08x %08x %08x %08x %08x %c\n",
		    i, ds, (struct ath_desc *)bf->bf_daddr + i,
		    ds->ds_link, ds->ds_data,
		    ds->ds_ctl0, ds->ds_ctl1,
		    ds->ds_hw[0], ds->ds_hw[1],
		    !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!');
	}
}

void
ath_printtxbuf(struct ath_buf *bf, int done)
{
	struct ath_desc *ds;
	int i;

	for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
		printf("T%d (%p %p) "
		    "%08x %08x %08x %08x %08x %08x %08x %08x %c\n",
		    i, ds, (struct ath_desc *)bf->bf_daddr + i,
		    ds->ds_link, ds->ds_data,
		    ds->ds_ctl0, ds->ds_ctl1,
		    ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
		    !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!');
	}
}
#endif /* AR_DEBUG */

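/*
 * Prepare the chip's GPIO pins for the gpio(4) framework and
 * enable the GPIO-controlled software LED where supported.
 */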
int
ath_gpio_attach(struct ath_softc *sc, u_int16_t devid)
{
	struct ath_hal *ah = sc->sc_ah;
	struct gpiobus_attach_args gba;
	int i;

	if (ah->ah_gpio_npins < 1)
		return 0;

	/* Initialize gpio pins array */
	for (i = 0; i < ah->ah_gpio_npins && i < AR5K_MAX_GPIO; i++) {
		sc->sc_gpio_pins[i].pin_num = i;
		sc->sc_gpio_pins[i].pin_caps = GPIO_PIN_INPUT |
		    GPIO_PIN_OUTPUT;

		/* Set pin mode to input */
		ath_hal_set_gpio_input(ah, i);
		sc->sc_gpio_pins[i].pin_flags = GPIO_PIN_INPUT;

		/* Get pin input */
		sc->sc_gpio_pins[i].pin_state = ath_hal_get_gpio(ah, i) ?
		    GPIO_PIN_HIGH : GPIO_PIN_LOW;
	}

	/* Enable GPIO-controlled software LED if available */
	if ((ah->ah_version == AR5K_AR5211) ||
	    (devid == PCI_PRODUCT_ATHEROS_AR5212_IBM)) {
		sc->sc_softled = 1;
		ath_hal_set_gpio_output(ah, AR5K_SOFTLED_PIN);
		ath_hal_set_gpio(ah, AR5K_SOFTLED_PIN, AR5K_SOFTLED_OFF);
	}

	/* Create gpio controller tag */
	sc->sc_gpio_gc.gp_cookie = sc;
	sc->sc_gpio_gc.gp_pin_read = ath_gpio_pin_read;
	sc->sc_gpio_gc.gp_pin_write = ath_gpio_pin_write;
	sc->sc_gpio_gc.gp_pin_ctl = ath_gpio_pin_ctl;

	gba.gba_name = "gpio";
	gba.gba_gc = &sc->sc_gpio_gc;
	gba.gba_pins = sc->sc_gpio_pins;
	gba.gba_npins = ah->ah_gpio_npins;

#ifdef notyet
#if NGPIO > 0
	if (config_found(&sc->sc_dev, &gba, gpiobus_print) == NULL)
		return (ENODEV);
#endif
#endif

	return (0);
}

int
ath_gpio_pin_read(void *arg, int pin)
{
	struct ath_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	return (ath_hal_get_gpio(ah, pin) ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
}

void
ath_gpio_pin_write(void *arg, int pin, int value)
{
	struct ath_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	ath_hal_set_gpio(ah, pin, value ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
}

void
ath_gpio_pin_ctl(void *arg, int pin, int flags)
{
	struct ath_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;

	if (flags & GPIO_PIN_INPUT) {
		ath_hal_set_gpio_input(ah, pin);
	} else if (flags & GPIO_PIN_OUTPUT) {
		ath_hal_set_gpio_output(ah, pin);
	}
}