/*      $OpenBSD: ath.c,v 1.111 2016/04/13 10:49:26 mpi Exp $  */
/*      $NetBSD: ath.c,v 1.37 2004/08/18 21:59:39 dyoung Exp $ */

/*-
 * Copyright (c) 2002-2004 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated. It has been modified for OpenBSD to use an
 * open source HAL instead of the original binary-only HAL.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/timeout.h>
#include <sys/gpio.h>
#include <sys/endian.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_rssadapt.h>

#include <dev/pci/pcidevs.h>
#include <dev/gpio/gpiovar.h>

#include <dev/ic/athvar.h>

int	ath_init(struct ifnet *);
int	ath_init1(struct ath_softc *);
int	ath_intr1(struct ath_softc *);
void	ath_stop(struct ifnet *);
void	ath_start(struct ifnet *);
void	ath_reset(struct ath_softc *, int);
int	ath_media_change(struct ifnet *);
void	ath_watchdog(struct ifnet *);
int	ath_ioctl(struct ifnet *, u_long, caddr_t);
void	ath_fatal_proc(void *, int);
void	ath_rxorn_proc(void *, int);
void	ath_bmiss_proc(void *, int);
u_int	ath_chan2flags(struct ieee80211com *, struct ieee80211_channel *);
int	ath_initkeytable(struct ath_softc *);
void	ath_mcastfilter_accum(caddr_t, u_int32_t (*)[2]);
void	ath_mcastfilter_compute(struct ath_softc *, u_int32_t (*)[2]);
u_int32_t ath_calcrxfilter(struct ath_softc *);
void	ath_mode_init(struct ath_softc *);
#ifndef IEEE80211_STA_ONLY
int	ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
void	ath_beacon_proc(void *, int);
void	ath_beacon_free(struct ath_softc *);
#endif
void	ath_beacon_config(struct ath_softc *);
int	ath_desc_alloc(struct ath_softc *);
void	ath_desc_free(struct ath_softc *);
struct ieee80211_node *ath_node_alloc(struct ieee80211com *);
struct mbuf *ath_getmbuf(int, int, u_int);
void	ath_node_free(struct ieee80211com *, struct ieee80211_node *);
void	ath_node_copy(struct ieee80211com *,
	    struct ieee80211_node *, const struct ieee80211_node *);
u_int8_t ath_node_getrssi(struct ieee80211com *,
	    const struct ieee80211_node *);
int	ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
void	ath_rx_proc(void *, int);
int	ath_tx_start(struct ath_softc *, struct ieee80211_node *,
	    struct ath_buf *, struct mbuf *);
void	ath_tx_proc(void *, int);
int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
void	ath_draintxq(struct ath_softc *);
void	ath_stoprecv(struct ath_softc *);
int	ath_startrecv(struct ath_softc *);
void	ath_next_scan(void *);
int	ath_set_slot_time(struct ath_softc *);
void	ath_calibrate(void *);
void	ath_ledstate(struct ath_softc *, enum ieee80211_state);
int	ath_newstate(struct ieee80211com *, enum ieee80211_state, int);
void	ath_newassoc(struct ieee80211com *,
	    struct ieee80211_node *, int);
int	ath_getchannels(struct ath_softc *, HAL_BOOL outdoor,
	    HAL_BOOL xchanmode);
int	ath_rate_setup(struct ath_softc *sc, u_int mode);
void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
void	ath_rssadapt_updatenode(void *, struct ieee80211_node *);
void	ath_rssadapt_updatestats(void *);
#ifndef IEEE80211_STA_ONLY
void	ath_recv_mgmt(struct ieee80211com *, struct mbuf *,
	    struct ieee80211_node *, struct ieee80211_rxinfo *, int);
#endif
void	ath_disable(struct ath_softc *);

int	ath_gpio_attach(struct ath_softc *, u_int16_t);
int	ath_gpio_pin_read(void *, int);
void	ath_gpio_pin_write(void *, int, int);
void	ath_gpio_pin_ctl(void *, int, int);

#ifdef AR_DEBUG
void	ath_printrxbuf(struct ath_buf *, int);
void	ath_printtxbuf(struct ath_buf *, int);
int ath_debug = 0;
#endif

int ath_dwelltime = 200;		/* 5 channels/second */
int ath_calinterval = 30;		/* calibrate every 30 secs */
int ath_outdoor = AH_TRUE;		/* outdoor operation */
int ath_xchanmode = AH_TRUE;		/* enable extended channels */
int ath_softcrypto = 1;			/* 1=enable software crypto */

struct cfdriver ath_cd = {
	NULL, "ath", DV_IFNET
};

int
ath_activate(struct device *self, int act)
{
	struct ath_softc *sc = (struct ath_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING) {
			ath_stop(ifp);
			if (sc->sc_power != NULL)
				(*sc->sc_power)(sc, act);
		}
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP) {
			ath_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				ath_start(ifp);
		}
		break;
	}
	return 0;
}

int
ath_enable(struct ath_softc *sc)
{
	if (ATH_IS_ENABLED(sc) == 0) {
		if (sc->sc_enable != NULL && (*sc->sc_enable)(sc) != 0) {
			printf("%s: device enable failed\n",
			    sc->sc_dev.dv_xname);
			return (EIO);
		}
		sc->sc_flags |= ATH_ENABLED;
	}
	return (0);
}

void
ath_disable(struct ath_softc *sc)
{
	if (!ATH_IS_ENABLED(sc))
		return;
	if (sc->sc_disable != NULL)
		(*sc->sc_disable)(sc);
	sc->sc_flags &= ~ATH_ENABLED;
}

int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ath_hal *ah;
	HAL_STATUS status;
	HAL_TXQ_INFO qinfo;
	int error = 0, i;

	DPRINTF(ATH_DEBUG_ANY, ("%s: devid 0x%x\n", __func__, devid));

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	sc->sc_flags &= ~ATH_ATTACHED;	/* make sure that it's not attached */

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
	    sc->sc_pcie, &status);
	if (ah == NULL) {
		printf("%s: unable to attach hardware; HAL status %d\n",
		    ifp->if_xname, status);
		error = ENXIO;
		goto bad;
	}
	if (ah->ah_abi != HAL_ABI_VERSION) {
		printf("%s: HAL ABI mismatch detected (0x%x != 0x%x)\n",
		    ifp->if_xname, ah->ah_abi, HAL_ABI_VERSION);
		error = ENXIO;
		goto bad;
	}

	if (ah->ah_single_chip == AH_TRUE) {
		printf("%s: AR%s %u.%u phy %u.%u rf %u.%u", ifp->if_xname,
		    ar5k_printver(AR5K_VERSION_DEV, devid),
		    ah->ah_mac_version, ah->ah_mac_revision,
		    ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf,
		    ah->ah_radio_5ghz_revision >> 4,
		    ah->ah_radio_5ghz_revision & 0xf);
	} else {
		printf("%s: AR%s %u.%u phy %u.%u", ifp->if_xname,
		    ar5k_printver(AR5K_VERSION_VER, ah->ah_mac_srev),
		    ah->ah_mac_version, ah->ah_mac_revision,
		    ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf);
		printf(" rf%s %u.%u",
		    ar5k_printver(AR5K_VERSION_RAD, ah->ah_radio_5ghz_revision),
		    ah->ah_radio_5ghz_revision >> 4,
		    ah->ah_radio_5ghz_revision & 0xf);
		if (ah->ah_radio_2ghz_revision != 0) {
			printf(" rf%s %u.%u",
			    ar5k_printver(AR5K_VERSION_RAD,
			    ah->ah_radio_2ghz_revision),
			    ah->ah_radio_2ghz_revision >> 4,
			    ah->ah_radio_2ghz_revision & 0xf);
		}
	}
	if (ah->ah_ee_version == AR5K_EEPROM_VERSION_4_7)
		printf(" eeprom 4.7");
	else
		printf(" eeprom %1x.%1x", ah->ah_ee_version >> 12,
		    ah->ah_ee_version & 0xff);

#if 0
	if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_UNSUPP ||
	    ah->ah_radio_2ghz_revision >= AR5K_SREV_RAD_UNSUPP) {
		printf(": RF radio not supported\n");
		error = EOPNOTSUPP;
		goto bad;
	}
#endif

	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */

	/*
	 * Get regulation domain either stored in the EEPROM or defined
	 * as the default value. Some devices are known to have broken
	 * regulation domain values in their EEPROM.
	 */
	ath_hal_get_regdomain(ah, &ah->ah_regdomain);

	/*
	 * Construct channel list based on the current regulation domain.
	 */
	error = ath_getchannels(sc, ath_outdoor, ath_xchanmode);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);

	error = ath_desc_alloc(sc);
	if (error != 0) {
		printf(": failed to allocate descriptors: %d\n", error);
		goto bad;
	}
	timeout_set(&sc->sc_scan_to, ath_next_scan, sc);
	timeout_set(&sc->sc_cal_to, ath_calibrate, sc);
	timeout_set(&sc->sc_rssadapt_to, ath_rssadapt_updatestats, sc);

#ifdef __FreeBSD__
	ATH_TXBUF_LOCK_INIT(sc);
	ATH_TXQ_LOCK_INIT(sc);
#endif

	ATH_TASK_INIT(&sc->sc_txtask, ath_tx_proc, sc);
	ATH_TASK_INIT(&sc->sc_rxtask, ath_rx_proc, sc);
	ATH_TASK_INIT(&sc->sc_rxorntask, ath_rxorn_proc, sc);
	ATH_TASK_INIT(&sc->sc_fataltask, ath_fatal_proc, sc);
	ATH_TASK_INIT(&sc->sc_bmisstask, ath_bmiss_proc, sc);
#ifndef IEEE80211_STA_ONLY
	ATH_TASK_INIT(&sc->sc_swbatask, ath_beacon_proc, sc);
#endif

	/*
	 * For now just pre-allocate one data queue and one
	 * beacon queue. Note that the HAL handles resetting
	 * them at the needed time. Eventually we'll want to
	 * allocate more tx queues for splitting management
	 * frames and for QOS support.
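	 * (The loop below actually sets up one queue per HAL data-queue
	 * subtype; the tqi_subtype value is intended to map to WME access
	 * categories later.)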
	 */
	sc->sc_bhalq = ath_hal_setup_tx_queue(ah, HAL_TX_QUEUE_BEACON, NULL);
	if (sc->sc_bhalq == (u_int) -1) {
		printf(": unable to setup a beacon xmit queue!\n");
		goto bad2;
	}

	for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) {
		bzero(&qinfo, sizeof(qinfo));
		qinfo.tqi_type = HAL_TX_QUEUE_DATA;
		qinfo.tqi_subtype = i; /* should be mapped to WME types */
		sc->sc_txhalq[i] = ath_hal_setup_tx_queue(ah,
		    HAL_TX_QUEUE_DATA, &qinfo);
		if (sc->sc_txhalq[i] == (u_int) -1) {
			printf(": unable to setup a data xmit queue %u!\n", i);
			goto bad2;
		}
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start;
	ifp->if_watchdog = ath_watchdog;
	ifp->if_ioctl = ath_ioctl;
#ifndef __OpenBSD__
	ifp->if_stop = ath_stop;	/* XXX */
#endif
	IFQ_SET_MAXLEN(&ifp->if_snd, ATH_TXBUF * ATH_TXDESC);

	ic->ic_softc = sc;
	ic->ic_newassoc = ath_newassoc;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps = IEEE80211_C_WEP	/* wep supported */
	    | IEEE80211_C_PMGT		/* power management */
#ifndef IEEE80211_STA_ONLY
	    | IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
	    | IEEE80211_C_HOSTAP	/* hostap mode */
#endif
	    | IEEE80211_C_MONITOR	/* monitor mode */
	    | IEEE80211_C_SHSLOT	/* short slot time supported */
	    | IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
	if (ath_softcrypto)
		ic->ic_caps |= IEEE80211_C_RSN;	/* wpa/rsn supported */

	/*
	 * Not all chips have the VEOL support we want to use with
	 * IBSS beacon; check here for it.
	 */
	sc->sc_veol = ath_hal_has_veol(ah);

	/* get mac address from hardware */
	ath_hal_get_lladdr(ah, ic->ic_myaddr);

	if_attach(ifp);

	/* call MI attach routine. */
	ieee80211_ifattach(ifp);

	/* override default methods */
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	sc->sc_node_copy = ic->ic_node_copy;
	ic->ic_node_copy = ath_node_copy;
	ic->ic_node_getrssi = ath_node_getrssi;
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = ath_newstate;
#ifndef IEEE80211_STA_ONLY
	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
	ic->ic_recv_mgmt = ath_recv_mgmt;
#endif
	ic->ic_max_rssi = AR5K_MAX_RSSI;
	bcopy(etherbroadcastaddr, sc->sc_broadcast_addr, IEEE80211_ADDR_LEN);

	/* complete initialization */
	ieee80211_media_init(ifp, ath_media_change, ieee80211_media_status);

#if NBPFILTER > 0
	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
	    sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);

	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
	bzero(&sc->sc_rxtapu, sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
	bzero(&sc->sc_txtapu, sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT);
#endif

	sc->sc_flags |= ATH_ATTACHED;

	/*
	 * Print regulation domain and the mac address. The regulation domain
	 * will be marked with a * if the EEPROM value has been overwritten.
	 */
	printf(", %s%s, address %s\n",
	    ieee80211_regdomain2name(ah->ah_regdomain),
	    ah->ah_regdomain != ah->ah_regdomain_hw ? "*" : "",
	    ether_sprintf(ic->ic_myaddr));

	if (ath_gpio_attach(sc, devid) == 0)
		sc->sc_flags |= ATH_GPIO;

	return 0;
bad2:
	ath_desc_free(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	sc->sc_invalid = 1;
	return error;
}

int
ath_detach(struct ath_softc *sc, int flags)
{
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s;

	if ((sc->sc_flags & ATH_ATTACHED) == 0)
		return (0);

	config_detach_children(&sc->sc_dev, flags);

	DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags %x\n", __func__, ifp->if_flags));

	timeout_del(&sc->sc_scan_to);
	timeout_del(&sc->sc_cal_to);
	timeout_del(&sc->sc_rssadapt_to);

	s = splnet();
	ath_stop(ifp);
	ath_desc_free(sc);
	ath_hal_detach(sc->sc_ah);

	ieee80211_ifdetach(ifp);
	if_detach(ifp);

	splx(s);
#ifdef __FreeBSD__
	ATH_TXBUF_LOCK_DESTROY(sc);
	ATH_TXQ_LOCK_DESTROY(sc);
#endif

	return 0;
}

int
ath_intr(void *arg)
{
	return ath_intr1((struct ath_softc *)arg);
}

int
ath_intr1(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(ATH_DEBUG_ANY, ("%s: invalid; ignored\n", __func__));
		return 0;
	}
	if (!ath_hal_is_intr_pending(ah))	/* shared irq, not for us */
		return 0;
	if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) != (IFF_RUNNING|IFF_UP)) {
		DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n",
		    __func__, ifp->if_flags));
		ath_hal_get_isr(ah, &status);	/* clear ISR */
		ath_hal_set_intr(ah, 0);	/* disable further intr's */
		return 1; /* XXX */
	}
	ath_hal_get_isr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(ATH_DEBUG_INTR, ("%s: status 0x%x\n", __func__, status));
	status &= sc->sc_imask;			/* discard unasked for bits */
	if (status & HAL_INT_FATAL) {
		sc->sc_stats.ast_hardware++;
		ath_hal_set_intr(ah, 0);	/* disable intr's until reset */
		ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_fataltask);
	} else if (status & HAL_INT_RXORN) {
		sc->sc_stats.ast_rxorn++;
		ath_hal_set_intr(ah, 0);	/* disable intr's until reset */
		ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxorntask);
	} else if (status & HAL_INT_MIB) {
		DPRINTF(ATH_DEBUG_INTR,
		    ("%s: resetting MIB counters\n", __func__));
		sc->sc_stats.ast_mib++;
		ath_hal_update_mib_counters(ah, &sc->sc_mib_stats);
	} else {
		if (status & HAL_INT_RXEOL) {
			/*
			 * NB: the hardware should re-read the link when
			 * RXE bit is written, but it doesn't work at
			 * least on older hardware revs.
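			 * Clearing sc_rxlink below keeps ath_rxbuf_init()
			 * from linking new buffers onto the stale
			 * descriptor chain.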
			 */
			sc->sc_stats.ast_rxeol++;
			sc->sc_rxlink = NULL;
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_update_tx_triglevel(ah, AH_TRUE);
		}
		if (status & HAL_INT_RX)
			ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxtask);
		if (status & HAL_INT_TX)
			ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_txtask);
		if (status & HAL_INT_SWBA)
			ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_swbatask);
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_bmisstask);
		}
	}
	return 1;
}

void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;

	if (ifp->if_flags & IFF_DEBUG)
		printf("%s: hardware error; resetting\n", ifp->if_xname);
	ath_reset(sc, 1);
}

void
ath_rxorn_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;

	if (ifp->if_flags & IFF_DEBUG)
		printf("%s: rx FIFO overrun; resetting\n", ifp->if_xname);
	ath_reset(sc, 1);
}

void
ath_bmiss_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(ATH_DEBUG_ANY, ("%s: pending %u\n", __func__, pending));
	if (ic->ic_opmode != IEEE80211_M_STA)
		return;
	if (ic->ic_state == IEEE80211_S_RUN) {
		/*
		 * Rather than go directly to scan state, try to
		 * reassociate first. If that fails then the state
		 * machine will drop us into scanning after timing
		 * out waiting for a probe response.
		 */
		ieee80211_new_state(ic, IEEE80211_S_ASSOC, -1);
	}
}

u_int
ath_chan2flags(struct ieee80211com *ic, struct ieee80211_channel *chan)
{
	enum ieee80211_phymode mode = ieee80211_chan2mode(ic, chan);

	switch (mode) {
	case IEEE80211_MODE_AUTO:
		return 0;
	case IEEE80211_MODE_11A:
		return CHANNEL_A;
	case IEEE80211_MODE_11B:
		return CHANNEL_B;
	case IEEE80211_MODE_11G:
		return CHANNEL_G;
	default:
		panic("%s: unsupported mode %d", __func__, mode);
		return 0;
	}
}

int
ath_init(struct ifnet *ifp)
{
	return ath_init1((struct ath_softc *)ifp->if_softc);
}

int
ath_init1(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ieee80211_node *ni;
	enum ieee80211_phymode mode;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;
	HAL_CHANNEL hchan;
	int error = 0, s;

	DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n",
	    __func__, ifp->if_flags));

	if ((error = ath_enable(sc)) != 0)
		return error;

	s = splnet();
	/*
	 * Stop anything previously setup. This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop(ifp);

	/*
	 * Reset the link layer address to the latest value.
	 */
	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
	ath_hal_set_lladdr(ah, ic->ic_myaddr);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
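	 * Below: set the slot time, refill the key cache, start the
	 * receive engine and finally program the interrupt mask.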
	 */
	hchan.channel = ic->ic_ibss_chan->ic_freq;
	hchan.channelFlags = ath_chan2flags(ic, ic->ic_ibss_chan);
	if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, &status)) {
		printf("%s: unable to reset hardware; hal status %u\n",
		    ifp->if_xname, status);
		error = EIO;
		goto done;
	}
	ath_set_slot_time(sc);

	if ((error = ath_initkeytable(sc)) != 0) {
		printf("%s: unable to reset the key cache\n",
		    ifp->if_xname);
		goto done;
	}

	if ((error = ath_startrecv(sc)) != 0) {
		printf("%s: unable to start recv logic\n", ifp->if_xname);
		goto done;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
	    | HAL_INT_RXEOL | HAL_INT_RXORN
	    | HAL_INT_FATAL | HAL_INT_GLOBAL;
#ifndef IEEE80211_STA_ONLY
	if (ic->ic_opmode == IEEE80211_M_HOSTAP)
		sc->sc_imask |= HAL_INT_MIB;
#endif
	ath_hal_set_intr(ah, sc->sc_imask);

	ifp->if_flags |= IFF_RUNNING;
	ic->ic_state = IEEE80211_S_INIT;

	/*
	 * The hardware should be ready to go now so it's safe
	 * to kick the 802.11 state machine as it's likely to
	 * immediately call back to us to send mgmt frames.
	 */
	ni = ic->ic_bss;
	ni->ni_chan = ic->ic_ibss_chan;
	mode = ieee80211_chan2mode(ic, ni->ni_chan);
	if (mode != sc->sc_curmode)
		ath_setcurmode(sc, mode);
	if (ic->ic_opmode != IEEE80211_M_MONITOR) {
		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
	} else {
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
	}
done:
	splx(s);
	return error;
}

void
ath_stop(struct ifnet *ifp)
{
	struct ieee80211com *ic = (struct ieee80211com *) ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	int s;

	DPRINTF(ATH_DEBUG_ANY, ("%s: invalid %u if_flags 0x%x\n",
	    __func__, sc->sc_invalid, ifp->if_flags));

	s = splnet();
	if (ifp->if_flags & IFF_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    disable interrupts
		 *    turn off timers
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    reset 802.11 state machine
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
		ifp->if_flags &= ~IFF_RUNNING;
		ifp->if_timer = 0;
		if (!sc->sc_invalid)
			ath_hal_set_intr(ah, 0);
		ath_draintxq(sc);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc);
		} else {
			sc->sc_rxlink = NULL;
		}
		IFQ_PURGE(&ifp->if_snd);
#ifndef IEEE80211_STA_ONLY
		ath_beacon_free(sc);
#endif
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
		if (!sc->sc_invalid) {
			ath_hal_set_power(ah, HAL_PM_FULL_SLEEP, 0);
		}
		ath_disable(sc);
	}
	splx(s);
}

/*
 * Reset the hardware w/o losing operational state. This is
 * basically a more efficient way of doing ath_stop, ath_init,
 * followed by state transitions to the current 802.11
 * operational state. Used to recover from errors such as rx overrun
 * and to reset the hardware when rf gain settings must be reset.
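 * The 'full' argument is handed to ath_hal_reset() as the channel-change
 * flag, forcing a complete chip reset when it is non-zero.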
 */
void
ath_reset(struct ath_softc *sc, int full)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_channel *c;
	HAL_STATUS status;
	HAL_CHANNEL hchan;

	/*
	 * Convert to a HAL channel description with the flags
	 * constrained to reflect the current operating mode.
	 */
	c = ic->ic_ibss_chan;
	hchan.channel = c->ic_freq;
	hchan.channelFlags = ath_chan2flags(ic, c);

	ath_hal_set_intr(ah, 0);	/* disable interrupts */
	ath_draintxq(sc);		/* stop xmit side */
	ath_stoprecv(sc);		/* stop recv side */
	/* NB: indicate channel change so we do a full reset */
	if (!ath_hal_reset(ah, ic->ic_opmode, &hchan,
	    full ? AH_TRUE : AH_FALSE, &status)) {
		printf("%s: %s: unable to reset hardware; hal status %u\n",
		    ifp->if_xname, __func__, status);
	}
	ath_set_slot_time(sc);
	/* In case channel changed, save as a node channel */
	ic->ic_bss->ni_chan = ic->ic_ibss_chan;
	ath_hal_set_intr(ah, sc->sc_imask);
	if (ath_startrecv(sc) != 0)	/* restart recv */
		printf("%s: %s: unable to start recv logic\n", ifp->if_xname,
		    __func__);
	ath_start(ifp);			/* restart xmit */
	if (ic->ic_state == IEEE80211_S_RUN)
		ath_beacon_config(sc);	/* restart beacons */
}

void
ath_start(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ieee80211_frame *wh;
	int s;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd) ||
	    sc->sc_invalid)
		return;
	for (;;) {
		/*
		 * Grab a TX buffer and associated resources.
		 */
		s = splnet();
		bf = TAILQ_FIRST(&sc->sc_txbuf);
		if (bf != NULL)
			TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
		splx(s);
		if (bf == NULL) {
			DPRINTF(ATH_DEBUG_ANY, ("%s: out of xmit buffers\n",
			    __func__));
			sc->sc_stats.ast_tx_qstop++;
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		/*
		 * Poll the management queue for frames; they
		 * have priority over normal data frames.
		 */
		m = mq_dequeue(&ic->ic_mgtq);
		if (m == NULL) {
			/*
			 * No data frames go out unless we're associated.
			 */
			if (ic->ic_state != IEEE80211_S_RUN) {
				DPRINTF(ATH_DEBUG_ANY,
				    ("%s: ignore data packet, state %u\n",
				    __func__, ic->ic_state));
				sc->sc_stats.ast_tx_discard++;
				s = splnet();
				TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
				splx(s);
				break;
			}
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL) {
				s = splnet();
				TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
				splx(s);
				break;
			}
			ifp->if_opackets++;

#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

			/*
			 * Encapsulate the packet in prep for transmission.
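			 * ieee80211_encap() also looks up the destination
			 * node; the reference returned in ni is dropped
			 * again in the error path below.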
			 */
			m = ieee80211_encap(ifp, m, &ni);
			if (m == NULL) {
				DPRINTF(ATH_DEBUG_ANY,
				    ("%s: encapsulation failure\n",
				    __func__));
				sc->sc_stats.ast_tx_encap++;
				goto bad;
			}
			wh = mtod(m, struct ieee80211_frame *);
		} else {
			ni = m->m_pkthdr.ph_cookie;

			wh = mtod(m, struct ieee80211_frame *);
			if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
			    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
				/* fill time stamp */
				u_int64_t tsf;
				u_int32_t *tstamp;

				tsf = ath_hal_get_tsf64(ah);
				/* XXX: adjust 100us delay to xmit */
				tsf += 100;
				tstamp = (u_int32_t *)&wh[1];
				tstamp[0] = htole32(tsf & 0xffffffff);
				tstamp[1] = htole32(tsf >> 32);
			}
			sc->sc_stats.ast_tx_mgmt++;
		}

		if (ath_tx_start(sc, ni, bf, m)) {
	bad:
			s = splnet();
			TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
			splx(s);
			ifp->if_oerrors++;
			if (ni != NULL)
				ieee80211_release_node(ic, ni);
			continue;
		}

		sc->sc_tx_timer = 5;
		ifp->if_timer = 1;
	}
}

int
ath_media_change(struct ifnet *ifp)
{
	int error;

	error = ieee80211_media_change(ifp);
	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) ==
		    (IFF_RUNNING|IFF_UP))
			ath_init(ifp);		/* XXX lose error */
		error = 0;
	}
	return error;
}

void
ath_watchdog(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;

	ifp->if_timer = 0;
	if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid)
		return;
	if (sc->sc_tx_timer) {
		if (--sc->sc_tx_timer == 0) {
			printf("%s: device timeout\n", ifp->if_xname);
			ath_reset(sc, 1);
			ifp->if_oerrors++;
			sc->sc_stats.ast_watchdog++;
			return;
		}
		ifp->if_timer = 1;
	}

	ieee80211_watchdog(ifp);
}

int
ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, s;

	s = splnet();
	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/*
				 * To avoid rescanning another access point,
				 * do not call ath_init() here. Instead,
				 * only reflect promisc mode settings.
				 */
				ath_mode_init(sc);
			} else {
				/*
				 * Beware of being called during detach to
				 * reset promiscuous mode. In that case we
				 * will still be marked UP but not RUNNING.
				 * However trying to re-init the interface
				 * is the wrong thing to do as we've already
				 * torn down much of our state. There's
				 * probably a better way to deal with this.
				 */
				if (!sc->sc_invalid)
					ath_init(ifp);	/* XXX lose error */
			}
		} else
			ath_stop(ifp);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
#ifdef __FreeBSD__
		/*
		 * The upper layer has already installed/removed
		 * the multicast address(es), just recalculate the
		 * multicast filter for the card.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			ath_mode_init(sc);
#endif
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ic.ic_ac) :
		    ether_delmulti(ifr, &sc->sc_ic.ic_ac);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				ath_mode_init(sc);
			error = 0;
		}
		break;
	case SIOCGATHSTATS:
		error = copyout(&sc->sc_stats,
		    ifr->ifr_data, sizeof (sc->sc_stats));
		break;
	default:
		error = ieee80211_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) ==
			    (IFF_RUNNING|IFF_UP)) {
				if (ic->ic_opmode != IEEE80211_M_MONITOR)
					ath_init(ifp);	/* XXX lose error */
				else
					ath_reset(sc, 1);
			}
			error = 0;
		}
		break;
	}
	splx(s);
	return error;
}

/*
 * Fill the hardware key cache with key entries.
 */
int
ath_initkeytable(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	int i;

	if (ath_softcrypto) {
		/*
		 * Disable the hardware crypto engine and reset the key cache
		 * to allow software crypto operation for WEP/RSN/WPA2
		 */
		if (ic->ic_flags & (IEEE80211_F_WEPON|IEEE80211_F_RSNON))
			(void)ath_hal_softcrypto(ah, AH_TRUE);
		else
			(void)ath_hal_softcrypto(ah, AH_FALSE);
		return (0);
	}

	/* WEP is disabled, we only support WEP in hardware yet */
	if ((ic->ic_flags & IEEE80211_F_WEPON) == 0)
		return (0);

	/*
	 * Setup the hardware after reset: the key cache is filled as
	 * needed and the receive engine is set going. Frame transmit
	 * is handled entirely in the frame output path; there's nothing
	 * to do here except setup the interrupt mask.
	 */

	/* XXX maybe should reset all keys when !WEPON */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		struct ieee80211_key *k = &ic->ic_nw_keys[i];
		if (k->k_len == 0)
			ath_hal_reset_key(ah, i);
		else {
			HAL_KEYVAL hk;

			bzero(&hk, sizeof(hk));
			/*
			 * Pad the key to a supported key length. It
			 * is always a good idea to use full-length
			 * keys without padded zeros but this seems
			 * to be the default behaviour used by many
			 * implementations.
			 */
			if (k->k_cipher == IEEE80211_CIPHER_WEP40)
				hk.wk_len = AR5K_KEYVAL_LENGTH_40;
			else if (k->k_cipher == IEEE80211_CIPHER_WEP104)
				hk.wk_len = AR5K_KEYVAL_LENGTH_104;
			else
				return (EINVAL);
			bcopy(k->k_key, hk.wk_key, hk.wk_len);

			if (ath_hal_set_key(ah, i, &hk) != AH_TRUE)
				return (EINVAL);
		}
	}

	return (0);
}

void
ath_mcastfilter_accum(caddr_t dl, u_int32_t (*mfilt)[2])
{
	u_int32_t val;
	u_int8_t pos;

	val = LE_READ_4(dl + 0);
	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	val = LE_READ_4(dl + 3);
	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	pos &= 0x3f;
	(*mfilt)[pos / 32] |= (1 << (pos % 32));
}

void
ath_mcastfilter_compute(struct ath_softc *sc, u_int32_t (*mfilt)[2])
{
	struct arpcom *ac = &sc->sc_ic.ic_ac;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	struct ether_multi *enm;
	struct ether_multistep estep;

	if (ac->ac_multirangecnt > 0) {
		/* XXX Punt on ranges. */
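		/*
		 * A range of addresses cannot be represented in the
		 * 64-bit hash filter, so accept all multicast frames
		 * instead.
		 */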
		(*mfilt)[0] = (*mfilt)[1] = ~((u_int32_t)0);
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	ETHER_FIRST_MULTI(estep, ac, enm);
	while (enm != NULL) {
		ath_mcastfilter_accum(enm->enm_addrlo, mfilt);
		ETHER_NEXT_MULTI(estep, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when scanning
 */
u_int32_t
ath_calcrxfilter(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = &ic->ic_if;
	u_int32_t rfilt;

	rfilt = (ath_hal_get_rx_filter(ah) & HAL_RX_FILTER_PHYERR)
	    | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
	if (ic->ic_opmode != IEEE80211_M_STA)
		rfilt |= HAL_RX_FILTER_PROBEREQ;
#ifndef IEEE80211_STA_ONLY
	if (ic->ic_opmode != IEEE80211_M_AHDEMO)
#endif
		rfilt |= HAL_RX_FILTER_BEACON;
	if (ifp->if_flags & IFF_PROMISC)
		rfilt |= HAL_RX_FILTER_PROM;
	return rfilt;
}

void
ath_mode_init(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath_hal_set_rx_filter(ah, rfilt);

	/* configure operational mode */
	ath_hal_set_opmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = 0;
	ath_mcastfilter_compute(sc, &mfilt);
	ath_hal_set_mcast_filter(ah, mfilt[0], mfilt[1]);
	DPRINTF(ATH_DEBUG_MODE, ("%s: RX filter 0x%x, MC filter %08x:%08x\n",
	    __func__, rfilt, mfilt[0], mfilt[1]));
}

struct mbuf *
ath_getmbuf(int flags, int type, u_int pktlen)
{
	struct mbuf *m;

	KASSERT(pktlen <= MCLBYTES, ("802.11 packet too large: %u", pktlen));
#ifdef __FreeBSD__
	if (pktlen <= MHLEN) {
		MGETHDR(m, flags, type);
	} else {
		m = m_getcl(flags, type, M_PKTHDR);
	}
#else
	MGETHDR(m, flags, type);
	if (m != NULL && pktlen > MHLEN) {
		MCLGET(m, flags);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			m = NULL;
		}
	}
#endif
	return m;
}

#ifndef IEEE80211_STA_ONLY
int
ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	struct ath_desc *ds;
	struct mbuf *m;
	int error;
	u_int8_t rate;
	const HAL_RATE_TABLE *rt;
	u_int flags = 0;

	bf = sc->sc_bcbuf;
	if (bf->bf_m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		bf->bf_node = NULL;
	}
	/*
	 * NB: the beacon data buffer must be 32-bit aligned;
	 * we assume the mbuf routines will return us something
	 * with this alignment (perhaps should assert).
	 */
	m = ieee80211_beacon_alloc(ic, ni);
	if (m == NULL) {
		DPRINTF(ATH_DEBUG_BEACON, ("%s: cannot get mbuf/cluster\n",
		    __func__));
		sc->sc_stats.ast_be_nombuf++;
		return ENOMEM;
	}

	DPRINTF(ATH_DEBUG_BEACON, ("%s: m %p len %u\n", __func__, m, m->m_len));
	error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		m_freem(m);
		return error;
	}
	KASSERT(bf->bf_nseg == 1,
	    ("%s: multi-segment packet; nseg %u", __func__, bf->bf_nseg));
	bf->bf_m = m;

	/* setup descriptors */
	ds = bf->bf_desc;
	bzero(ds, sizeof(struct ath_desc));

	if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol) {
		ds->ds_link = bf->bf_daddr;	/* link to self */
		flags |= HAL_TXDESC_VEOL;
	} else {
		ds->ds_link = 0;
	}
	ds->ds_data = bf->bf_segs[0].ds_addr;

	DPRINTF(ATH_DEBUG_ANY, ("%s: segaddr %p seglen %u\n", __func__,
	    (caddr_t)bf->bf_segs[0].ds_addr, (u_int)bf->bf_segs[0].ds_len));

	/*
	 * Calculate rate code.
	 * XXX everything at min xmit rate
	 */
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
		rate = rt->info[0].rateCode | rt->info[0].shortPreamble;
	} else {
		rate = rt->info[0].rateCode;
	}

	flags = HAL_TXDESC_NOACK;
	if (ic->ic_opmode == IEEE80211_M_IBSS)
		flags |= HAL_TXDESC_VEOL;

	if (!ath_hal_setup_tx_desc(ah, ds
	    , m->m_pkthdr.len + IEEE80211_CRC_LEN	/* packet length */
	    , sizeof(struct ieee80211_frame)	/* header length */
	    , HAL_PKT_TYPE_BEACON		/* Atheros packet type */
	    , 60				/* txpower XXX */
	    , rate, 1				/* series 0 rate/tries */
	    , HAL_TXKEYIX_INVALID		/* no encryption */
	    , 0					/* antenna mode */
	    , flags				/* no ack for beacons */
	    , 0					/* rts/cts rate */
	    , 0					/* rts/cts duration */
	)) {
		printf("%s: ath_hal_setup_tx_desc failed\n", __func__);
		return -1;
	}
	/* NB: beacon's BufLen must be a multiple of 4 bytes */
	/* XXX verify mbuf data area covers this roundup */
	if (!ath_hal_fill_tx_desc(ah, ds
	    , roundup(bf->bf_segs[0].ds_len, 4)	/* buffer length */
	    , AH_TRUE				/* first segment */
	    , AH_TRUE				/* last segment */
	)) {
		printf("%s: ath_hal_fill_tx_desc failed\n", __func__);
		return -1;
	}

	/* XXX it is not appropriate to bus_dmamap_sync? -dcy */

	return 0;
}

void
ath_beacon_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_buf *bf = sc->sc_bcbuf;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(ATH_DEBUG_BEACON_PROC, ("%s: pending %u\n", __func__, pending));
	if (ic->ic_opmode == IEEE80211_M_STA ||
	    bf == NULL || bf->bf_m == NULL) {
		DPRINTF(ATH_DEBUG_ANY, ("%s: ic_flags=%x bf=%p bf_m=%p\n",
		    __func__, ic->ic_flags, bf, bf ? bf->bf_m : NULL));
		return;
	}
	/* TODO: update beacon to reflect PS poll state */
	if (!ath_hal_stop_tx_dma(ah, sc->sc_bhalq)) {
		DPRINTF(ATH_DEBUG_ANY, ("%s: beacon queue %u did not stop?\n",
		    __func__, sc->sc_bhalq));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
	    bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	ath_hal_put_tx_buf(ah, sc->sc_bhalq, bf->bf_daddr);
	ath_hal_tx_start(ah, sc->sc_bhalq);
	DPRINTF(ATH_DEBUG_BEACON_PROC,
	    ("%s: TXDP%u = %p (%p)\n", __func__,
	    sc->sc_bhalq, (caddr_t)bf->bf_daddr, bf->bf_desc));
}

void
ath_beacon_free(struct ath_softc *sc)
{
	struct ath_buf *bf = sc->sc_bcbuf;

	if (bf->bf_m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		bf->bf_node = NULL;
	}
}
#endif	/* IEEE80211_STA_ONLY */

/*
 * Configure the beacon and sleep timers.
 *
 * When operating as an AP this resets the TSF and sets
 * up the hardware to notify us when we need to issue beacons.
 *
 * When operating in station mode this sets up the beacon
 * timers according to the timestamp of the last received
 * beacon and the current TSF, configures PCF and DTIM
 * handling, programs the sleep registers so the hardware
 * will wakeup in time to receive beacons, and configures
 * the beacon miss handling so we'll receive a BMISS
 * interrupt when we stop seeing beacons from the AP
 * we've associated with.
 */
void
ath_beacon_config(struct ath_softc *sc)
{
#define MS_TO_TU(x)	(((x) * 1000) / 1024)
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	u_int32_t nexttbtt, intval;

	nexttbtt = (LE_READ_4(ni->ni_tstamp + 4) << 22) |
	    (LE_READ_4(ni->ni_tstamp) >> 10);
	intval = MAX(1, ni->ni_intval) & HAL_BEACON_PERIOD;
	if (nexttbtt == 0) {	/* e.g. for ap mode */
		nexttbtt = intval;
	} else if (intval) {
		nexttbtt = roundup(nexttbtt, intval);
	}
	DPRINTF(ATH_DEBUG_BEACON, ("%s: intval %u nexttbtt %u\n",
	    __func__, ni->ni_intval, nexttbtt));
	if (ic->ic_opmode == IEEE80211_M_STA) {
		HAL_BEACON_STATE bs;
		u_int32_t bmisstime;

		/* NB: no PCF support right now */
		bzero(&bs, sizeof(bs));
		bs.bs_intval = intval;
		bs.bs_nexttbtt = nexttbtt;
		bs.bs_dtimperiod = bs.bs_intval;
		bs.bs_nextdtim = nexttbtt;
		/*
		 * Calculate the number of consecutive beacons to miss
		 * before taking a BMISS interrupt. The configuration
		 * is specified in ms, so we need to convert that to
		 * TU's and then calculate based on the beacon interval.
		 * Note that we clamp the result to at most 7 beacons.
		 */
		bmisstime = MAX(7, ic->ic_bmisstimeout);
		bs.bs_bmissthreshold = howmany(bmisstime, intval);
		if (bs.bs_bmissthreshold > 7) {
			bs.bs_bmissthreshold = 7;
		} else if (bs.bs_bmissthreshold <= 0) {
			bs.bs_bmissthreshold = 1;
		}

		/*
		 * Calculate sleep duration. The configuration is
		 * given in ms. We insure a multiple of the beacon
		 * period is used. Also, if the sleep duration is
		 * greater than the DTIM period then it makes sense
		 * to make it a multiple of that.
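		 * (MS_TO_TU() converts milliseconds to 802.11 time units
		 * of 1024us each, e.g. MS_TO_TU(100) = 100000 / 1024 = 97 TU.)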
		 *
		 * XXX fixed at 100ms
		 */
		bs.bs_sleepduration =
		    roundup(MS_TO_TU(100), bs.bs_intval);
		if (bs.bs_sleepduration > bs.bs_dtimperiod) {
			bs.bs_sleepduration =
			    roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
		}

		DPRINTF(ATH_DEBUG_BEACON,
		    ("%s: intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u"
		    " sleep %u\n"
		    , __func__
		    , bs.bs_intval
		    , bs.bs_nexttbtt
		    , bs.bs_dtimperiod
		    , bs.bs_nextdtim
		    , bs.bs_bmissthreshold
		    , bs.bs_sleepduration
		    ));
		ath_hal_set_intr(ah, 0);
		ath_hal_set_beacon_timers(ah, &bs, 0/*XXX*/, 0, 0);
		sc->sc_imask |= HAL_INT_BMISS;
		ath_hal_set_intr(ah, sc->sc_imask);
	}
#ifndef IEEE80211_STA_ONLY
	else {
		ath_hal_set_intr(ah, 0);
		if (nexttbtt == intval)
			intval |= HAL_BEACON_RESET_TSF;
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			/*
			 * In IBSS mode enable the beacon timers but only
			 * enable SWBA interrupts if we need to manually
			 * prepare beacon frames. Otherwise we use a
			 * self-linked tx descriptor and let the hardware
			 * deal with things.
			 */
			intval |= HAL_BEACON_ENA;
			if (!sc->sc_veol)
				sc->sc_imask |= HAL_INT_SWBA;
		} else if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
			/*
			 * In AP mode we enable the beacon timers and
			 * SWBA interrupts to prepare beacon frames.
			 */
			intval |= HAL_BEACON_ENA;
			sc->sc_imask |= HAL_INT_SWBA;	/* beacon prepare */
		}
		ath_hal_init_beacon(ah, nexttbtt, intval);
		ath_hal_set_intr(ah, sc->sc_imask);
		/*
		 * When using a self-linked beacon descriptor in IBSS
		 * mode load it once here.
		 */
		if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol)
			ath_beacon_proc(sc, 0);
	}
#endif
}

int
ath_desc_alloc(struct ath_softc *sc)
{
	int i, bsize, error = -1;
	struct ath_desc *ds;
	struct ath_buf *bf;

	/* allocate descriptors */
	sc->sc_desc_len = sizeof(struct ath_desc) *
	    (ATH_TXBUF * ATH_TXDESC + ATH_RXBUF + 1);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_desc_len, PAGE_SIZE,
	    0, &sc->sc_dseg, 1, &sc->sc_dnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg,
	    sc->sc_desc_len, (caddr_t *)&sc->sc_desc, BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_desc_len, 1,
	    sc->sc_desc_len, 0, 0, &sc->sc_ddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_ddmamap, sc->sc_desc,
	    sc->sc_desc_len, NULL, 0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail3;
	}

	ds = sc->sc_desc;
	sc->sc_desc_paddr = sc->sc_ddmamap->dm_segs[0].ds_addr;

	DPRINTF(ATH_DEBUG_XMIT_DESC|ATH_DEBUG_RECV_DESC,
	    ("ath_desc_alloc: DMA map: %p (%lu) -> %p (%lu)\n",
	    ds, (u_long)sc->sc_desc_len,
	    (caddr_t) sc->sc_desc_paddr, /*XXX*/ (u_long) sc->sc_desc_len));

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * (ATH_TXBUF + ATH_RXBUF + 1);
	bf = malloc(bsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		printf("%s: unable to allocate Tx/Rx buffers\n",
		    sc->sc_dev.dv_xname);
		error = ENOMEM;
		goto fail3;
	}
	sc->sc_bufptr = bf;

	TAILQ_INIT(&sc->sc_rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++) {
		bf->bf_desc = ds;
		bf->bf_daddr = sc->sc_desc_paddr +
		    ((caddr_t)ds - (caddr_t)sc->sc_desc);
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) {
			printf("%s: unable to create Rx dmamap, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			goto fail4;
		}
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
	}

	TAILQ_INIT(&sc->sc_txbuf);
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds += ATH_TXDESC) {
		bf->bf_desc = ds;
		bf->bf_daddr = sc->sc_desc_paddr +
		    ((caddr_t)ds - (caddr_t)sc->sc_desc);
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    ATH_TXDESC, MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) {
			printf("%s: unable to create Tx dmamap, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			goto fail5;
		}
		TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
	}
	TAILQ_INIT(&sc->sc_txq);

	/* beacon buffer */
	bf->bf_desc = ds;
	bf->bf_daddr = sc->sc_desc_paddr + ((caddr_t)ds - (caddr_t)sc->sc_desc);
	if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 0,
	    &bf->bf_dmamap)) != 0) {
		printf("%s: unable to create beacon dmamap, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail5;
	}
	sc->sc_bcbuf = bf;
	return 0;

fail5:
	for (i = ATH_RXBUF; i < ATH_RXBUF + ATH_TXBUF; i++) {
		if (sc->sc_bufptr[i].bf_dmamap == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap);
	}
fail4:
	for (i = 0; i < ATH_RXBUF; i++) {
		if (sc->sc_bufptr[i].bf_dmamap == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap);
	}
fail3:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap);
fail2:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap);
	sc->sc_ddmamap = NULL;
fail1:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_desc, sc->sc_desc_len);
fail0:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg);
	return error;
}

void
ath_desc_free(struct ath_softc *sc)
{
	struct ath_buf *bf;

	bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg);

	TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
	}
	TAILQ_FOREACH(bf, &sc->sc_txbuf, bf_list)
		bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
	TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
		if (bf->bf_m) {
			bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
			m_freem(bf->bf_m);
			bf->bf_m = NULL;
		}
	}
	if (sc->sc_bcbuf != NULL) {
		bus_dmamap_unload(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap);
		sc->sc_bcbuf = NULL;
	}

	TAILQ_INIT(&sc->sc_rxbuf);
	TAILQ_INIT(&sc->sc_txbuf);
	TAILQ_INIT(&sc->sc_txq);
	free(sc->sc_bufptr, M_DEVBUF, 0);
	sc->sc_bufptr = NULL;
}

struct ieee80211_node *
ath_node_alloc(struct ieee80211com *ic)
{
	struct ath_node *an;

	an = malloc(sizeof(*an), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (an) {
		int i;
		for (i = 0; i < ATH_RHIST_SIZE; i++)
			an->an_rx_hist[i].arh_ticks = ATH_RHIST_NOTIME;
		an->an_rx_hist_next = ATH_RHIST_SIZE-1;
		return &an->an_node;
	} else
		return NULL;
}

void
ath_node_free(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	struct ath_softc *sc = ic->ic_if.if_softc;
	struct ath_buf *bf;

	TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) {
		if (bf->bf_node == ni)
			bf->bf_node = NULL;
	}
	(*sc->sc_node_free)(ic, ni);
}

void
ath_node_copy(struct ieee80211com *ic,
    struct ieee80211_node *dst, const struct ieee80211_node *src)
{
	struct ath_softc *sc = ic->ic_if.if_softc;

	bcopy(&src[1], &dst[1],
	    sizeof(struct ath_node) - sizeof(struct ieee80211_node));
	(*sc->sc_node_copy)(ic, dst, src);
}

u_int8_t
ath_node_getrssi(struct ieee80211com *ic, const struct ieee80211_node *ni)
{
	const struct ath_node *an = ATH_NODE(ni);
	int i, now, nsamples, rssi;

	/*
	 * Calculate the average over the last second of sampled data.
	 */
	now = ATH_TICKS();
	nsamples = 0;
	rssi = 0;
	i = an->an_rx_hist_next;
	do {
		const struct ath_recv_hist *rh = &an->an_rx_hist[i];
		if (rh->arh_ticks == ATH_RHIST_NOTIME)
			goto done;
		if (now - rh->arh_ticks > hz)
			goto done;
		rssi += rh->arh_rssi;
		nsamples++;
		if (i == 0) {
			i = ATH_RHIST_SIZE-1;
		} else {
			i--;
		}
	} while (i != an->an_rx_hist_next);
done:
	/*
	 * Return either the average or the last known
	 * value if there is no recent data.
	 */
	return (nsamples ? rssi / nsamples : an->an_rx_hist[i].arh_rssi);
}

int
ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be
		 * multiple of the cache line size. Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = ath_getmbuf(M_DONTWAIT, MT_DATA, MCLBYTES);
		if (m == NULL) {
			DPRINTF(ATH_DEBUG_ANY,
			    ("%s: no mbuf/cluster\n", __func__));
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		bf->bf_m = m;
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(ATH_DEBUG_ANY,
			    ("%s: ath_bus_dmamap_load_mbuf failed;"
			    " error %d\n", __func__, error));
			sc->sc_stats.ast_rx_busdma++;
			return error;
		}
		KASSERT(bf->bf_nseg == 1,
		    ("ath_rxbuf_init: multi-segment packet; nseg %u",
		    bf->bf_nseg));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
	    bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors. For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY errors).
	 *
	 * To insure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end. As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally. This should be safe even
	 * if DMA is happening. When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list. This insures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->bf_desc;
	bzero(ds, sizeof(struct ath_desc));
#ifndef IEEE80211_STA_ONLY
	if (sc->sc_ic.ic_opmode != IEEE80211_M_HOSTAP)
		ds->ds_link = bf->bf_daddr;	/* link to self */
#endif
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setup_rx_desc(ah, ds
	    , m->m_len		/* buffer size */
	    , 0
	);

	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}

void
ath_rx_proc(void *arg, int npending)
{
#define PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \
	((_pa) - (_sc)->sc_desc_paddr)))
	struct ath_softc *sc = arg;
	struct ath_buf *bf;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct mbuf *m;
	struct ieee80211_frame *wh;
	struct ieee80211_frame whbuf;
	struct ieee80211_rxinfo rxi;
	struct ieee80211_node *ni;
	struct ath_node *an;
	struct ath_recv_hist *rh;
	int len;
	u_int phyerr;
	HAL_STATUS status;

	DPRINTF(ATH_DEBUG_RX_PROC, ("%s: pending %u\n", __func__, npending));
	do {
		bf = TAILQ_FIRST(&sc->sc_rxbuf);
		if (bf == NULL) {		/* NB: shouldn't happen */
			printf("%s: ath_rx_proc: no buffer!\n", ifp->if_xname);
			break;
		}
		ds = bf->bf_desc;
		if (ds->ds_link == bf->bf_daddr) {
			/* NB: never process the self-linked entry at the end */
			break;
		}
		m = bf->bf_m;
		if (m == NULL) {		/* NB: shouldn't happen */
			printf("%s: ath_rx_proc: no mbuf!\n", ifp->if_xname);
			continue;
		}
		/* XXX sync descriptor memory */
		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on. All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		status = ath_hal_proc_rx_desc(ah, ds,
		    bf->bf_daddr, PA2DESC(sc, ds->ds_link));
#ifdef AR_DEBUG
		if (ath_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(bf, status == HAL_OK);
#endif
		if (status == HAL_EINPROGRESS)
			break;
		TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

		if (ds->ds_rxstat.rs_more) {
			/*
			 * Frame spans multiple descriptors; this
			 * cannot happen yet as we don't support
			 * jumbograms. If not in monitor mode,
			 * discard the frame.
			 */

			/*
			 * Enable this if you want to see error
			 * frames in Monitor mode.
			 */
#ifdef ERROR_FRAMES
			if (ic->ic_opmode != IEEE80211_M_MONITOR) {
				/* XXX statistic */
				goto rx_next;
			}
#endif
			/* fall thru for monitor mode handling... */
*/ 1903 1904 } else if (ds->ds_rxstat.rs_status != 0) { 1905 if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC) 1906 sc->sc_stats.ast_rx_crcerr++; 1907 if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO) 1908 sc->sc_stats.ast_rx_fifoerr++; 1909 if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) 1910 sc->sc_stats.ast_rx_badcrypt++; 1911 if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) { 1912 sc->sc_stats.ast_rx_phyerr++; 1913 phyerr = ds->ds_rxstat.rs_phyerr & 0x1f; 1914 sc->sc_stats.ast_rx_phy[phyerr]++; 1915 } 1916 1917 /* 1918 * reject error frames, we normally don't want 1919 * to see them in monitor mode. 1920 */ 1921 if ((ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT ) || 1922 (ds->ds_rxstat.rs_status & HAL_RXERR_PHY)) 1923 goto rx_next; 1924 1925 /* 1926 * In monitor mode, allow through packets that 1927 * cannot be decrypted 1928 */ 1929 if ((ds->ds_rxstat.rs_status & ~HAL_RXERR_DECRYPT) || 1930 sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR) 1931 goto rx_next; 1932 } 1933 1934 len = ds->ds_rxstat.rs_datalen; 1935 if (len < IEEE80211_MIN_LEN) { 1936 DPRINTF(ATH_DEBUG_RECV, ("%s: short packet %d\n", 1937 __func__, len)); 1938 sc->sc_stats.ast_rx_tooshort++; 1939 goto rx_next; 1940 } 1941 1942 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 1943 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1944 1945 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1946 bf->bf_m = NULL; 1947 m->m_pkthdr.len = m->m_len = len; 1948 1949 #if NBPFILTER > 0 1950 if (sc->sc_drvbpf) { 1951 struct mbuf mb; 1952 1953 sc->sc_rxtap.wr_flags = IEEE80211_RADIOTAP_F_FCS; 1954 sc->sc_rxtap.wr_rate = 1955 sc->sc_hwmap[ds->ds_rxstat.rs_rate] & 1956 IEEE80211_RATE_VAL; 1957 sc->sc_rxtap.wr_antenna = ds->ds_rxstat.rs_antenna; 1958 sc->sc_rxtap.wr_rssi = ds->ds_rxstat.rs_rssi; 1959 sc->sc_rxtap.wr_max_rssi = ic->ic_max_rssi; 1960 1961 mb.m_data = (caddr_t)&sc->sc_rxtap; 1962 mb.m_len = sc->sc_rxtap_len; 1963 mb.m_next = m; 1964 mb.m_nextpkt = NULL; 1965 mb.m_type = 0; 1966 mb.m_flags = 0; 1967 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN); 1968 } 1969 #endif 1970 m_adj(m, -IEEE80211_CRC_LEN); 1971 wh = mtod(m, struct ieee80211_frame *); 1972 rxi.rxi_flags = 0; 1973 if (!ath_softcrypto && (wh->i_fc[1] & IEEE80211_FC1_WEP)) { 1974 /* 1975 * WEP is decrypted by hardware. Clear WEP bit 1976 * and trim WEP header for ieee80211_input(). 1977 */ 1978 wh->i_fc[1] &= ~IEEE80211_FC1_WEP; 1979 bcopy(wh, &whbuf, sizeof(whbuf)); 1980 m_adj(m, IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN); 1981 wh = mtod(m, struct ieee80211_frame *); 1982 bcopy(&whbuf, wh, sizeof(whbuf)); 1983 /* 1984 * Also trim WEP ICV from the tail. 1985 */ 1986 m_adj(m, -IEEE80211_WEP_CRCLEN); 1987 /* 1988 * The header has probably moved. 1989 */ 1990 wh = mtod(m, struct ieee80211_frame *); 1991 1992 rxi.rxi_flags |= IEEE80211_RXI_HWDEC; 1993 } 1994 1995 /* 1996 * Locate the node for sender, track state, and 1997 * then pass this node (referenced) up to the 802.11 1998 * layer for its use. 1999 */ 2000 ni = ieee80211_find_rxnode(ic, wh); 2001 2002 /* 2003 * Record driver-specific state. 2004 */ 2005 an = ATH_NODE(ni); 2006 if (++(an->an_rx_hist_next) == ATH_RHIST_SIZE) 2007 an->an_rx_hist_next = 0; 2008 rh = &an->an_rx_hist[an->an_rx_hist_next]; 2009 rh->arh_ticks = ATH_TICKS(); 2010 rh->arh_rssi = ds->ds_rxstat.rs_rssi; 2011 rh->arh_antenna = ds->ds_rxstat.rs_antenna; 2012 2013 /* 2014 * Send frame up for processing. 
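 * The rxinfo structure below carries the per-frame RSSI and the hardware
 * TSF timestamp for net80211; ieee80211_input() takes ownership of the mbuf.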
2015 */ 2016 rxi.rxi_rssi = ds->ds_rxstat.rs_rssi; 2017 rxi.rxi_tstamp = ds->ds_rxstat.rs_tstamp; 2018 ieee80211_input(ifp, m, ni, &rxi); 2019 2020 /* Handle the rate adaption */ 2021 ieee80211_rssadapt_input(ic, ni, &an->an_rssadapt, 2022 ds->ds_rxstat.rs_rssi); 2023 2024 /* 2025 * The frame may have caused the node to be marked for 2026 * reclamation (e.g. in response to a DEAUTH message) 2027 * so use release_node here instead of unref_node. 2028 */ 2029 ieee80211_release_node(ic, ni); 2030 2031 rx_next: 2032 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 2033 } while (ath_rxbuf_init(sc, bf) == 0); 2034 2035 ath_hal_set_rx_signal(ah); /* rx signal state monitoring */ 2036 ath_hal_start_rx(ah); /* in case of RXEOL */ 2037 #undef PA2DESC 2038 } 2039 2040 /* 2041 * XXX Size of an ACK control frame in bytes. 2042 */ 2043 #define IEEE80211_ACK_SIZE (2+2+IEEE80211_ADDR_LEN+4) 2044 2045 int 2046 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, 2047 struct ath_buf *bf, struct mbuf *m0) 2048 { 2049 struct ieee80211com *ic = &sc->sc_ic; 2050 struct ath_hal *ah = sc->sc_ah; 2051 struct ifnet *ifp = &sc->sc_ic.ic_if; 2052 int i, error, iswep, hdrlen, pktlen, len, s, tries; 2053 u_int8_t rix, cix, txrate, ctsrate; 2054 struct ath_desc *ds; 2055 struct ieee80211_frame *wh; 2056 struct ieee80211_key *k; 2057 u_int32_t iv; 2058 u_int8_t *ivp; 2059 u_int8_t hdrbuf[sizeof(struct ieee80211_frame) + 2060 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN]; 2061 u_int subtype, flags, ctsduration, antenna; 2062 HAL_PKT_TYPE atype; 2063 const HAL_RATE_TABLE *rt; 2064 HAL_BOOL shortPreamble; 2065 struct ath_node *an; 2066 u_int8_t hwqueue = HAL_TX_QUEUE_ID_DATA_MIN; 2067 2068 wh = mtod(m0, struct ieee80211_frame *); 2069 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; 2070 hdrlen = sizeof(struct ieee80211_frame); 2071 pktlen = m0->m_pkthdr.len; 2072 2073 if (ath_softcrypto && iswep) { 2074 k = ieee80211_get_txkey(ic, wh, ni); 2075 if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL) 2076 return ENOMEM; 2077 wh = mtod(m0, struct ieee80211_frame *); 2078 2079 /* reset len in case we got a new mbuf */ 2080 pktlen = m0->m_pkthdr.len; 2081 } else if (!ath_softcrypto && iswep) { 2082 bcopy(mtod(m0, caddr_t), hdrbuf, hdrlen); 2083 m_adj(m0, hdrlen); 2084 M_PREPEND(m0, sizeof(hdrbuf), M_DONTWAIT); 2085 if (m0 == NULL) { 2086 sc->sc_stats.ast_tx_nombuf++; 2087 return ENOMEM; 2088 } 2089 ivp = hdrbuf + hdrlen; 2090 wh = mtod(m0, struct ieee80211_frame *); 2091 /* 2092 * XXX 2093 * IV must not duplicate during the lifetime of the key. 2094 * But no mechanism to renew keys is defined in IEEE 802.11 2095 * for WEP. And the IV may be duplicated at other stations 2096 * because the session key itself is shared. So we use a 2097 * pseudo random IV for now, though it is not the right way. 2098 * 2099 * NB: Rather than use a strictly random IV we select a 2100 * random one to start and then increment the value for 2101 * each frame. This is an explicit tradeoff between 2102 * overhead and security. Given the basic insecurity of 2103 * WEP this seems worthwhile. 2104 */ 2105 2106 /* 2107 * Skip 'bad' IVs from Fluhrer/Mantin/Shamir: 2108 * (B, 255, N) with 3 <= B < 16 and 0 <= N <= 255 2109 */ 2110 iv = ic->ic_iv; 2111 if ((iv & 0xff00) == 0xff00) { 2112 int B = (iv & 0xff0000) >> 16; 2113 if (3 <= B && B < 16) 2114 iv = (B+1) << 16; 2115 } 2116 ic->ic_iv = iv + 1; 2117 2118 /* 2119 * NB: Preserve byte order of IV for packet 2120 * sniffers; it doesn't matter otherwise. 
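 * The key index is placed in the top two bits of the fourth IV octet
 * (ivp[3] below); the remaining six bits are pad.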
2121 */ 2122 #if BYTE_ORDER == BIG_ENDIAN 2123 ivp[0] = iv >> 0; 2124 ivp[1] = iv >> 8; 2125 ivp[2] = iv >> 16; 2126 #else 2127 ivp[2] = iv >> 0; 2128 ivp[1] = iv >> 8; 2129 ivp[0] = iv >> 16; 2130 #endif 2131 ivp[3] = ic->ic_wep_txkey << 6; /* Key ID and pad */ 2132 bcopy(hdrbuf, mtod(m0, caddr_t), sizeof(hdrbuf)); 2133 /* 2134 * The length of hdrlen and pktlen must be increased for WEP 2135 */ 2136 len = IEEE80211_WEP_IVLEN + 2137 IEEE80211_WEP_KIDLEN + 2138 IEEE80211_WEP_CRCLEN; 2139 hdrlen += len; 2140 pktlen += len; 2141 } 2142 pktlen += IEEE80211_CRC_LEN; 2143 2144 /* 2145 * Load the DMA map so any coalescing is done. This 2146 * also calculates the number of descriptors we need. 2147 */ 2148 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, 2149 BUS_DMA_NOWAIT); 2150 /* 2151 * Discard null packets and check for packets that 2152 * require too many TX descriptors. We try to convert 2153 * the latter to a cluster. 2154 */ 2155 if (error == EFBIG) { /* too many desc's, linearize */ 2156 sc->sc_stats.ast_tx_linear++; 2157 if (m_defrag(m0, M_DONTWAIT)) { 2158 sc->sc_stats.ast_tx_nomcl++; 2159 m_freem(m0); 2160 return ENOMEM; 2161 } 2162 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, 2163 BUS_DMA_NOWAIT); 2164 if (error != 0) { 2165 sc->sc_stats.ast_tx_busdma++; 2166 m_freem(m0); 2167 return error; 2168 } 2169 KASSERT(bf->bf_nseg == 1, 2170 ("ath_tx_start: packet not one segment; nseg %u", 2171 bf->bf_nseg)); 2172 } else if (error != 0) { 2173 sc->sc_stats.ast_tx_busdma++; 2174 m_freem(m0); 2175 return error; 2176 } else if (bf->bf_nseg == 0) { /* null packet, discard */ 2177 sc->sc_stats.ast_tx_nodata++; 2178 m_freem(m0); 2179 return EIO; 2180 } 2181 DPRINTF(ATH_DEBUG_XMIT, ("%s: m %p len %u\n", __func__, m0, pktlen)); 2182 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 2183 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 2184 bf->bf_m = m0; 2185 bf->bf_node = ni; /* NB: held reference */ 2186 an = ATH_NODE(ni); 2187 2188 /* setup descriptors */ 2189 ds = bf->bf_desc; 2190 rt = sc->sc_currates; 2191 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 2192 2193 /* 2194 * Calculate Atheros packet type from IEEE80211 packet header 2195 * and setup for rate calculations. 
2196 */ 2197 bf->bf_id.id_node = NULL; 2198 atype = HAL_PKT_TYPE_NORMAL; /* default */ 2199 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 2200 case IEEE80211_FC0_TYPE_MGT: 2201 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2202 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) { 2203 atype = HAL_PKT_TYPE_BEACON; 2204 } else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) { 2205 atype = HAL_PKT_TYPE_PROBE_RESP; 2206 } else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) { 2207 atype = HAL_PKT_TYPE_ATIM; 2208 } 2209 rix = 0; /* XXX lowest rate */ 2210 break; 2211 case IEEE80211_FC0_TYPE_CTL: 2212 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2213 if (subtype == IEEE80211_FC0_SUBTYPE_PS_POLL) 2214 atype = HAL_PKT_TYPE_PSPOLL; 2215 rix = 0; /* XXX lowest rate */ 2216 break; 2217 default: 2218 /* remember link conditions for rate adaptation algorithm */ 2219 if (ic->ic_fixed_rate == -1) { 2220 bf->bf_id.id_len = m0->m_pkthdr.len; 2221 bf->bf_id.id_rateidx = ni->ni_txrate; 2222 bf->bf_id.id_node = ni; 2223 bf->bf_id.id_rssi = ath_node_getrssi(ic, ni); 2224 } 2225 ni->ni_txrate = ieee80211_rssadapt_choose(&an->an_rssadapt, 2226 &ni->ni_rates, wh, m0->m_pkthdr.len, ic->ic_fixed_rate, 2227 ifp->if_xname, 0); 2228 rix = sc->sc_rixmap[ni->ni_rates.rs_rates[ni->ni_txrate] & 2229 IEEE80211_RATE_VAL]; 2230 if (rix == 0xff) { 2231 printf("%s: bogus xmit rate 0x%x (idx 0x%x)\n", 2232 ifp->if_xname, ni->ni_rates.rs_rates[ni->ni_txrate], 2233 ni->ni_txrate); 2234 sc->sc_stats.ast_tx_badrate++; 2235 m_freem(m0); 2236 return EIO; 2237 } 2238 break; 2239 } 2240 2241 /* 2242 * NB: the 802.11 layer marks whether or not we should 2243 * use short preamble based on the current mode and 2244 * negotiated parameters. 2245 */ 2246 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 2247 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 2248 txrate = rt->info[rix].rateCode | rt->info[rix].shortPreamble; 2249 shortPreamble = AH_TRUE; 2250 sc->sc_stats.ast_tx_shortpre++; 2251 } else { 2252 txrate = rt->info[rix].rateCode; 2253 shortPreamble = AH_FALSE; 2254 } 2255 2256 /* 2257 * Calculate miscellaneous flags. 2258 */ 2259 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for wep errors */ 2260 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2261 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ 2262 sc->sc_stats.ast_tx_noack++; 2263 } else if (pktlen > ic->ic_rtsthreshold) { 2264 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ 2265 sc->sc_stats.ast_tx_rts++; 2266 } 2267 2268 /* 2269 * Calculate duration. This logically belongs in the 802.11 2270 * layer but it lacks sufficient information to calculate it. 2271 */ 2272 if ((flags & HAL_TXDESC_NOACK) == 0 && 2273 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 2274 u_int16_t dur; 2275 /* 2276 * XXX not right with fragmentation. 2277 */ 2278 dur = ath_hal_computetxtime(ah, rt, IEEE80211_ACK_SIZE, 2279 rix, shortPreamble); 2280 *((u_int16_t*) wh->i_dur) = htole16(dur); 2281 } 2282 2283 /* 2284 * Calculate RTS/CTS rate and duration if needed. 2285 */ 2286 ctsduration = 0; 2287 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) { 2288 /* 2289 * CTS transmit rate is derived from the transmit rate 2290 * by looking in the h/w rate table. We must also factor 2291 * in whether or not a short preamble is to be used. 
2292 */ 2293 cix = rt->info[rix].controlRate; 2294 ctsrate = rt->info[cix].rateCode; 2295 if (shortPreamble) 2296 ctsrate |= rt->info[cix].shortPreamble; 2297 /* 2298 * Compute the transmit duration based on the size 2299 * of an ACK frame. We call into the HAL to do the 2300 * computation since it depends on the characteristics 2301 * of the actual PHY being used. 2302 */ 2303 if (flags & HAL_TXDESC_RTSENA) { /* SIFS + CTS */ 2304 ctsduration += ath_hal_computetxtime(ah, 2305 rt, IEEE80211_ACK_SIZE, cix, shortPreamble); 2306 } 2307 /* SIFS + data */ 2308 ctsduration += ath_hal_computetxtime(ah, 2309 rt, pktlen, rix, shortPreamble); 2310 if ((flags & HAL_TXDESC_NOACK) == 0) { /* SIFS + ACK */ 2311 ctsduration += ath_hal_computetxtime(ah, 2312 rt, IEEE80211_ACK_SIZE, cix, shortPreamble); 2313 } 2314 } else 2315 ctsrate = 0; 2316 2317 /* 2318 * For now use the antenna on which the last good 2319 * frame was received on. We assume this field is 2320 * initialized to 0 which gives us ``auto'' or the 2321 * ``default'' antenna. 2322 */ 2323 if (an->an_tx_antenna) { 2324 antenna = an->an_tx_antenna; 2325 } else { 2326 antenna = an->an_rx_hist[an->an_rx_hist_next].arh_antenna; 2327 } 2328 2329 #if NBPFILTER > 0 2330 if (ic->ic_rawbpf) 2331 bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT); 2332 2333 if (sc->sc_drvbpf) { 2334 struct mbuf mb; 2335 2336 sc->sc_txtap.wt_flags = 0; 2337 if (shortPreamble) 2338 sc->sc_txtap.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2339 if (!ath_softcrypto && iswep) 2340 sc->sc_txtap.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2341 sc->sc_txtap.wt_rate = ni->ni_rates.rs_rates[ni->ni_txrate] & 2342 IEEE80211_RATE_VAL; 2343 sc->sc_txtap.wt_txpower = 30; 2344 sc->sc_txtap.wt_antenna = antenna; 2345 sc->sc_txtap.wt_hwqueue = hwqueue; 2346 2347 mb.m_data = (caddr_t)&sc->sc_txtap; 2348 mb.m_len = sc->sc_txtap_len; 2349 mb.m_next = m0; 2350 mb.m_nextpkt = NULL; 2351 mb.m_type = 0; 2352 mb.m_flags = 0; 2353 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT); 2354 } 2355 #endif 2356 2357 /* 2358 * Formulate first tx descriptor with tx controls. 2359 */ 2360 tries = IEEE80211_IS_MULTICAST(wh->i_addr1) ? 1 : 15; 2361 /* XXX check return value? */ 2362 ath_hal_setup_tx_desc(ah, ds 2363 , pktlen /* packet length */ 2364 , hdrlen /* header length */ 2365 , atype /* Atheros packet type */ 2366 , 60 /* txpower XXX */ 2367 , txrate, tries /* series 0 rate/tries */ 2368 , iswep ? sc->sc_ic.ic_wep_txkey : HAL_TXKEYIX_INVALID 2369 , antenna /* antenna mode */ 2370 , flags /* flags */ 2371 , ctsrate /* rts/cts rate */ 2372 , ctsduration /* rts/cts duration */ 2373 ); 2374 #ifdef notyet 2375 ath_hal_setup_xtx_desc(ah, ds 2376 , AH_FALSE /* short preamble */ 2377 , 0, 0 /* series 1 rate/tries */ 2378 , 0, 0 /* series 2 rate/tries */ 2379 , 0, 0 /* series 3 rate/tries */ 2380 ); 2381 #endif 2382 /* 2383 * Fillin the remainder of the descriptor info. 
2384 */ 2385 for (i = 0; i < bf->bf_nseg; i++, ds++) { 2386 ds->ds_data = bf->bf_segs[i].ds_addr; 2387 if (i == bf->bf_nseg - 1) { 2388 ds->ds_link = 0; 2389 } else { 2390 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1); 2391 } 2392 ath_hal_fill_tx_desc(ah, ds 2393 , bf->bf_segs[i].ds_len /* segment length */ 2394 , i == 0 /* first segment */ 2395 , i == bf->bf_nseg - 1 /* last segment */ 2396 ); 2397 DPRINTF(ATH_DEBUG_XMIT, 2398 ("%s: %d: %08x %08x %08x %08x %08x %08x\n", 2399 __func__, i, ds->ds_link, ds->ds_data, 2400 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1])); 2401 } 2402 2403 /* 2404 * Insert the frame on the outbound list and 2405 * pass it on to the hardware. 2406 */ 2407 s = splnet(); 2408 TAILQ_INSERT_TAIL(&sc->sc_txq, bf, bf_list); 2409 if (sc->sc_txlink == NULL) { 2410 ath_hal_put_tx_buf(ah, sc->sc_txhalq[hwqueue], bf->bf_daddr); 2411 DPRINTF(ATH_DEBUG_XMIT, ("%s: TXDP0 = %p (%p)\n", __func__, 2412 (caddr_t)bf->bf_daddr, bf->bf_desc)); 2413 } else { 2414 *sc->sc_txlink = bf->bf_daddr; 2415 DPRINTF(ATH_DEBUG_XMIT, ("%s: link(%p)=%p (%p)\n", __func__, 2416 sc->sc_txlink, (caddr_t)bf->bf_daddr, bf->bf_desc)); 2417 } 2418 sc->sc_txlink = &bf->bf_desc[bf->bf_nseg - 1].ds_link; 2419 splx(s); 2420 2421 ath_hal_tx_start(ah, sc->sc_txhalq[hwqueue]); 2422 return 0; 2423 } 2424 2425 void 2426 ath_tx_proc(void *arg, int npending) 2427 { 2428 struct ath_softc *sc = arg; 2429 struct ath_hal *ah = sc->sc_ah; 2430 struct ath_buf *bf; 2431 struct ieee80211com *ic = &sc->sc_ic; 2432 struct ifnet *ifp = &ic->ic_if; 2433 struct ath_desc *ds; 2434 struct ieee80211_node *ni; 2435 struct ath_node *an; 2436 int sr, lr, s; 2437 HAL_STATUS status; 2438 2439 for (;;) { 2440 s = splnet(); 2441 bf = TAILQ_FIRST(&sc->sc_txq); 2442 if (bf == NULL) { 2443 sc->sc_txlink = NULL; 2444 splx(s); 2445 break; 2446 } 2447 /* only the last descriptor is needed */ 2448 ds = &bf->bf_desc[bf->bf_nseg - 1]; 2449 status = ath_hal_proc_tx_desc(ah, ds); 2450 #ifdef AR_DEBUG 2451 if (ath_debug & ATH_DEBUG_XMIT_DESC) 2452 ath_printtxbuf(bf, status == HAL_OK); 2453 #endif 2454 if (status == HAL_EINPROGRESS) { 2455 splx(s); 2456 break; 2457 } 2458 TAILQ_REMOVE(&sc->sc_txq, bf, bf_list); 2459 splx(s); 2460 2461 ni = bf->bf_node; 2462 if (ni != NULL) { 2463 an = (struct ath_node *) ni; 2464 if (ds->ds_txstat.ts_status == 0) { 2465 if (bf->bf_id.id_node != NULL) 2466 ieee80211_rssadapt_raise_rate(ic, 2467 &an->an_rssadapt, &bf->bf_id); 2468 an->an_tx_antenna = ds->ds_txstat.ts_antenna; 2469 } else { 2470 if (bf->bf_id.id_node != NULL) 2471 ieee80211_rssadapt_lower_rate(ic, ni, 2472 &an->an_rssadapt, &bf->bf_id); 2473 if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY) 2474 sc->sc_stats.ast_tx_xretries++; 2475 if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO) 2476 sc->sc_stats.ast_tx_fifoerr++; 2477 if (ds->ds_txstat.ts_status & HAL_TXERR_FILT) 2478 sc->sc_stats.ast_tx_filtered++; 2479 an->an_tx_antenna = 0; /* invalidate */ 2480 } 2481 sr = ds->ds_txstat.ts_shortretry; 2482 lr = ds->ds_txstat.ts_longretry; 2483 sc->sc_stats.ast_tx_shortretry += sr; 2484 sc->sc_stats.ast_tx_longretry += lr; 2485 /* 2486 * Reclaim reference to node. 2487 * 2488 * NB: the node may be reclaimed here if, for example 2489 * this is a DEAUTH message that was sent and the 2490 * node was timed out due to inactivity. 
2491 */ 2492 ieee80211_release_node(ic, ni); 2493 } 2494 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 2495 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2496 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2497 m_freem(bf->bf_m); 2498 bf->bf_m = NULL; 2499 bf->bf_node = NULL; 2500 2501 s = splnet(); 2502 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 2503 splx(s); 2504 } 2505 ifq_clr_oactive(&ifp->if_snd); 2506 sc->sc_tx_timer = 0; 2507 2508 ath_start(ifp); 2509 } 2510 2511 /* 2512 * Drain the transmit queue and reclaim resources. 2513 */ 2514 void 2515 ath_draintxq(struct ath_softc *sc) 2516 { 2517 struct ath_hal *ah = sc->sc_ah; 2518 struct ieee80211com *ic = &sc->sc_ic; 2519 struct ifnet *ifp = &ic->ic_if; 2520 struct ieee80211_node *ni; 2521 struct ath_buf *bf; 2522 int s, i; 2523 2524 /* XXX return value */ 2525 if (!sc->sc_invalid) { 2526 for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) { 2527 /* don't touch the hardware if marked invalid */ 2528 (void) ath_hal_stop_tx_dma(ah, sc->sc_txhalq[i]); 2529 DPRINTF(ATH_DEBUG_RESET, 2530 ("%s: tx queue %d (%p), link %p\n", __func__, i, 2531 (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah, 2532 sc->sc_txhalq[i]), sc->sc_txlink)); 2533 } 2534 (void) ath_hal_stop_tx_dma(ah, sc->sc_bhalq); 2535 DPRINTF(ATH_DEBUG_RESET, 2536 ("%s: beacon queue (%p)\n", __func__, 2537 (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah, sc->sc_bhalq))); 2538 } 2539 for (;;) { 2540 s = splnet(); 2541 bf = TAILQ_FIRST(&sc->sc_txq); 2542 if (bf == NULL) { 2543 sc->sc_txlink = NULL; 2544 splx(s); 2545 break; 2546 } 2547 TAILQ_REMOVE(&sc->sc_txq, bf, bf_list); 2548 splx(s); 2549 #ifdef AR_DEBUG 2550 if (ath_debug & ATH_DEBUG_RESET) { 2551 ath_printtxbuf(bf, 2552 ath_hal_proc_tx_desc(ah, bf->bf_desc) == HAL_OK); 2553 } 2554 #endif /* AR_DEBUG */ 2555 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2556 m_freem(bf->bf_m); 2557 bf->bf_m = NULL; 2558 ni = bf->bf_node; 2559 bf->bf_node = NULL; 2560 s = splnet(); 2561 if (ni != NULL) { 2562 /* 2563 * Reclaim node reference. 2564 */ 2565 ieee80211_release_node(ic, ni); 2566 } 2567 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 2568 splx(s); 2569 } 2570 ifq_clr_oactive(&ifp->if_snd); 2571 sc->sc_tx_timer = 0; 2572 } 2573 2574 /* 2575 * Disable the receive h/w in preparation for a reset. 2576 */ 2577 void 2578 ath_stoprecv(struct ath_softc *sc) 2579 { 2580 #define PA2DESC(_sc, _pa) \ 2581 ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \ 2582 ((_pa) - (_sc)->sc_desc_paddr))) 2583 struct ath_hal *ah = sc->sc_ah; 2584 2585 ath_hal_stop_pcu_recv(ah); /* disable PCU */ 2586 ath_hal_set_rx_filter(ah, 0); /* clear recv filter */ 2587 ath_hal_stop_rx_dma(ah); /* disable DMA engine */ 2588 #ifdef AR_DEBUG 2589 if (ath_debug & ATH_DEBUG_RESET) { 2590 struct ath_buf *bf; 2591 2592 printf("%s: rx queue %p, link %p\n", __func__, 2593 (caddr_t)(u_intptr_t)ath_hal_get_rx_buf(ah), sc->sc_rxlink); 2594 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 2595 struct ath_desc *ds = bf->bf_desc; 2596 if (ath_hal_proc_rx_desc(ah, ds, bf->bf_daddr, 2597 PA2DESC(sc, ds->ds_link)) == HAL_OK) 2598 ath_printrxbuf(bf, 1); 2599 } 2600 } 2601 #endif 2602 sc->sc_rxlink = NULL; /* just in case */ 2603 #undef PA2DESC 2604 } 2605 2606 /* 2607 * Enable the receive h/w following a reset. 
2608 */ 2609 int 2610 ath_startrecv(struct ath_softc *sc) 2611 { 2612 struct ath_hal *ah = sc->sc_ah; 2613 struct ath_buf *bf; 2614 2615 sc->sc_rxlink = NULL; 2616 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 2617 int error = ath_rxbuf_init(sc, bf); 2618 if (error != 0) { 2619 DPRINTF(ATH_DEBUG_RECV, 2620 ("%s: ath_rxbuf_init failed %d\n", 2621 __func__, error)); 2622 return error; 2623 } 2624 } 2625 2626 bf = TAILQ_FIRST(&sc->sc_rxbuf); 2627 ath_hal_put_rx_buf(ah, bf->bf_daddr); 2628 ath_hal_start_rx(ah); /* enable recv descriptors */ 2629 ath_mode_init(sc); /* set filters, etc. */ 2630 ath_hal_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ 2631 return 0; 2632 } 2633 2634 /* 2635 * Set/change channels. If the channel is really being changed, 2636 * it's done by resetting the chip. To accomplish this we must 2637 * first cleanup any pending DMA, then restart stuff after a la 2638 * ath_init. 2639 */ 2640 int 2641 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 2642 { 2643 struct ath_hal *ah = sc->sc_ah; 2644 struct ieee80211com *ic = &sc->sc_ic; 2645 struct ifnet *ifp = &ic->ic_if; 2646 2647 DPRINTF(ATH_DEBUG_ANY, ("%s: %u (%u MHz) -> %u (%u MHz)\n", __func__, 2648 ieee80211_chan2ieee(ic, ic->ic_ibss_chan), 2649 ic->ic_ibss_chan->ic_freq, 2650 ieee80211_chan2ieee(ic, chan), chan->ic_freq)); 2651 if (chan != ic->ic_ibss_chan) { 2652 HAL_STATUS status; 2653 HAL_CHANNEL hchan; 2654 enum ieee80211_phymode mode; 2655 2656 /* 2657 * To switch channels clear any pending DMA operations; 2658 * wait long enough for the RX fifo to drain, reset the 2659 * hardware at the new frequency, and then re-enable 2660 * the relevant bits of the h/w. 2661 */ 2662 ath_hal_set_intr(ah, 0); /* disable interrupts */ 2663 ath_draintxq(sc); /* clear pending tx frames */ 2664 ath_stoprecv(sc); /* turn off frame recv */ 2665 /* 2666 * Convert to a HAL channel description with 2667 * the flags constrained to reflect the current 2668 * operating mode. 2669 */ 2670 hchan.channel = chan->ic_freq; 2671 hchan.channelFlags = ath_chan2flags(ic, chan); 2672 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, 2673 &status)) { 2674 printf("%s: ath_chan_set: unable to reset " 2675 "channel %u (%u MHz)\n", ifp->if_xname, 2676 ieee80211_chan2ieee(ic, chan), chan->ic_freq); 2677 return EIO; 2678 } 2679 ath_set_slot_time(sc); 2680 /* 2681 * Re-enable rx framework. 2682 */ 2683 if (ath_startrecv(sc) != 0) { 2684 printf("%s: ath_chan_set: unable to restart recv " 2685 "logic\n", ifp->if_xname); 2686 return EIO; 2687 } 2688 2689 #if NBPFILTER > 0 2690 /* 2691 * Update BPF state. 2692 */ 2693 sc->sc_txtap.wt_chan_freq = sc->sc_rxtap.wr_chan_freq = 2694 htole16(chan->ic_freq); 2695 sc->sc_txtap.wt_chan_flags = sc->sc_rxtap.wr_chan_flags = 2696 htole16(chan->ic_flags); 2697 #endif 2698 2699 /* 2700 * Change channels and update the h/w rate map 2701 * if we're switching; e.g. 11a to 11b/g. 2702 */ 2703 ic->ic_ibss_chan = chan; 2704 mode = ieee80211_chan2mode(ic, chan); 2705 if (mode != sc->sc_curmode) 2706 ath_setcurmode(sc, mode); 2707 2708 /* 2709 * Re-enable interrupts. 
2710 */ 2711 ath_hal_set_intr(ah, sc->sc_imask); 2712 } 2713 return 0; 2714 } 2715 2716 void 2717 ath_next_scan(void *arg) 2718 { 2719 struct ath_softc *sc = arg; 2720 struct ieee80211com *ic = &sc->sc_ic; 2721 struct ifnet *ifp = &ic->ic_if; 2722 int s; 2723 2724 /* don't call ath_start w/o network interrupts blocked */ 2725 s = splnet(); 2726 2727 if (ic->ic_state == IEEE80211_S_SCAN) 2728 ieee80211_next_scan(ifp); 2729 splx(s); 2730 } 2731 2732 int 2733 ath_set_slot_time(struct ath_softc *sc) 2734 { 2735 struct ath_hal *ah = sc->sc_ah; 2736 struct ieee80211com *ic = &sc->sc_ic; 2737 2738 if (ic->ic_flags & IEEE80211_F_SHSLOT) 2739 return (ath_hal_set_slot_time(ah, HAL_SLOT_TIME_9)); 2740 2741 return (0); 2742 } 2743 2744 /* 2745 * Periodically recalibrate the PHY to account 2746 * for temperature/environment changes. 2747 */ 2748 void 2749 ath_calibrate(void *arg) 2750 { 2751 struct ath_softc *sc = arg; 2752 struct ath_hal *ah = sc->sc_ah; 2753 struct ieee80211com *ic = &sc->sc_ic; 2754 struct ieee80211_channel *c; 2755 HAL_CHANNEL hchan; 2756 int s; 2757 2758 sc->sc_stats.ast_per_cal++; 2759 2760 /* 2761 * Convert to a HAL channel description with the flags 2762 * constrained to reflect the current operating mode. 2763 */ 2764 c = ic->ic_ibss_chan; 2765 hchan.channel = c->ic_freq; 2766 hchan.channelFlags = ath_chan2flags(ic, c); 2767 2768 s = splnet(); 2769 DPRINTF(ATH_DEBUG_CALIBRATE, 2770 ("%s: channel %u/%x\n", __func__, c->ic_freq, c->ic_flags)); 2771 2772 if (ath_hal_get_rf_gain(ah) == HAL_RFGAIN_NEED_CHANGE) { 2773 /* 2774 * Rfgain is out of bounds, reset the chip 2775 * to load new gain values. 2776 */ 2777 sc->sc_stats.ast_per_rfgain++; 2778 ath_reset(sc, 1); 2779 } 2780 if (!ath_hal_calibrate(ah, &hchan)) { 2781 DPRINTF(ATH_DEBUG_ANY, 2782 ("%s: calibration of channel %u failed\n", 2783 __func__, c->ic_freq)); 2784 sc->sc_stats.ast_per_calfail++; 2785 } 2786 timeout_add_sec(&sc->sc_cal_to, ath_calinterval); 2787 splx(s); 2788 } 2789 2790 void 2791 ath_ledstate(struct ath_softc *sc, enum ieee80211_state state) 2792 { 2793 HAL_LED_STATE led = HAL_LED_INIT; 2794 u_int32_t softled = AR5K_SOFTLED_OFF; 2795 2796 switch (state) { 2797 case IEEE80211_S_INIT: 2798 break; 2799 case IEEE80211_S_SCAN: 2800 led = HAL_LED_SCAN; 2801 break; 2802 case IEEE80211_S_AUTH: 2803 led = HAL_LED_AUTH; 2804 break; 2805 case IEEE80211_S_ASSOC: 2806 led = HAL_LED_ASSOC; 2807 softled = AR5K_SOFTLED_ON; 2808 break; 2809 case IEEE80211_S_RUN: 2810 led = HAL_LED_RUN; 2811 softled = AR5K_SOFTLED_ON; 2812 break; 2813 } 2814 2815 ath_hal_set_ledstate(sc->sc_ah, led); 2816 if (sc->sc_softled) { 2817 ath_hal_set_gpio_output(sc->sc_ah, AR5K_SOFTLED_PIN); 2818 ath_hal_set_gpio(sc->sc_ah, AR5K_SOFTLED_PIN, softled); 2819 } 2820 } 2821 2822 int 2823 ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 2824 { 2825 struct ifnet *ifp = &ic->ic_if; 2826 struct ath_softc *sc = ifp->if_softc; 2827 struct ath_hal *ah = sc->sc_ah; 2828 struct ieee80211_node *ni; 2829 const u_int8_t *bssid; 2830 int error, i; 2831 2832 u_int32_t rfilt; 2833 2834 DPRINTF(ATH_DEBUG_ANY, ("%s: %s -> %s\n", __func__, 2835 ieee80211_state_name[ic->ic_state], 2836 ieee80211_state_name[nstate])); 2837 2838 timeout_del(&sc->sc_scan_to); 2839 timeout_del(&sc->sc_cal_to); 2840 ath_ledstate(sc, nstate); 2841 2842 if (nstate == IEEE80211_S_INIT) { 2843 timeout_del(&sc->sc_rssadapt_to); 2844 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 2845 ath_hal_set_intr(ah, sc->sc_imask); 2846 return (*sc->sc_newstate)(ic, nstate, arg); 
2847 } 2848 ni = ic->ic_bss; 2849 error = ath_chan_set(sc, ni->ni_chan); 2850 if (error != 0) 2851 goto bad; 2852 rfilt = ath_calcrxfilter(sc); 2853 if (nstate == IEEE80211_S_SCAN || 2854 ic->ic_opmode == IEEE80211_M_MONITOR) { 2855 bssid = sc->sc_broadcast_addr; 2856 } else { 2857 bssid = ni->ni_bssid; 2858 } 2859 ath_hal_set_rx_filter(ah, rfilt); 2860 DPRINTF(ATH_DEBUG_ANY, ("%s: RX filter 0x%x bssid %s\n", 2861 __func__, rfilt, ether_sprintf((u_char*)bssid))); 2862 2863 if (nstate == IEEE80211_S_RUN && ic->ic_opmode == IEEE80211_M_STA) { 2864 ath_hal_set_associd(ah, bssid, ni->ni_associd); 2865 } else { 2866 ath_hal_set_associd(ah, bssid, 0); 2867 } 2868 2869 if (!ath_softcrypto && (ic->ic_flags & IEEE80211_F_WEPON)) { 2870 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 2871 if (ath_hal_is_key_valid(ah, i)) 2872 ath_hal_set_key_lladdr(ah, i, bssid); 2873 } 2874 } 2875 2876 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 2877 /* nothing to do */ 2878 } else if (nstate == IEEE80211_S_RUN) { 2879 DPRINTF(ATH_DEBUG_ANY, ("%s(RUN): " 2880 "ic_flags=0x%08x iv=%d bssid=%s " 2881 "capinfo=0x%04x chan=%d\n", 2882 __func__, 2883 ic->ic_flags, 2884 ni->ni_intval, 2885 ether_sprintf(ni->ni_bssid), 2886 ni->ni_capinfo, 2887 ieee80211_chan2ieee(ic, ni->ni_chan))); 2888 2889 /* 2890 * Allocate and setup the beacon frame for AP or adhoc mode. 2891 */ 2892 #ifndef IEEE80211_STA_ONLY 2893 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 2894 ic->ic_opmode == IEEE80211_M_IBSS) { 2895 error = ath_beacon_alloc(sc, ni); 2896 if (error != 0) 2897 goto bad; 2898 } 2899 #endif 2900 /* 2901 * Configure the beacon and sleep timers. 2902 */ 2903 ath_beacon_config(sc); 2904 } else { 2905 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 2906 ath_hal_set_intr(ah, sc->sc_imask); 2907 } 2908 2909 /* 2910 * Invoke the parent method to complete the work. 2911 */ 2912 error = (*sc->sc_newstate)(ic, nstate, arg); 2913 2914 if (nstate == IEEE80211_S_RUN) { 2915 /* start periodic recalibration timer */ 2916 timeout_add_sec(&sc->sc_cal_to, ath_calinterval); 2917 2918 if (ic->ic_opmode != IEEE80211_M_MONITOR) 2919 timeout_add_msec(&sc->sc_rssadapt_to, 100); 2920 } else if (nstate == IEEE80211_S_SCAN) { 2921 /* start ap/neighbor scan timer */ 2922 timeout_add_msec(&sc->sc_scan_to, ath_dwelltime); 2923 } 2924 bad: 2925 return error; 2926 } 2927 2928 #ifndef IEEE80211_STA_ONLY 2929 void 2930 ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m, 2931 struct ieee80211_node *ni, struct ieee80211_rxinfo *rxi, int subtype) 2932 { 2933 struct ath_softc *sc = (struct ath_softc*)ic->ic_softc; 2934 struct ath_hal *ah = sc->sc_ah; 2935 2936 (*sc->sc_recv_mgmt)(ic, m, ni, rxi, subtype); 2937 2938 switch (subtype) { 2939 case IEEE80211_FC0_SUBTYPE_PROBE_RESP: 2940 case IEEE80211_FC0_SUBTYPE_BEACON: 2941 if (ic->ic_opmode != IEEE80211_M_IBSS || 2942 ic->ic_state != IEEE80211_S_RUN) 2943 break; 2944 if (ieee80211_ibss_merge(ic, ni, ath_hal_get_tsf64(ah)) == 2945 ENETRESET) 2946 ath_hal_set_associd(ah, ic->ic_bss->ni_bssid, 0); 2947 break; 2948 default: 2949 break; 2950 } 2951 return; 2952 } 2953 #endif 2954 2955 /* 2956 * Setup driver-specific state for a newly associated node. 2957 * Note that we're called also on a re-associate, the isnew 2958 * param tells us if this is the first time or not. 
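 * NB: at present this routine does nothing beyond bailing out early in
 * monitor mode; no per-node transmit state is (re)initialized here.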
2959 */ 2960 void 2961 ath_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew) 2962 { 2963 if (ic->ic_opmode == IEEE80211_M_MONITOR) 2964 return; 2965 } 2966 2967 int 2968 ath_getchannels(struct ath_softc *sc, HAL_BOOL outdoor, HAL_BOOL xchanmode) 2969 { 2970 struct ieee80211com *ic = &sc->sc_ic; 2971 struct ifnet *ifp = &ic->ic_if; 2972 struct ath_hal *ah = sc->sc_ah; 2973 HAL_CHANNEL *chans; 2974 int i, ix, nchan; 2975 2976 sc->sc_nchan = 0; 2977 chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL), 2978 M_TEMP, M_NOWAIT); 2979 if (chans == NULL) { 2980 printf("%s: unable to allocate channel table\n", ifp->if_xname); 2981 return ENOMEM; 2982 } 2983 if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan, 2984 HAL_MODE_ALL, outdoor, xchanmode)) { 2985 printf("%s: unable to collect channel list from hal\n", 2986 ifp->if_xname); 2987 free(chans, M_TEMP, 0); 2988 return EINVAL; 2989 } 2990 2991 /* 2992 * Convert HAL channels to ieee80211 ones and insert 2993 * them in the table according to their channel number. 2994 */ 2995 for (i = 0; i < nchan; i++) { 2996 HAL_CHANNEL *c = &chans[i]; 2997 ix = ieee80211_mhz2ieee(c->channel, c->channelFlags); 2998 if (ix > IEEE80211_CHAN_MAX) { 2999 printf("%s: bad hal channel %u (%u/%x) ignored\n", 3000 ifp->if_xname, ix, c->channel, c->channelFlags); 3001 continue; 3002 } 3003 DPRINTF(ATH_DEBUG_ANY, 3004 ("%s: HAL channel %d/%d freq %d flags %#04x idx %d\n", 3005 sc->sc_dev.dv_xname, i, nchan, c->channel, c->channelFlags, 3006 ix)); 3007 /* NB: flags are known to be compatible */ 3008 if (ic->ic_channels[ix].ic_freq == 0) { 3009 ic->ic_channels[ix].ic_freq = c->channel; 3010 ic->ic_channels[ix].ic_flags = c->channelFlags; 3011 } else { 3012 /* channels overlap; e.g. 11g and 11b */ 3013 ic->ic_channels[ix].ic_flags |= c->channelFlags; 3014 } 3015 /* count valid channels */ 3016 sc->sc_nchan++; 3017 } 3018 free(chans, M_TEMP, 0); 3019 3020 if (sc->sc_nchan < 1) { 3021 printf("%s: no valid channels for regdomain %s(%u)\n", 3022 ifp->if_xname, ieee80211_regdomain2name(ah->ah_regdomain), 3023 ah->ah_regdomain); 3024 return ENOENT; 3025 } 3026 3027 /* set an initial channel */ 3028 ic->ic_ibss_chan = &ic->ic_channels[0]; 3029 3030 return 0; 3031 } 3032 3033 int 3034 ath_rate_setup(struct ath_softc *sc, u_int mode) 3035 { 3036 struct ath_hal *ah = sc->sc_ah; 3037 struct ieee80211com *ic = &sc->sc_ic; 3038 const HAL_RATE_TABLE *rt; 3039 struct ieee80211_rateset *rs; 3040 int i, maxrates; 3041 3042 switch (mode) { 3043 case IEEE80211_MODE_11A: 3044 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11A); 3045 break; 3046 case IEEE80211_MODE_11B: 3047 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11B); 3048 break; 3049 case IEEE80211_MODE_11G: 3050 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11G); 3051 break; 3052 default: 3053 DPRINTF(ATH_DEBUG_ANY, 3054 ("%s: invalid mode %u\n", __func__, mode)); 3055 return 0; 3056 } 3057 rt = sc->sc_rates[mode]; 3058 if (rt == NULL) 3059 return 0; 3060 if (rt->rateCount > IEEE80211_RATE_MAXSIZE) { 3061 DPRINTF(ATH_DEBUG_ANY, 3062 ("%s: rate table too small (%u > %u)\n", 3063 __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE)); 3064 maxrates = IEEE80211_RATE_MAXSIZE; 3065 } else { 3066 maxrates = rt->rateCount; 3067 } 3068 rs = &ic->ic_sup_rates[mode]; 3069 for (i = 0; i < maxrates; i++) 3070 rs->rs_rates[i] = rt->info[i].dot11Rate; 3071 rs->rs_nrates = maxrates; 3072 return 1; 3073 } 3074 3075 void 3076 ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 
3077 { 3078 const HAL_RATE_TABLE *rt; 3079 struct ieee80211com *ic = &sc->sc_ic; 3080 struct ieee80211_node *ni; 3081 int i; 3082 3083 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 3084 rt = sc->sc_rates[mode]; 3085 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 3086 for (i = 0; i < rt->rateCount; i++) 3087 sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i; 3088 bzero(sc->sc_hwmap, sizeof(sc->sc_hwmap)); 3089 for (i = 0; i < 32; i++) 3090 sc->sc_hwmap[i] = rt->info[rt->rateCodeToIndex[i]].dot11Rate; 3091 sc->sc_currates = rt; 3092 sc->sc_curmode = mode; 3093 ni = ic->ic_bss; 3094 ni->ni_rates.rs_nrates = sc->sc_currates->rateCount; 3095 if (ni->ni_txrate >= ni->ni_rates.rs_nrates) 3096 ni->ni_txrate = 0; 3097 } 3098 3099 void 3100 ath_rssadapt_updatenode(void *arg, struct ieee80211_node *ni) 3101 { 3102 struct ath_node *an = ATH_NODE(ni); 3103 3104 ieee80211_rssadapt_updatestats(&an->an_rssadapt); 3105 } 3106 3107 void 3108 ath_rssadapt_updatestats(void *arg) 3109 { 3110 struct ath_softc *sc = (struct ath_softc *)arg; 3111 struct ieee80211com *ic = &sc->sc_ic; 3112 3113 if (ic->ic_opmode == IEEE80211_M_STA) { 3114 ath_rssadapt_updatenode(arg, ic->ic_bss); 3115 } else { 3116 ieee80211_iterate_nodes(ic, ath_rssadapt_updatenode, arg); 3117 } 3118 3119 timeout_add_msec(&sc->sc_rssadapt_to, 100); 3120 } 3121 3122 #ifdef AR_DEBUG 3123 void 3124 ath_printrxbuf(struct ath_buf *bf, int done) 3125 { 3126 struct ath_desc *ds; 3127 int i; 3128 3129 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { 3130 printf("R%d (%p %p) %08x %08x %08x %08x %08x %08x %c\n", 3131 i, ds, (struct ath_desc *)bf->bf_daddr + i, 3132 ds->ds_link, ds->ds_data, 3133 ds->ds_ctl0, ds->ds_ctl1, 3134 ds->ds_hw[0], ds->ds_hw[1], 3135 !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!'); 3136 } 3137 } 3138 3139 void 3140 ath_printtxbuf(struct ath_buf *bf, int done) 3141 { 3142 struct ath_desc *ds; 3143 int i; 3144 3145 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { 3146 printf("T%d (%p %p) " 3147 "%08x %08x %08x %08x %08x %08x %08x %08x %c\n", 3148 i, ds, (struct ath_desc *)bf->bf_daddr + i, 3149 ds->ds_link, ds->ds_data, 3150 ds->ds_ctl0, ds->ds_ctl1, 3151 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3], 3152 !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!'); 3153 } 3154 } 3155 #endif /* AR_DEBUG */ 3156 3157 int 3158 ath_gpio_attach(struct ath_softc *sc, u_int16_t devid) 3159 { 3160 struct ath_hal *ah = sc->sc_ah; 3161 struct gpiobus_attach_args gba; 3162 int i; 3163 3164 if (ah->ah_gpio_npins < 1) 3165 return 0; 3166 3167 /* Initialize gpio pins array */ 3168 for (i = 0; i < ah->ah_gpio_npins && i < AR5K_MAX_GPIO; i++) { 3169 sc->sc_gpio_pins[i].pin_num = i; 3170 sc->sc_gpio_pins[i].pin_caps = GPIO_PIN_INPUT | 3171 GPIO_PIN_OUTPUT; 3172 3173 /* Set pin mode to input */ 3174 ath_hal_set_gpio_input(ah, i); 3175 sc->sc_gpio_pins[i].pin_flags = GPIO_PIN_INPUT; 3176 3177 /* Get pin input */ 3178 sc->sc_gpio_pins[i].pin_state = ath_hal_get_gpio(ah, i) ? 
3179 GPIO_PIN_HIGH : GPIO_PIN_LOW; 3180 } 3181 3182 /* Enable GPIO-controlled software LED if available */ 3183 if ((ah->ah_version == AR5K_AR5211) || 3184 (devid == PCI_PRODUCT_ATHEROS_AR5212_IBM)) { 3185 sc->sc_softled = 1; 3186 ath_hal_set_gpio_output(ah, AR5K_SOFTLED_PIN); 3187 ath_hal_set_gpio(ah, AR5K_SOFTLED_PIN, AR5K_SOFTLED_OFF); 3188 } 3189 3190 /* Create gpio controller tag */ 3191 sc->sc_gpio_gc.gp_cookie = sc; 3192 sc->sc_gpio_gc.gp_pin_read = ath_gpio_pin_read; 3193 sc->sc_gpio_gc.gp_pin_write = ath_gpio_pin_write; 3194 sc->sc_gpio_gc.gp_pin_ctl = ath_gpio_pin_ctl; 3195 3196 gba.gba_name = "gpio"; 3197 gba.gba_gc = &sc->sc_gpio_gc; 3198 gba.gba_pins = sc->sc_gpio_pins; 3199 gba.gba_npins = ah->ah_gpio_npins; 3200 3201 #ifdef notyet 3202 #if NGPIO > 0 3203 if (config_found(&sc->sc_dev, &gba, gpiobus_print) == NULL) 3204 return (ENODEV); 3205 #endif 3206 #endif 3207 3208 return (0); 3209 } 3210 3211 int 3212 ath_gpio_pin_read(void *arg, int pin) 3213 { 3214 struct ath_softc *sc = arg; 3215 struct ath_hal *ah = sc->sc_ah; 3216 return (ath_hal_get_gpio(ah, pin) ? GPIO_PIN_HIGH : GPIO_PIN_LOW); 3217 } 3218 3219 void 3220 ath_gpio_pin_write(void *arg, int pin, int value) 3221 { 3222 struct ath_softc *sc = arg; 3223 struct ath_hal *ah = sc->sc_ah; 3224 ath_hal_set_gpio(ah, pin, value ? GPIO_PIN_HIGH : GPIO_PIN_LOW); 3225 } 3226 3227 void 3228 ath_gpio_pin_ctl(void *arg, int pin, int flags) 3229 { 3230 struct ath_softc *sc = arg; 3231 struct ath_hal *ah = sc->sc_ah; 3232 3233 if (flags & GPIO_PIN_INPUT) { 3234 ath_hal_set_gpio_input(ah, pin); 3235 } else if (flags & GPIO_PIN_OUTPUT) { 3236 ath_hal_set_gpio_output(ah, pin); 3237 } 3238 } 3239
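/*
 * Illustrative sketch only, not part of the driver: if the gpiobus
 * attachment above (currently under "#ifdef notyet") were enabled, a pin
 * such as the software LED could be driven through the controller tag set
 * up in ath_gpio_attach(), e.g.:
 *
 *	ath_gpio_pin_ctl(sc, AR5K_SOFTLED_PIN, GPIO_PIN_OUTPUT);
 *	ath_gpio_pin_write(sc, AR5K_SOFTLED_PIN, 1);
 *
 * These resolve to ath_hal_set_gpio_output() and ath_hal_set_gpio() on the
 * HAL, much as ath_ledstate() does for the soft LED (which additionally
 * accounts for LED polarity via AR5K_SOFTLED_ON/AR5K_SOFTLED_OFF).
 */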