1 /* $OpenBSD: ath.c,v 1.115 2017/05/31 09:17:39 stsp Exp $ */ 2 /* $NetBSD: ath.c,v 1.37 2004/08/18 21:59:39 dyoung Exp $ */ 3 4 /*- 5 * Copyright (c) 2002-2004 Sam Leffler, Errno Consulting 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer, 13 * without modification. 14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 15 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 16 * redistribution must be conditioned upon including a substantially 17 * similar Disclaimer requirement for further binary redistribution. 18 * 3. Neither the names of the above-listed copyright holders nor the names 19 * of any contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 26 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 27 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 28 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 31 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGES. 34 */ 35 36 /* 37 * Driver for the Atheros Wireless LAN controller. 38 * 39 * This software is derived from work of Atsushi Onoe; his contribution 40 * is greatly appreciated. It has been modified for OpenBSD to use an 41 * open source HAL instead of the original binary-only HAL. 
42 */ 43 44 #include "bpfilter.h" 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/mbuf.h> 49 #include <sys/malloc.h> 50 #include <sys/lock.h> 51 #include <sys/kernel.h> 52 #include <sys/socket.h> 53 #include <sys/sockio.h> 54 #include <sys/device.h> 55 #include <sys/errno.h> 56 #include <sys/timeout.h> 57 #include <sys/gpio.h> 58 #include <sys/endian.h> 59 60 #include <machine/bus.h> 61 62 #include <net/if.h> 63 #include <net/if_dl.h> 64 #include <net/if_media.h> 65 #if NBPFILTER > 0 66 #include <net/bpf.h> 67 #endif 68 #include <netinet/in.h> 69 #include <netinet/if_ether.h> 70 71 #include <net80211/ieee80211_var.h> 72 #include <net80211/ieee80211_rssadapt.h> 73 74 #include <dev/pci/pcidevs.h> 75 #include <dev/gpio/gpiovar.h> 76 77 #include <dev/ic/athvar.h> 78 79 int ath_init(struct ifnet *); 80 int ath_init1(struct ath_softc *); 81 int ath_intr1(struct ath_softc *); 82 void ath_stop(struct ifnet *); 83 void ath_start(struct ifnet *); 84 void ath_reset(struct ath_softc *, int); 85 int ath_media_change(struct ifnet *); 86 void ath_watchdog(struct ifnet *); 87 int ath_ioctl(struct ifnet *, u_long, caddr_t); 88 void ath_fatal_proc(void *, int); 89 void ath_rxorn_proc(void *, int); 90 void ath_bmiss_proc(void *, int); 91 u_int ath_chan2flags(struct ieee80211com *, struct ieee80211_channel *); 92 int ath_initkeytable(struct ath_softc *); 93 void ath_mcastfilter_accum(caddr_t, u_int32_t (*)[2]); 94 void ath_mcastfilter_compute(struct ath_softc *, u_int32_t (*)[2]); 95 u_int32_t ath_calcrxfilter(struct ath_softc *); 96 void ath_mode_init(struct ath_softc *); 97 #ifndef IEEE80211_STA_ONLY 98 int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); 99 void ath_beacon_proc(void *, int); 100 void ath_beacon_free(struct ath_softc *); 101 #endif 102 void ath_beacon_config(struct ath_softc *); 103 int ath_desc_alloc(struct ath_softc *); 104 void ath_desc_free(struct ath_softc *); 105 struct ieee80211_node *ath_node_alloc(struct ieee80211com *); 106 struct mbuf *ath_getmbuf(int, int, u_int); 107 void ath_node_free(struct ieee80211com *, struct ieee80211_node *); 108 void ath_node_copy(struct ieee80211com *, 109 struct ieee80211_node *, const struct ieee80211_node *); 110 u_int8_t ath_node_getrssi(struct ieee80211com *, 111 const struct ieee80211_node *); 112 int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); 113 void ath_rx_proc(void *, int); 114 int ath_tx_start(struct ath_softc *, struct ieee80211_node *, 115 struct ath_buf *, struct mbuf *); 116 void ath_tx_proc(void *, int); 117 int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 118 void ath_draintxq(struct ath_softc *); 119 void ath_stoprecv(struct ath_softc *); 120 int ath_startrecv(struct ath_softc *); 121 void ath_next_scan(void *); 122 int ath_set_slot_time(struct ath_softc *); 123 void ath_calibrate(void *); 124 void ath_ledstate(struct ath_softc *, enum ieee80211_state); 125 int ath_newstate(struct ieee80211com *, enum ieee80211_state, int); 126 void ath_newassoc(struct ieee80211com *, 127 struct ieee80211_node *, int); 128 int ath_getchannels(struct ath_softc *, HAL_BOOL outdoor, 129 HAL_BOOL xchanmode); 130 int ath_rate_setup(struct ath_softc *sc, u_int mode); 131 void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 132 void ath_rssadapt_updatenode(void *, struct ieee80211_node *); 133 void ath_rssadapt_updatestats(void *); 134 #ifndef IEEE80211_STA_ONLY 135 void ath_recv_mgmt(struct ieee80211com *, struct mbuf *, 136 struct ieee80211_node *, struct ieee80211_rxinfo *, 
int); 137 #endif 138 void ath_disable(struct ath_softc *); 139 140 int ath_gpio_attach(struct ath_softc *, u_int16_t); 141 int ath_gpio_pin_read(void *, int); 142 void ath_gpio_pin_write(void *, int, int); 143 void ath_gpio_pin_ctl(void *, int, int); 144 145 #ifdef AR_DEBUG 146 void ath_printrxbuf(struct ath_buf *, int); 147 void ath_printtxbuf(struct ath_buf *, int); 148 int ath_debug = 0; 149 #endif 150 151 int ath_dwelltime = 200; /* 5 channels/second */ 152 int ath_calinterval = 30; /* calibrate every 30 secs */ 153 int ath_outdoor = AH_TRUE; /* outdoor operation */ 154 int ath_xchanmode = AH_TRUE; /* enable extended channels */ 155 int ath_softcrypto = 1; /* 1=enable software crypto */ 156 157 struct cfdriver ath_cd = { 158 NULL, "ath", DV_IFNET 159 }; 160 161 int 162 ath_activate(struct device *self, int act) 163 { 164 struct ath_softc *sc = (struct ath_softc *)self; 165 struct ifnet *ifp = &sc->sc_ic.ic_if; 166 167 switch (act) { 168 case DVACT_SUSPEND: 169 if (ifp->if_flags & IFF_RUNNING) { 170 ath_stop(ifp); 171 if (sc->sc_power != NULL) 172 (*sc->sc_power)(sc, act); 173 } 174 break; 175 case DVACT_RESUME: 176 if (ifp->if_flags & IFF_UP) { 177 ath_init(ifp); 178 if (ifp->if_flags & IFF_RUNNING) 179 ath_start(ifp); 180 } 181 break; 182 } 183 return 0; 184 } 185 186 int 187 ath_enable(struct ath_softc *sc) 188 { 189 if (ATH_IS_ENABLED(sc) == 0) { 190 if (sc->sc_enable != NULL && (*sc->sc_enable)(sc) != 0) { 191 printf("%s: device enable failed\n", 192 sc->sc_dev.dv_xname); 193 return (EIO); 194 } 195 sc->sc_flags |= ATH_ENABLED; 196 } 197 return (0); 198 } 199 200 void 201 ath_disable(struct ath_softc *sc) 202 { 203 if (!ATH_IS_ENABLED(sc)) 204 return; 205 if (sc->sc_disable != NULL) 206 (*sc->sc_disable)(sc); 207 sc->sc_flags &= ~ATH_ENABLED; 208 } 209 210 int 211 ath_attach(u_int16_t devid, struct ath_softc *sc) 212 { 213 struct ieee80211com *ic = &sc->sc_ic; 214 struct ifnet *ifp = &ic->ic_if; 215 struct ath_hal *ah; 216 HAL_STATUS status; 217 HAL_TXQ_INFO qinfo; 218 int error = 0, i; 219 220 DPRINTF(ATH_DEBUG_ANY, ("%s: devid 0x%x\n", __func__, devid)); 221 222 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 223 sc->sc_flags &= ~ATH_ATTACHED; /* make sure that it's not attached */ 224 225 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, 226 sc->sc_pcie, &status); 227 if (ah == NULL) { 228 printf("%s: unable to attach hardware; HAL status %d\n", 229 ifp->if_xname, status); 230 error = ENXIO; 231 goto bad; 232 } 233 if (ah->ah_abi != HAL_ABI_VERSION) { 234 printf("%s: HAL ABI mismatch detected (0x%x != 0x%x)\n", 235 ifp->if_xname, ah->ah_abi, HAL_ABI_VERSION); 236 error = ENXIO; 237 goto bad; 238 } 239 240 if (ah->ah_single_chip == AH_TRUE) { 241 printf("%s: AR%s %u.%u phy %u.%u rf %u.%u", ifp->if_xname, 242 ar5k_printver(AR5K_VERSION_DEV, devid), 243 ah->ah_mac_version, ah->ah_mac_revision, 244 ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf, 245 ah->ah_radio_5ghz_revision >> 4, 246 ah->ah_radio_5ghz_revision & 0xf); 247 } else { 248 printf("%s: AR%s %u.%u phy %u.%u", ifp->if_xname, 249 ar5k_printver(AR5K_VERSION_VER, ah->ah_mac_srev), 250 ah->ah_mac_version, ah->ah_mac_revision, 251 ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf); 252 printf(" rf%s %u.%u", 253 ar5k_printver(AR5K_VERSION_RAD, ah->ah_radio_5ghz_revision), 254 ah->ah_radio_5ghz_revision >> 4, 255 ah->ah_radio_5ghz_revision & 0xf); 256 if (ah->ah_radio_2ghz_revision != 0) { 257 printf(" rf%s %u.%u", 258 ar5k_printver(AR5K_VERSION_RAD, 259 ah->ah_radio_2ghz_revision), 260 ah->ah_radio_2ghz_revision 
>> 4, 261 ah->ah_radio_2ghz_revision & 0xf); 262 } 263 } 264 if (ah->ah_ee_version == AR5K_EEPROM_VERSION_4_7) 265 printf(" eeprom 4.7"); 266 else 267 printf(" eeprom %1x.%1x", ah->ah_ee_version >> 12, 268 ah->ah_ee_version & 0xff); 269 270 #if 0 271 if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_UNSUPP || 272 ah->ah_radio_2ghz_revision >= AR5K_SREV_RAD_UNSUPP) { 273 printf(": RF radio not supported\n"); 274 error = EOPNOTSUPP; 275 goto bad; 276 } 277 #endif 278 279 sc->sc_ah = ah; 280 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ 281 282 /* 283 * Get regulation domain either stored in the EEPROM or defined 284 * as the default value. Some devices are known to have broken 285 * regulation domain values in their EEPROM. 286 */ 287 ath_hal_get_regdomain(ah, &ah->ah_regdomain); 288 289 /* 290 * Construct channel list based on the current regulation domain. 291 */ 292 error = ath_getchannels(sc, ath_outdoor, ath_xchanmode); 293 if (error != 0) 294 goto bad; 295 296 /* 297 * Setup rate tables for all potential media types. 298 */ 299 ath_rate_setup(sc, IEEE80211_MODE_11A); 300 ath_rate_setup(sc, IEEE80211_MODE_11B); 301 ath_rate_setup(sc, IEEE80211_MODE_11G); 302 303 error = ath_desc_alloc(sc); 304 if (error != 0) { 305 printf(": failed to allocate descriptors: %d\n", error); 306 goto bad; 307 } 308 timeout_set(&sc->sc_scan_to, ath_next_scan, sc); 309 timeout_set(&sc->sc_cal_to, ath_calibrate, sc); 310 timeout_set(&sc->sc_rssadapt_to, ath_rssadapt_updatestats, sc); 311 312 #ifdef __FreeBSD__ 313 ATH_TXBUF_LOCK_INIT(sc); 314 ATH_TXQ_LOCK_INIT(sc); 315 #endif 316 317 ATH_TASK_INIT(&sc->sc_txtask, ath_tx_proc, sc); 318 ATH_TASK_INIT(&sc->sc_rxtask, ath_rx_proc, sc); 319 ATH_TASK_INIT(&sc->sc_rxorntask, ath_rxorn_proc, sc); 320 ATH_TASK_INIT(&sc->sc_fataltask, ath_fatal_proc, sc); 321 ATH_TASK_INIT(&sc->sc_bmisstask, ath_bmiss_proc, sc); 322 #ifndef IEEE80211_STA_ONLY 323 ATH_TASK_INIT(&sc->sc_swbatask, ath_beacon_proc, sc); 324 #endif 325 326 /* 327 * For now just pre-allocate one data queue and one 328 * beacon queue. Note that the HAL handles resetting 329 * them at the needed time. Eventually we'll want to 330 * allocate more tx queues for splitting management 331 * frames and for QOS support. 
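 * (Sketch of what follows, grounded in the code below: the loop
 * creates HAL_TX_QUEUE_ID_DATA_MAX + 1 data queues with
 * ath_hal_setup_tx_queue(), and tqi_subtype is filled with the loop
 * index as a placeholder until queues are mapped onto WME access
 * categories.)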
332 */ 333 sc->sc_bhalq = ath_hal_setup_tx_queue(ah, HAL_TX_QUEUE_BEACON, NULL); 334 if (sc->sc_bhalq == (u_int) -1) { 335 printf(": unable to setup a beacon xmit queue!\n"); 336 goto bad2; 337 } 338 339 for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) { 340 bzero(&qinfo, sizeof(qinfo)); 341 qinfo.tqi_type = HAL_TX_QUEUE_DATA; 342 qinfo.tqi_subtype = i; /* should be mapped to WME types */ 343 sc->sc_txhalq[i] = ath_hal_setup_tx_queue(ah, 344 HAL_TX_QUEUE_DATA, &qinfo); 345 if (sc->sc_txhalq[i] == (u_int) -1) { 346 printf(": unable to setup a data xmit queue %u!\n", i); 347 goto bad2; 348 } 349 } 350 351 ifp->if_softc = sc; 352 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; 353 ifp->if_start = ath_start; 354 ifp->if_watchdog = ath_watchdog; 355 ifp->if_ioctl = ath_ioctl; 356 #ifndef __OpenBSD__ 357 ifp->if_stop = ath_stop; /* XXX */ 358 #endif 359 IFQ_SET_MAXLEN(&ifp->if_snd, ATH_TXBUF * ATH_TXDESC); 360 361 ic->ic_softc = sc; 362 ic->ic_newassoc = ath_newassoc; 363 /* XXX not right but it's not used anywhere important */ 364 ic->ic_phytype = IEEE80211_T_OFDM; 365 ic->ic_opmode = IEEE80211_M_STA; 366 ic->ic_caps = IEEE80211_C_WEP /* wep supported */ 367 | IEEE80211_C_PMGT /* power management */ 368 #ifndef IEEE80211_STA_ONLY 369 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ 370 | IEEE80211_C_HOSTAP /* hostap mode */ 371 #endif 372 | IEEE80211_C_MONITOR /* monitor mode */ 373 | IEEE80211_C_SHSLOT /* short slot time supported */ 374 | IEEE80211_C_SHPREAMBLE; /* short preamble supported */ 375 if (ath_softcrypto) 376 ic->ic_caps |= IEEE80211_C_RSN; /* wpa/rsn supported */ 377 378 /* 379 * Not all chips have the VEOL support we want to use with 380 * IBSS beacon; check here for it. 381 */ 382 sc->sc_veol = ath_hal_has_veol(ah); 383 384 /* get mac address from hardware */ 385 ath_hal_get_lladdr(ah, ic->ic_myaddr); 386 387 if_attach(ifp); 388 389 /* call MI attach routine. */ 390 ieee80211_ifattach(ifp); 391 392 /* override default methods */ 393 ic->ic_node_alloc = ath_node_alloc; 394 sc->sc_node_free = ic->ic_node_free; 395 ic->ic_node_free = ath_node_free; 396 sc->sc_node_copy = ic->ic_node_copy; 397 ic->ic_node_copy = ath_node_copy; 398 ic->ic_node_getrssi = ath_node_getrssi; 399 sc->sc_newstate = ic->ic_newstate; 400 ic->ic_newstate = ath_newstate; 401 #ifndef IEEE80211_STA_ONLY 402 sc->sc_recv_mgmt = ic->ic_recv_mgmt; 403 ic->ic_recv_mgmt = ath_recv_mgmt; 404 #endif 405 ic->ic_max_rssi = AR5K_MAX_RSSI; 406 bcopy(etherbroadcastaddr, sc->sc_broadcast_addr, IEEE80211_ADDR_LEN); 407 408 /* complete initialization */ 409 ieee80211_media_init(ifp, ath_media_change, ieee80211_media_status); 410 411 #if NBPFILTER > 0 412 bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO, 413 sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN); 414 415 sc->sc_rxtap_len = sizeof(sc->sc_rxtapu); 416 bzero(&sc->sc_rxtapu, sc->sc_rxtap_len); 417 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); 418 sc->sc_rxtap.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT); 419 420 sc->sc_txtap_len = sizeof(sc->sc_txtapu); 421 bzero(&sc->sc_txtapu, sc->sc_txtap_len); 422 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); 423 sc->sc_txtap.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT); 424 #endif 425 426 sc->sc_flags |= ATH_ATTACHED; 427 428 /* 429 * Print regulation domain and the mac address. The regulation domain 430 * will be marked with a * if the EEPROM value has been overwritten. 
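 * (The code below compares ah_regdomain with ah_regdomain_hw to
 * decide whether to print the marker.)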
431 */ 432 printf(", %s%s, address %s\n", 433 ieee80211_regdomain2name(ah->ah_regdomain), 434 ah->ah_regdomain != ah->ah_regdomain_hw ? "*" : "", 435 ether_sprintf(ic->ic_myaddr)); 436 437 if (ath_gpio_attach(sc, devid) == 0) 438 sc->sc_flags |= ATH_GPIO; 439 440 return 0; 441 bad2: 442 ath_desc_free(sc); 443 bad: 444 if (ah) 445 ath_hal_detach(ah); 446 sc->sc_invalid = 1; 447 return error; 448 } 449 450 int 451 ath_detach(struct ath_softc *sc, int flags) 452 { 453 struct ifnet *ifp = &sc->sc_ic.ic_if; 454 int s; 455 456 if ((sc->sc_flags & ATH_ATTACHED) == 0) 457 return (0); 458 459 config_detach_children(&sc->sc_dev, flags); 460 461 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags %x\n", __func__, ifp->if_flags)); 462 463 timeout_del(&sc->sc_scan_to); 464 timeout_del(&sc->sc_cal_to); 465 timeout_del(&sc->sc_rssadapt_to); 466 467 s = splnet(); 468 ath_stop(ifp); 469 ath_desc_free(sc); 470 ath_hal_detach(sc->sc_ah); 471 472 ieee80211_ifdetach(ifp); 473 if_detach(ifp); 474 475 splx(s); 476 #ifdef __FreeBSD__ 477 ATH_TXBUF_LOCK_DESTROY(sc); 478 ATH_TXQ_LOCK_DESTROY(sc); 479 #endif 480 481 return 0; 482 } 483 484 int 485 ath_intr(void *arg) 486 { 487 return ath_intr1((struct ath_softc *)arg); 488 } 489 490 int 491 ath_intr1(struct ath_softc *sc) 492 { 493 struct ieee80211com *ic = &sc->sc_ic; 494 struct ifnet *ifp = &ic->ic_if; 495 struct ath_hal *ah = sc->sc_ah; 496 HAL_INT status; 497 498 if (sc->sc_invalid) { 499 /* 500 * The hardware is not ready/present, don't touch anything. 501 * Note this can happen early on if the IRQ is shared. 502 */ 503 DPRINTF(ATH_DEBUG_ANY, ("%s: invalid; ignored\n", __func__)); 504 return 0; 505 } 506 if (!ath_hal_is_intr_pending(ah)) /* shared irq, not for us */ 507 return 0; 508 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) != (IFF_RUNNING|IFF_UP)) { 509 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n", 510 __func__, ifp->if_flags)); 511 ath_hal_get_isr(ah, &status); /* clear ISR */ 512 ath_hal_set_intr(ah, 0); /* disable further intr's */ 513 return 1; /* XXX */ 514 } 515 ath_hal_get_isr(ah, &status); /* NB: clears ISR too */ 516 DPRINTF(ATH_DEBUG_INTR, ("%s: status 0x%x\n", __func__, status)); 517 status &= sc->sc_imask; /* discard unasked for bits */ 518 if (status & HAL_INT_FATAL) { 519 sc->sc_stats.ast_hardware++; 520 ath_hal_set_intr(ah, 0); /* disable intr's until reset */ 521 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_fataltask); 522 } else if (status & HAL_INT_RXORN) { 523 sc->sc_stats.ast_rxorn++; 524 ath_hal_set_intr(ah, 0); /* disable intr's until reset */ 525 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxorntask); 526 } else if (status & HAL_INT_MIB) { 527 DPRINTF(ATH_DEBUG_INTR, 528 ("%s: resetting MIB counters\n", __func__)); 529 sc->sc_stats.ast_mib++; 530 ath_hal_update_mib_counters(ah, &sc->sc_mib_stats); 531 } else { 532 if (status & HAL_INT_RXEOL) { 533 /* 534 * NB: the hardware should re-read the link when 535 * RXE bit is written, but it doesn't work at 536 * least on older hardware revs. 
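 * As a workaround the driver clears sc_rxlink below, so the next
 * ath_rxbuf_init() call rebuilds the receive descriptor chain from
 * scratch instead of trusting the stale hardware link pointer.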
537 */ 538 sc->sc_stats.ast_rxeol++; 539 sc->sc_rxlink = NULL; 540 } 541 if (status & HAL_INT_TXURN) { 542 sc->sc_stats.ast_txurn++; 543 /* bump tx trigger level */ 544 ath_hal_update_tx_triglevel(ah, AH_TRUE); 545 } 546 if (status & HAL_INT_RX) 547 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxtask); 548 if (status & HAL_INT_TX) 549 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_txtask); 550 if (status & HAL_INT_SWBA) 551 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_swbatask); 552 if (status & HAL_INT_BMISS) { 553 sc->sc_stats.ast_bmiss++; 554 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_bmisstask); 555 } 556 } 557 return 1; 558 } 559 560 void 561 ath_fatal_proc(void *arg, int pending) 562 { 563 struct ath_softc *sc = arg; 564 struct ieee80211com *ic = &sc->sc_ic; 565 struct ifnet *ifp = &ic->ic_if; 566 567 if (ifp->if_flags & IFF_DEBUG) 568 printf("%s: hardware error; resetting\n", ifp->if_xname); 569 ath_reset(sc, 1); 570 } 571 572 void 573 ath_rxorn_proc(void *arg, int pending) 574 { 575 struct ath_softc *sc = arg; 576 struct ieee80211com *ic = &sc->sc_ic; 577 struct ifnet *ifp = &ic->ic_if; 578 579 if (ifp->if_flags & IFF_DEBUG) 580 printf("%s: rx FIFO overrun; resetting\n", ifp->if_xname); 581 ath_reset(sc, 1); 582 } 583 584 void 585 ath_bmiss_proc(void *arg, int pending) 586 { 587 struct ath_softc *sc = arg; 588 struct ieee80211com *ic = &sc->sc_ic; 589 590 DPRINTF(ATH_DEBUG_ANY, ("%s: pending %u\n", __func__, pending)); 591 if (ic->ic_opmode != IEEE80211_M_STA) 592 return; 593 if (ic->ic_state == IEEE80211_S_RUN) { 594 /* 595 * Rather than go directly to scan state, try to 596 * reassociate first. If that fails then the state 597 * machine will drop us into scanning after timing 598 * out waiting for a probe response. 599 */ 600 ieee80211_new_state(ic, IEEE80211_S_ASSOC, -1); 601 } 602 } 603 604 u_int 605 ath_chan2flags(struct ieee80211com *ic, struct ieee80211_channel *chan) 606 { 607 enum ieee80211_phymode mode = ieee80211_chan2mode(ic, chan); 608 609 switch (mode) { 610 case IEEE80211_MODE_AUTO: 611 return 0; 612 case IEEE80211_MODE_11A: 613 return CHANNEL_A; 614 case IEEE80211_MODE_11B: 615 return CHANNEL_B; 616 case IEEE80211_MODE_11G: 617 return CHANNEL_G; 618 default: 619 panic("%s: unsupported mode %d", __func__, mode); 620 return 0; 621 } 622 } 623 624 int 625 ath_init(struct ifnet *ifp) 626 { 627 return ath_init1((struct ath_softc *)ifp->if_softc); 628 } 629 630 int 631 ath_init1(struct ath_softc *sc) 632 { 633 struct ieee80211com *ic = &sc->sc_ic; 634 struct ifnet *ifp = &ic->ic_if; 635 struct ieee80211_node *ni; 636 enum ieee80211_phymode mode; 637 struct ath_hal *ah = sc->sc_ah; 638 HAL_STATUS status; 639 HAL_CHANNEL hchan; 640 int error = 0, s; 641 642 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n", 643 __func__, ifp->if_flags)); 644 645 if ((error = ath_enable(sc)) != 0) 646 return error; 647 648 s = splnet(); 649 /* 650 * Stop anything previously setup. This is safe 651 * whether this is the first time through or not. 652 */ 653 ath_stop(ifp); 654 655 /* 656 * Reset the link layer address to the latest value. 657 */ 658 IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl)); 659 ath_hal_set_lladdr(ah, ic->ic_myaddr); 660 661 /* 662 * The basic interface to setting the hardware in a good 663 * state is ``reset''. On return the hardware is known to 664 * be powered up and with interrupts disabled. This must 665 * be followed by initialization of the appropriate bits 666 * and then setup of the interrupt mask. 
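 * Concretely, the code below calls ath_hal_reset(), sets the slot
 * time, reloads the key cache via ath_initkeytable(), starts the
 * receive engine with ath_startrecv() and finally programs the
 * interrupt mask through ath_hal_set_intr().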
667 */ 668 hchan.channel = ic->ic_ibss_chan->ic_freq; 669 hchan.channelFlags = ath_chan2flags(ic, ic->ic_ibss_chan); 670 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, &status)) { 671 printf("%s: unable to reset hardware; hal status %u\n", 672 ifp->if_xname, status); 673 error = EIO; 674 goto done; 675 } 676 ath_set_slot_time(sc); 677 678 if ((error = ath_initkeytable(sc)) != 0) { 679 printf("%s: unable to reset the key cache\n", 680 ifp->if_xname); 681 goto done; 682 } 683 684 if ((error = ath_startrecv(sc)) != 0) { 685 printf("%s: unable to start recv logic\n", ifp->if_xname); 686 goto done; 687 } 688 689 /* 690 * Enable interrupts. 691 */ 692 sc->sc_imask = HAL_INT_RX | HAL_INT_TX 693 | HAL_INT_RXEOL | HAL_INT_RXORN 694 | HAL_INT_FATAL | HAL_INT_GLOBAL; 695 #ifndef IEEE80211_STA_ONLY 696 if (ic->ic_opmode == IEEE80211_M_HOSTAP) 697 sc->sc_imask |= HAL_INT_MIB; 698 #endif 699 ath_hal_set_intr(ah, sc->sc_imask); 700 701 ifp->if_flags |= IFF_RUNNING; 702 ic->ic_state = IEEE80211_S_INIT; 703 704 /* 705 * The hardware should be ready to go now so it's safe 706 * to kick the 802.11 state machine as it's likely to 707 * immediately call back to us to send mgmt frames. 708 */ 709 ni = ic->ic_bss; 710 ni->ni_chan = ic->ic_ibss_chan; 711 mode = ieee80211_chan2mode(ic, ni->ni_chan); 712 if (mode != sc->sc_curmode) 713 ath_setcurmode(sc, mode); 714 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 715 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1); 716 } else { 717 ieee80211_new_state(ic, IEEE80211_S_RUN, -1); 718 } 719 done: 720 splx(s); 721 return error; 722 } 723 724 void 725 ath_stop(struct ifnet *ifp) 726 { 727 struct ieee80211com *ic = (struct ieee80211com *) ifp; 728 struct ath_softc *sc = ifp->if_softc; 729 struct ath_hal *ah = sc->sc_ah; 730 int s; 731 732 DPRINTF(ATH_DEBUG_ANY, ("%s: invalid %u if_flags 0x%x\n", 733 __func__, sc->sc_invalid, ifp->if_flags)); 734 735 s = splnet(); 736 if (ifp->if_flags & IFF_RUNNING) { 737 /* 738 * Shutdown the hardware and driver: 739 * disable interrupts 740 * turn off timers 741 * clear transmit machinery 742 * clear receive machinery 743 * drain and release tx queues 744 * reclaim beacon resources 745 * reset 802.11 state machine 746 * power down hardware 747 * 748 * Note that some of this work is not possible if the 749 * hardware is gone (invalid). 750 */ 751 ifp->if_flags &= ~IFF_RUNNING; 752 ifp->if_timer = 0; 753 if (!sc->sc_invalid) 754 ath_hal_set_intr(ah, 0); 755 ath_draintxq(sc); 756 if (!sc->sc_invalid) { 757 ath_stoprecv(sc); 758 } else { 759 sc->sc_rxlink = NULL; 760 } 761 IFQ_PURGE(&ifp->if_snd); 762 #ifndef IEEE80211_STA_ONLY 763 ath_beacon_free(sc); 764 #endif 765 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 766 if (!sc->sc_invalid) { 767 ath_hal_set_power(ah, HAL_PM_FULL_SLEEP, 0); 768 } 769 ath_disable(sc); 770 } 771 splx(s); 772 } 773 774 /* 775 * Reset the hardware w/o losing operational state. This is 776 * basically a more efficient way of doing ath_stop, ath_init, 777 * followed by state transitions to the current 802.11 778 * operational state. Used to recover from errors rx overrun 779 * and to reset the hardware when rf gain settings must be reset. 
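 * The sequence used below is: disable interrupts, drain the tx
 * queues, stop the receive engine, run ath_hal_reset() (passing
 * "full" to request a full reset), restore the slot time and the
 * interrupt mask, restart receive and transmit, and reprogram the
 * beacon timers if the interface is in the RUN state.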
780 */ 781 void 782 ath_reset(struct ath_softc *sc, int full) 783 { 784 struct ieee80211com *ic = &sc->sc_ic; 785 struct ifnet *ifp = &ic->ic_if; 786 struct ath_hal *ah = sc->sc_ah; 787 struct ieee80211_channel *c; 788 HAL_STATUS status; 789 HAL_CHANNEL hchan; 790 791 /* 792 * Convert to a HAL channel description with the flags 793 * constrained to reflect the current operating mode. 794 */ 795 c = ic->ic_ibss_chan; 796 hchan.channel = c->ic_freq; 797 hchan.channelFlags = ath_chan2flags(ic, c); 798 799 ath_hal_set_intr(ah, 0); /* disable interrupts */ 800 ath_draintxq(sc); /* stop xmit side */ 801 ath_stoprecv(sc); /* stop recv side */ 802 /* NB: indicate channel change so we do a full reset */ 803 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, 804 full ? AH_TRUE : AH_FALSE, &status)) { 805 printf("%s: %s: unable to reset hardware; hal status %u\n", 806 ifp->if_xname, __func__, status); 807 } 808 ath_set_slot_time(sc); 809 /* In case channel changed, save as a node channel */ 810 ic->ic_bss->ni_chan = ic->ic_ibss_chan; 811 ath_hal_set_intr(ah, sc->sc_imask); 812 if (ath_startrecv(sc) != 0) /* restart recv */ 813 printf("%s: %s: unable to start recv logic\n", ifp->if_xname, 814 __func__); 815 ath_start(ifp); /* restart xmit */ 816 if (ic->ic_state == IEEE80211_S_RUN) 817 ath_beacon_config(sc); /* restart beacons */ 818 } 819 820 void 821 ath_start(struct ifnet *ifp) 822 { 823 struct ath_softc *sc = ifp->if_softc; 824 struct ath_hal *ah = sc->sc_ah; 825 struct ieee80211com *ic = &sc->sc_ic; 826 struct ieee80211_node *ni; 827 struct ath_buf *bf; 828 struct mbuf *m; 829 struct ieee80211_frame *wh; 830 int s; 831 832 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd) || 833 sc->sc_invalid) 834 return; 835 for (;;) { 836 /* 837 * Grab a TX buffer and associated resources. 838 */ 839 s = splnet(); 840 bf = TAILQ_FIRST(&sc->sc_txbuf); 841 if (bf != NULL) 842 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); 843 splx(s); 844 if (bf == NULL) { 845 DPRINTF(ATH_DEBUG_ANY, ("%s: out of xmit buffers\n", 846 __func__)); 847 sc->sc_stats.ast_tx_qstop++; 848 ifq_set_oactive(&ifp->if_snd); 849 break; 850 } 851 /* 852 * Poll the management queue for frames; they 853 * have priority over normal data frames. 854 */ 855 m = mq_dequeue(&ic->ic_mgtq); 856 if (m == NULL) { 857 /* 858 * No data frames go out unless we're associated. 859 */ 860 if (ic->ic_state != IEEE80211_S_RUN) { 861 DPRINTF(ATH_DEBUG_ANY, 862 ("%s: ignore data packet, state %u\n", 863 __func__, ic->ic_state)); 864 sc->sc_stats.ast_tx_discard++; 865 s = splnet(); 866 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 867 splx(s); 868 break; 869 } 870 IFQ_DEQUEUE(&ifp->if_snd, m); 871 if (m == NULL) { 872 s = splnet(); 873 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 874 splx(s); 875 break; 876 } 877 878 #if NBPFILTER > 0 879 if (ifp->if_bpf) 880 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 881 #endif 882 883 /* 884 * Encapsulate the packet in prep for transmission. 
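 * ieee80211_encap() prepends the 802.11 header and returns, via
 * "ni", a referenced node for the destination; on failure it
 * returns NULL and the frame is counted and dropped below.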
885 */ 886 m = ieee80211_encap(ifp, m, &ni); 887 if (m == NULL) { 888 DPRINTF(ATH_DEBUG_ANY, 889 ("%s: encapsulation failure\n", 890 __func__)); 891 sc->sc_stats.ast_tx_encap++; 892 goto bad; 893 } 894 wh = mtod(m, struct ieee80211_frame *); 895 } else { 896 ni = m->m_pkthdr.ph_cookie; 897 898 wh = mtod(m, struct ieee80211_frame *); 899 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 900 IEEE80211_FC0_SUBTYPE_PROBE_RESP) { 901 /* fill time stamp */ 902 u_int64_t tsf; 903 u_int32_t *tstamp; 904 905 tsf = ath_hal_get_tsf64(ah); 906 /* XXX: adjust 100us delay to xmit */ 907 tsf += 100; 908 tstamp = (u_int32_t *)&wh[1]; 909 tstamp[0] = htole32(tsf & 0xffffffff); 910 tstamp[1] = htole32(tsf >> 32); 911 } 912 sc->sc_stats.ast_tx_mgmt++; 913 } 914 915 if (ath_tx_start(sc, ni, bf, m)) { 916 bad: 917 s = splnet(); 918 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 919 splx(s); 920 ifp->if_oerrors++; 921 if (ni != NULL) 922 ieee80211_release_node(ic, ni); 923 continue; 924 } 925 926 sc->sc_tx_timer = 5; 927 ifp->if_timer = 1; 928 } 929 } 930 931 int 932 ath_media_change(struct ifnet *ifp) 933 { 934 int error; 935 936 error = ieee80211_media_change(ifp); 937 if (error == ENETRESET) { 938 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == 939 (IFF_RUNNING|IFF_UP)) 940 ath_init(ifp); /* XXX lose error */ 941 error = 0; 942 } 943 return error; 944 } 945 946 void 947 ath_watchdog(struct ifnet *ifp) 948 { 949 struct ath_softc *sc = ifp->if_softc; 950 951 ifp->if_timer = 0; 952 if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) 953 return; 954 if (sc->sc_tx_timer) { 955 if (--sc->sc_tx_timer == 0) { 956 printf("%s: device timeout\n", ifp->if_xname); 957 ath_reset(sc, 1); 958 ifp->if_oerrors++; 959 sc->sc_stats.ast_watchdog++; 960 return; 961 } 962 ifp->if_timer = 1; 963 } 964 965 ieee80211_watchdog(ifp); 966 } 967 968 int 969 ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 970 { 971 struct ath_softc *sc = ifp->if_softc; 972 struct ieee80211com *ic = &sc->sc_ic; 973 struct ifreq *ifr = (struct ifreq *)data; 974 int error = 0, s; 975 976 s = splnet(); 977 switch (cmd) { 978 case SIOCSIFADDR: 979 ifp->if_flags |= IFF_UP; 980 /* FALLTHROUGH */ 981 case SIOCSIFFLAGS: 982 if (ifp->if_flags & IFF_UP) { 983 if (ifp->if_flags & IFF_RUNNING) { 984 /* 985 * To avoid rescanning another access point, 986 * do not call ath_init() here. Instead, 987 * only reflect promisc mode settings. 988 */ 989 ath_mode_init(sc); 990 } else { 991 /* 992 * Beware of being called during detach to 993 * reset promiscuous mode. In that case we 994 * will still be marked UP but not RUNNING. 995 * However trying to re-init the interface 996 * is the wrong thing to do as we've already 997 * torn down much of our state. There's 998 * probably a better way to deal with this. 999 */ 1000 if (!sc->sc_invalid) 1001 ath_init(ifp); /* XXX lose error */ 1002 } 1003 } else 1004 ath_stop(ifp); 1005 break; 1006 case SIOCADDMULTI: 1007 case SIOCDELMULTI: 1008 #ifdef __FreeBSD__ 1009 /* 1010 * The upper layer has already installed/removed 1011 * the multicast address(es), just recalculate the 1012 * multicast filter for the card. 1013 */ 1014 if (ifp->if_flags & IFF_RUNNING) 1015 ath_mode_init(sc); 1016 #endif 1017 error = (cmd == SIOCADDMULTI) ? 
1018 ether_addmulti(ifr, &sc->sc_ic.ic_ac) : 1019 ether_delmulti(ifr, &sc->sc_ic.ic_ac); 1020 if (error == ENETRESET) { 1021 if (ifp->if_flags & IFF_RUNNING) 1022 ath_mode_init(sc); 1023 error = 0; 1024 } 1025 break; 1026 case SIOCGATHSTATS: 1027 error = copyout(&sc->sc_stats, 1028 ifr->ifr_data, sizeof (sc->sc_stats)); 1029 break; 1030 default: 1031 error = ieee80211_ioctl(ifp, cmd, data); 1032 if (error == ENETRESET) { 1033 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == 1034 (IFF_RUNNING|IFF_UP)) { 1035 if (ic->ic_opmode != IEEE80211_M_MONITOR) 1036 ath_init(ifp); /* XXX lose error */ 1037 else 1038 ath_reset(sc, 1); 1039 } 1040 error = 0; 1041 } 1042 break; 1043 } 1044 splx(s); 1045 return error; 1046 } 1047 1048 /* 1049 * Fill the hardware key cache with key entries. 1050 */ 1051 int 1052 ath_initkeytable(struct ath_softc *sc) 1053 { 1054 struct ieee80211com *ic = &sc->sc_ic; 1055 struct ath_hal *ah = sc->sc_ah; 1056 int i; 1057 1058 if (ath_softcrypto) { 1059 /* 1060 * Disable the hardware crypto engine and reset the key cache 1061 * to allow software crypto operation for WEP/RSN/WPA2 1062 */ 1063 if (ic->ic_flags & (IEEE80211_F_WEPON|IEEE80211_F_RSNON)) 1064 (void)ath_hal_softcrypto(ah, AH_TRUE); 1065 else 1066 (void)ath_hal_softcrypto(ah, AH_FALSE); 1067 return (0); 1068 } 1069 1070 /* WEP is disabled, we only support WEP in hardware yet */ 1071 if ((ic->ic_flags & IEEE80211_F_WEPON) == 0) 1072 return (0); 1073 1074 /* 1075 * Setup the hardware after reset: the key cache is filled as 1076 * needed and the receive engine is set going. Frame transmit 1077 * is handled entirely in the frame output path; there's nothing 1078 * to do here except setup the interrupt mask. 1079 */ 1080 1081 /* XXX maybe should reset all keys when !WEPON */ 1082 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 1083 struct ieee80211_key *k = &ic->ic_nw_keys[i]; 1084 if (k->k_len == 0) 1085 ath_hal_reset_key(ah, i); 1086 else { 1087 HAL_KEYVAL hk; 1088 1089 bzero(&hk, sizeof(hk)); 1090 /* 1091 * Pad the key to a supported key length. It 1092 * is always a good idea to use full-length 1093 * keys without padded zeros but this seems 1094 * to be the default behaviour used by many 1095 * implementations. 1096 */ 1097 if (k->k_cipher == IEEE80211_CIPHER_WEP40) 1098 hk.wk_len = AR5K_KEYVAL_LENGTH_40; 1099 else if (k->k_cipher == IEEE80211_CIPHER_WEP104) 1100 hk.wk_len = AR5K_KEYVAL_LENGTH_104; 1101 else 1102 return (EINVAL); 1103 bcopy(k->k_key, hk.wk_key, hk.wk_len); 1104 1105 if (ath_hal_set_key(ah, i, &hk) != AH_TRUE) 1106 return (EINVAL); 1107 } 1108 } 1109 1110 return (0); 1111 } 1112 1113 void 1114 ath_mcastfilter_accum(caddr_t dl, u_int32_t (*mfilt)[2]) 1115 { 1116 u_int32_t val; 1117 u_int8_t pos; 1118 1119 val = LE_READ_4(dl + 0); 1120 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 1121 val = LE_READ_4(dl + 3); 1122 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 1123 pos &= 0x3f; 1124 (*mfilt)[pos / 32] |= (1 << (pos % 32)); 1125 } 1126 1127 void 1128 ath_mcastfilter_compute(struct ath_softc *sc, u_int32_t (*mfilt)[2]) 1129 { 1130 struct arpcom *ac = &sc->sc_ic.ic_ac; 1131 struct ifnet *ifp = &sc->sc_ic.ic_if; 1132 struct ether_multi *enm; 1133 struct ether_multistep estep; 1134 1135 if (ac->ac_multirangecnt > 0) { 1136 /* XXX Punt on ranges. 
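 * ath_mcastfilter_accum() above reduces each address to a single
 * bit position (0-63) in the two 32-bit filter words, so a range
 * of multicast addresses has no compact representation; both words
 * are set to all ones below and IFF_ALLMULTI is raised, accepting
 * every multicast frame.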
*/ 1137 (*mfilt)[0] = (*mfilt)[1] = ~((u_int32_t)0); 1138 ifp->if_flags |= IFF_ALLMULTI; 1139 return; 1140 } 1141 1142 ETHER_FIRST_MULTI(estep, ac, enm); 1143 while (enm != NULL) { 1144 ath_mcastfilter_accum(enm->enm_addrlo, mfilt); 1145 ETHER_NEXT_MULTI(estep, enm); 1146 } 1147 ifp->if_flags &= ~IFF_ALLMULTI; 1148 } 1149 1150 /* 1151 * Calculate the receive filter according to the 1152 * operating mode and state: 1153 * 1154 * o always accept unicast, broadcast, and multicast traffic 1155 * o maintain current state of phy error reception 1156 * o probe request frames are accepted only when operating in 1157 * hostap, adhoc, or monitor modes 1158 * o enable promiscuous mode according to the interface state 1159 * o accept beacons: 1160 * - when operating in adhoc mode so the 802.11 layer creates 1161 * node table entries for peers, 1162 * - when operating in station mode for collecting rssi data when 1163 * the station is otherwise quiet, or 1164 * - when scanning 1165 */ 1166 u_int32_t 1167 ath_calcrxfilter(struct ath_softc *sc) 1168 { 1169 struct ieee80211com *ic = &sc->sc_ic; 1170 struct ath_hal *ah = sc->sc_ah; 1171 struct ifnet *ifp = &ic->ic_if; 1172 u_int32_t rfilt; 1173 1174 rfilt = (ath_hal_get_rx_filter(ah) & HAL_RX_FILTER_PHYERR) 1175 | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST; 1176 if (ic->ic_opmode != IEEE80211_M_STA) 1177 rfilt |= HAL_RX_FILTER_PROBEREQ; 1178 #ifndef IEEE80211_STA_ONLY 1179 if (ic->ic_opmode != IEEE80211_M_AHDEMO) 1180 #endif 1181 rfilt |= HAL_RX_FILTER_BEACON; 1182 if (ifp->if_flags & IFF_PROMISC) 1183 rfilt |= HAL_RX_FILTER_PROM; 1184 return rfilt; 1185 } 1186 1187 void 1188 ath_mode_init(struct ath_softc *sc) 1189 { 1190 struct ath_hal *ah = sc->sc_ah; 1191 u_int32_t rfilt, mfilt[2]; 1192 1193 /* configure rx filter */ 1194 rfilt = ath_calcrxfilter(sc); 1195 ath_hal_set_rx_filter(ah, rfilt); 1196 1197 /* configure operational mode */ 1198 ath_hal_set_opmode(ah); 1199 1200 /* calculate and install multicast filter */ 1201 mfilt[0] = mfilt[1] = 0; 1202 ath_mcastfilter_compute(sc, &mfilt); 1203 ath_hal_set_mcast_filter(ah, mfilt[0], mfilt[1]); 1204 DPRINTF(ATH_DEBUG_MODE, ("%s: RX filter 0x%x, MC filter %08x:%08x\n", 1205 __func__, rfilt, mfilt[0], mfilt[1])); 1206 } 1207 1208 struct mbuf * 1209 ath_getmbuf(int flags, int type, u_int pktlen) 1210 { 1211 struct mbuf *m; 1212 1213 KASSERT(pktlen <= MCLBYTES, ("802.11 packet too large: %u", pktlen)); 1214 #ifdef __FreeBSD__ 1215 if (pktlen <= MHLEN) { 1216 MGETHDR(m, flags, type); 1217 } else { 1218 m = m_getcl(flags, type, M_PKTHDR); 1219 } 1220 #else 1221 MGETHDR(m, flags, type); 1222 if (m != NULL && pktlen > MHLEN) { 1223 MCLGET(m, flags); 1224 if ((m->m_flags & M_EXT) == 0) { 1225 m_free(m); 1226 m = NULL; 1227 } 1228 } 1229 #endif 1230 return m; 1231 } 1232 1233 #ifndef IEEE80211_STA_ONLY 1234 int 1235 ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni) 1236 { 1237 struct ieee80211com *ic = &sc->sc_ic; 1238 struct ath_hal *ah = sc->sc_ah; 1239 struct ath_buf *bf; 1240 struct ath_desc *ds; 1241 struct mbuf *m; 1242 int error; 1243 u_int8_t rate; 1244 const HAL_RATE_TABLE *rt; 1245 u_int flags = 0; 1246 1247 bf = sc->sc_bcbuf; 1248 if (bf->bf_m != NULL) { 1249 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1250 m_freem(bf->bf_m); 1251 bf->bf_m = NULL; 1252 bf->bf_node = NULL; 1253 } 1254 /* 1255 * NB: the beacon data buffer must be 32-bit aligned; 1256 * we assume the mbuf routines will return us something 1257 * with this alignment (perhaps should assert). 
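 * The buffer address is handed to the hardware unmodified: after
 * bus_dmamap_load_mbuf() the descriptor's ds_data field is loaded
 * with bf_segs[0].ds_addr below.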
1258 */ 1259 m = ieee80211_beacon_alloc(ic, ni); 1260 if (m == NULL) { 1261 DPRINTF(ATH_DEBUG_BEACON, ("%s: cannot get mbuf/cluster\n", 1262 __func__)); 1263 sc->sc_stats.ast_be_nombuf++; 1264 return ENOMEM; 1265 } 1266 1267 DPRINTF(ATH_DEBUG_BEACON, ("%s: m %p len %u\n", __func__, m, m->m_len)); 1268 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m, 1269 BUS_DMA_NOWAIT); 1270 if (error != 0) { 1271 m_freem(m); 1272 return error; 1273 } 1274 KASSERT(bf->bf_nseg == 1, 1275 ("%s: multi-segment packet; nseg %u", __func__, bf->bf_nseg)); 1276 bf->bf_m = m; 1277 1278 /* setup descriptors */ 1279 ds = bf->bf_desc; 1280 bzero(ds, sizeof(struct ath_desc)); 1281 1282 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol) { 1283 ds->ds_link = bf->bf_daddr; /* link to self */ 1284 flags |= HAL_TXDESC_VEOL; 1285 } else { 1286 ds->ds_link = 0; 1287 } 1288 ds->ds_data = bf->bf_segs[0].ds_addr; 1289 1290 DPRINTF(ATH_DEBUG_ANY, ("%s: segaddr %p seglen %u\n", __func__, 1291 (caddr_t)bf->bf_segs[0].ds_addr, (u_int)bf->bf_segs[0].ds_len)); 1292 1293 /* 1294 * Calculate rate code. 1295 * XXX everything at min xmit rate 1296 */ 1297 rt = sc->sc_currates; 1298 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 1299 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) { 1300 rate = rt->info[0].rateCode | rt->info[0].shortPreamble; 1301 } else { 1302 rate = rt->info[0].rateCode; 1303 } 1304 1305 flags = HAL_TXDESC_NOACK; 1306 if (ic->ic_opmode == IEEE80211_M_IBSS) 1307 flags |= HAL_TXDESC_VEOL; 1308 1309 if (!ath_hal_setup_tx_desc(ah, ds 1310 , m->m_pkthdr.len + IEEE80211_CRC_LEN /* packet length */ 1311 , sizeof(struct ieee80211_frame) /* header length */ 1312 , HAL_PKT_TYPE_BEACON /* Atheros packet type */ 1313 , 60 /* txpower XXX */ 1314 , rate, 1 /* series 0 rate/tries */ 1315 , HAL_TXKEYIX_INVALID /* no encryption */ 1316 , 0 /* antenna mode */ 1317 , flags /* no ack for beacons */ 1318 , 0 /* rts/cts rate */ 1319 , 0 /* rts/cts duration */ 1320 )) { 1321 printf("%s: ath_hal_setup_tx_desc failed\n", __func__); 1322 return -1; 1323 } 1324 /* NB: beacon's BufLen must be a multiple of 4 bytes */ 1325 /* XXX verify mbuf data area covers this roundup */ 1326 if (!ath_hal_fill_tx_desc(ah, ds 1327 , roundup(bf->bf_segs[0].ds_len, 4) /* buffer length */ 1328 , AH_TRUE /* first segment */ 1329 , AH_TRUE /* last segment */ 1330 )) { 1331 printf("%s: ath_hal_fill_tx_desc failed\n", __func__); 1332 return -1; 1333 } 1334 1335 /* XXX it is not appropriate to bus_dmamap_sync? -dcy */ 1336 1337 return 0; 1338 } 1339 1340 void 1341 ath_beacon_proc(void *arg, int pending) 1342 { 1343 struct ath_softc *sc = arg; 1344 struct ieee80211com *ic = &sc->sc_ic; 1345 struct ath_buf *bf = sc->sc_bcbuf; 1346 struct ath_hal *ah = sc->sc_ah; 1347 1348 DPRINTF(ATH_DEBUG_BEACON_PROC, ("%s: pending %u\n", __func__, pending)); 1349 if (ic->ic_opmode == IEEE80211_M_STA || 1350 bf == NULL || bf->bf_m == NULL) { 1351 DPRINTF(ATH_DEBUG_ANY, ("%s: ic_flags=%x bf=%p bf_m=%p\n", 1352 __func__, ic->ic_flags, bf, bf ? 
bf->bf_m : NULL)); 1353 return; 1354 } 1355 /* TODO: update beacon to reflect PS poll state */ 1356 if (!ath_hal_stop_tx_dma(ah, sc->sc_bhalq)) { 1357 DPRINTF(ATH_DEBUG_ANY, ("%s: beacon queue %u did not stop?\n", 1358 __func__, sc->sc_bhalq)); 1359 } 1360 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 1361 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1362 1363 ath_hal_put_tx_buf(ah, sc->sc_bhalq, bf->bf_daddr); 1364 ath_hal_tx_start(ah, sc->sc_bhalq); 1365 DPRINTF(ATH_DEBUG_BEACON_PROC, 1366 ("%s: TXDP%u = %p (%p)\n", __func__, 1367 sc->sc_bhalq, (caddr_t)bf->bf_daddr, bf->bf_desc)); 1368 } 1369 1370 void 1371 ath_beacon_free(struct ath_softc *sc) 1372 { 1373 struct ath_buf *bf = sc->sc_bcbuf; 1374 1375 if (bf->bf_m != NULL) { 1376 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1377 m_freem(bf->bf_m); 1378 bf->bf_m = NULL; 1379 bf->bf_node = NULL; 1380 } 1381 } 1382 #endif /* IEEE80211_STA_ONLY */ 1383 1384 /* 1385 * Configure the beacon and sleep timers. 1386 * 1387 * When operating as an AP this resets the TSF and sets 1388 * up the hardware to notify us when we need to issue beacons. 1389 * 1390 * When operating in station mode this sets up the beacon 1391 * timers according to the timestamp of the last received 1392 * beacon and the current TSF, configures PCF and DTIM 1393 * handling, programs the sleep registers so the hardware 1394 * will wakeup in time to receive beacons, and configures 1395 * the beacon miss handling so we'll receive a BMISS 1396 * interrupt when we stop seeing beacons from the AP 1397 * we've associated with. 1398 */ 1399 void 1400 ath_beacon_config(struct ath_softc *sc) 1401 { 1402 #define MS_TO_TU(x) (((x) * 1000) / 1024) 1403 struct ath_hal *ah = sc->sc_ah; 1404 struct ieee80211com *ic = &sc->sc_ic; 1405 struct ieee80211_node *ni = ic->ic_bss; 1406 u_int32_t nexttbtt, intval; 1407 1408 nexttbtt = (LE_READ_4(ni->ni_tstamp + 4) << 22) | 1409 (LE_READ_4(ni->ni_tstamp) >> 10); 1410 intval = MAX(1, ni->ni_intval) & HAL_BEACON_PERIOD; 1411 if (nexttbtt == 0) { /* e.g. for ap mode */ 1412 nexttbtt = intval; 1413 } else if (intval) { 1414 nexttbtt = roundup(nexttbtt, intval); 1415 } 1416 DPRINTF(ATH_DEBUG_BEACON, ("%s: intval %u nexttbtt %u\n", 1417 __func__, ni->ni_intval, nexttbtt)); 1418 if (ic->ic_opmode == IEEE80211_M_STA) { 1419 HAL_BEACON_STATE bs; 1420 1421 /* NB: no PCF support right now */ 1422 bzero(&bs, sizeof(bs)); 1423 bs.bs_intval = intval; 1424 bs.bs_nexttbtt = nexttbtt; 1425 bs.bs_dtimperiod = bs.bs_intval; 1426 bs.bs_nextdtim = nexttbtt; 1427 /* 1428 * Calculate the number of consecutive beacons to miss 1429 * before taking a BMISS interrupt. 1430 * Note that we clamp the result to at most 7 beacons. 1431 */ 1432 bs.bs_bmissthreshold = ic->ic_bmissthres; 1433 if (bs.bs_bmissthreshold > 7) { 1434 bs.bs_bmissthreshold = 7; 1435 } else if (bs.bs_bmissthreshold <= 0) { 1436 bs.bs_bmissthreshold = 1; 1437 } 1438 1439 /* 1440 * Calculate sleep duration. The configuration is 1441 * given in ms. We insure a multiple of the beacon 1442 * period is used. Also, if the sleep duration is 1443 * greater than the DTIM period then it makes senses 1444 * to make it a multiple of that. 
1445 * 1446 * XXX fixed at 100ms 1447 */ 1448 bs.bs_sleepduration = 1449 roundup(MS_TO_TU(100), bs.bs_intval); 1450 if (bs.bs_sleepduration > bs.bs_dtimperiod) { 1451 bs.bs_sleepduration = 1452 roundup(bs.bs_sleepduration, bs.bs_dtimperiod); 1453 } 1454 1455 DPRINTF(ATH_DEBUG_BEACON, 1456 ("%s: intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u" 1457 " sleep %u\n" 1458 , __func__ 1459 , bs.bs_intval 1460 , bs.bs_nexttbtt 1461 , bs.bs_dtimperiod 1462 , bs.bs_nextdtim 1463 , bs.bs_bmissthreshold 1464 , bs.bs_sleepduration 1465 )); 1466 ath_hal_set_intr(ah, 0); 1467 ath_hal_set_beacon_timers(ah, &bs, 0/*XXX*/, 0, 0); 1468 sc->sc_imask |= HAL_INT_BMISS; 1469 ath_hal_set_intr(ah, sc->sc_imask); 1470 } 1471 #ifndef IEEE80211_STA_ONLY 1472 else { 1473 ath_hal_set_intr(ah, 0); 1474 if (nexttbtt == intval) 1475 intval |= HAL_BEACON_RESET_TSF; 1476 if (ic->ic_opmode == IEEE80211_M_IBSS) { 1477 /* 1478 * In IBSS mode enable the beacon timers but only 1479 * enable SWBA interrupts if we need to manually 1480 * prepare beacon frames. Otherwise we use a 1481 * self-linked tx descriptor and let the hardware 1482 * deal with things. 1483 */ 1484 intval |= HAL_BEACON_ENA; 1485 if (!sc->sc_veol) 1486 sc->sc_imask |= HAL_INT_SWBA; 1487 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) { 1488 /* 1489 * In AP mode we enable the beacon timers and 1490 * SWBA interrupts to prepare beacon frames. 1491 */ 1492 intval |= HAL_BEACON_ENA; 1493 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ 1494 } 1495 ath_hal_init_beacon(ah, nexttbtt, intval); 1496 ath_hal_set_intr(ah, sc->sc_imask); 1497 /* 1498 * When using a self-linked beacon descriptor in IBBS 1499 * mode load it once here. 1500 */ 1501 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol) 1502 ath_beacon_proc(sc, 0); 1503 } 1504 #endif 1505 } 1506 1507 int 1508 ath_desc_alloc(struct ath_softc *sc) 1509 { 1510 int i, bsize, error = -1; 1511 struct ath_desc *ds; 1512 struct ath_buf *bf; 1513 1514 /* allocate descriptors */ 1515 sc->sc_desc_len = sizeof(struct ath_desc) * 1516 (ATH_TXBUF * ATH_TXDESC + ATH_RXBUF + 1); 1517 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_desc_len, PAGE_SIZE, 1518 0, &sc->sc_dseg, 1, &sc->sc_dnseg, 0)) != 0) { 1519 printf("%s: unable to allocate control data, error = %d\n", 1520 sc->sc_dev.dv_xname, error); 1521 goto fail0; 1522 } 1523 1524 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg, 1525 sc->sc_desc_len, (caddr_t *)&sc->sc_desc, BUS_DMA_COHERENT)) != 0) { 1526 printf("%s: unable to map control data, error = %d\n", 1527 sc->sc_dev.dv_xname, error); 1528 goto fail1; 1529 } 1530 1531 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_desc_len, 1, 1532 sc->sc_desc_len, 0, 0, &sc->sc_ddmamap)) != 0) { 1533 printf("%s: unable to create control data DMA map, " 1534 "error = %d\n", sc->sc_dev.dv_xname, error); 1535 goto fail2; 1536 } 1537 1538 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_ddmamap, sc->sc_desc, 1539 sc->sc_desc_len, NULL, 0)) != 0) { 1540 printf("%s: unable to load control data DMA map, error = %d\n", 1541 sc->sc_dev.dv_xname, error); 1542 goto fail3; 1543 } 1544 1545 ds = sc->sc_desc; 1546 sc->sc_desc_paddr = sc->sc_ddmamap->dm_segs[0].ds_addr; 1547 1548 DPRINTF(ATH_DEBUG_XMIT_DESC|ATH_DEBUG_RECV_DESC, 1549 ("ath_desc_alloc: DMA map: %p (%lu) -> %p (%lu)\n", 1550 ds, (u_long)sc->sc_desc_len, 1551 (caddr_t) sc->sc_desc_paddr, /*XXX*/ (u_long) sc->sc_desc_len)); 1552 1553 /* allocate buffers */ 1554 bsize = sizeof(struct ath_buf) * (ATH_TXBUF + ATH_RXBUF + 1); 1555 bf = malloc(bsize, M_DEVBUF, 
M_NOWAIT | M_ZERO); 1556 if (bf == NULL) { 1557 printf("%s: unable to allocate Tx/Rx buffers\n", 1558 sc->sc_dev.dv_xname); 1559 error = ENOMEM; 1560 goto fail3; 1561 } 1562 sc->sc_bufptr = bf; 1563 1564 TAILQ_INIT(&sc->sc_rxbuf); 1565 for (i = 0; i < ATH_RXBUF; i++, bf++, ds++) { 1566 bf->bf_desc = ds; 1567 bf->bf_daddr = sc->sc_desc_paddr + 1568 ((caddr_t)ds - (caddr_t)sc->sc_desc); 1569 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 1570 MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) { 1571 printf("%s: unable to create Rx dmamap, error = %d\n", 1572 sc->sc_dev.dv_xname, error); 1573 goto fail4; 1574 } 1575 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 1576 } 1577 1578 TAILQ_INIT(&sc->sc_txbuf); 1579 for (i = 0; i < ATH_TXBUF; i++, bf++, ds += ATH_TXDESC) { 1580 bf->bf_desc = ds; 1581 bf->bf_daddr = sc->sc_desc_paddr + 1582 ((caddr_t)ds - (caddr_t)sc->sc_desc); 1583 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1584 ATH_TXDESC, MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) { 1585 printf("%s: unable to create Tx dmamap, error = %d\n", 1586 sc->sc_dev.dv_xname, error); 1587 goto fail5; 1588 } 1589 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 1590 } 1591 TAILQ_INIT(&sc->sc_txq); 1592 1593 /* beacon buffer */ 1594 bf->bf_desc = ds; 1595 bf->bf_daddr = sc->sc_desc_paddr + ((caddr_t)ds - (caddr_t)sc->sc_desc); 1596 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 0, 1597 &bf->bf_dmamap)) != 0) { 1598 printf("%s: unable to create beacon dmamap, error = %d\n", 1599 sc->sc_dev.dv_xname, error); 1600 goto fail5; 1601 } 1602 sc->sc_bcbuf = bf; 1603 return 0; 1604 1605 fail5: 1606 for (i = ATH_RXBUF; i < ATH_RXBUF + ATH_TXBUF; i++) { 1607 if (sc->sc_bufptr[i].bf_dmamap == NULL) 1608 continue; 1609 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap); 1610 } 1611 fail4: 1612 for (i = 0; i < ATH_RXBUF; i++) { 1613 if (sc->sc_bufptr[i].bf_dmamap == NULL) 1614 continue; 1615 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap); 1616 } 1617 fail3: 1618 bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap); 1619 fail2: 1620 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap); 1621 sc->sc_ddmamap = NULL; 1622 fail1: 1623 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_desc, sc->sc_desc_len); 1624 fail0: 1625 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg); 1626 return error; 1627 } 1628 1629 void 1630 ath_desc_free(struct ath_softc *sc) 1631 { 1632 struct ath_buf *bf; 1633 1634 bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap); 1635 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap); 1636 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg); 1637 1638 TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) { 1639 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1640 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 1641 m_freem(bf->bf_m); 1642 } 1643 TAILQ_FOREACH(bf, &sc->sc_txbuf, bf_list) 1644 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 1645 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 1646 if (bf->bf_m) { 1647 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1648 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 1649 m_freem(bf->bf_m); 1650 bf->bf_m = NULL; 1651 } 1652 } 1653 if (sc->sc_bcbuf != NULL) { 1654 bus_dmamap_unload(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap); 1655 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap); 1656 sc->sc_bcbuf = NULL; 1657 } 1658 1659 TAILQ_INIT(&sc->sc_rxbuf); 1660 TAILQ_INIT(&sc->sc_txbuf); 1661 TAILQ_INIT(&sc->sc_txq); 1662 free(sc->sc_bufptr, M_DEVBUF, 0); 1663 sc->sc_bufptr = NULL; 1664 } 1665 1666 struct ieee80211_node * 1667 ath_node_alloc(struct 
ieee80211com *ic) 1668 { 1669 struct ath_node *an; 1670 1671 an = malloc(sizeof(*an), M_DEVBUF, M_NOWAIT | M_ZERO); 1672 if (an) { 1673 int i; 1674 for (i = 0; i < ATH_RHIST_SIZE; i++) 1675 an->an_rx_hist[i].arh_ticks = ATH_RHIST_NOTIME; 1676 an->an_rx_hist_next = ATH_RHIST_SIZE-1; 1677 return &an->an_node; 1678 } else 1679 return NULL; 1680 } 1681 1682 void 1683 ath_node_free(struct ieee80211com *ic, struct ieee80211_node *ni) 1684 { 1685 struct ath_softc *sc = ic->ic_if.if_softc; 1686 struct ath_buf *bf; 1687 1688 TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) { 1689 if (bf->bf_node == ni) 1690 bf->bf_node = NULL; 1691 } 1692 (*sc->sc_node_free)(ic, ni); 1693 } 1694 1695 void 1696 ath_node_copy(struct ieee80211com *ic, 1697 struct ieee80211_node *dst, const struct ieee80211_node *src) 1698 { 1699 struct ath_softc *sc = ic->ic_if.if_softc; 1700 1701 bcopy(&src[1], &dst[1], 1702 sizeof(struct ath_node) - sizeof(struct ieee80211_node)); 1703 (*sc->sc_node_copy)(ic, dst, src); 1704 } 1705 1706 u_int8_t 1707 ath_node_getrssi(struct ieee80211com *ic, const struct ieee80211_node *ni) 1708 { 1709 const struct ath_node *an = ATH_NODE(ni); 1710 int i, now, nsamples, rssi; 1711 1712 /* 1713 * Calculate the average over the last second of sampled data. 1714 */ 1715 now = ATH_TICKS(); 1716 nsamples = 0; 1717 rssi = 0; 1718 i = an->an_rx_hist_next; 1719 do { 1720 const struct ath_recv_hist *rh = &an->an_rx_hist[i]; 1721 if (rh->arh_ticks == ATH_RHIST_NOTIME) 1722 goto done; 1723 if (now - rh->arh_ticks > hz) 1724 goto done; 1725 rssi += rh->arh_rssi; 1726 nsamples++; 1727 if (i == 0) { 1728 i = ATH_RHIST_SIZE-1; 1729 } else { 1730 i--; 1731 } 1732 } while (i != an->an_rx_hist_next); 1733 done: 1734 /* 1735 * Return either the average or the last known 1736 * value if there is no recent data. 1737 */ 1738 return (nsamples ? rssi / nsamples : an->an_rx_hist[i].arh_rssi); 1739 } 1740 1741 int 1742 ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) 1743 { 1744 struct ath_hal *ah = sc->sc_ah; 1745 int error; 1746 struct mbuf *m; 1747 struct ath_desc *ds; 1748 1749 m = bf->bf_m; 1750 if (m == NULL) { 1751 /* 1752 * NB: by assigning a page to the rx dma buffer we 1753 * implicitly satisfy the Atheros requirement that 1754 * this buffer be cache-line-aligned and sized to be 1755 * multiple of the cache line size. Not doing this 1756 * causes weird stuff to happen (for the 5210 at least). 1757 */ 1758 m = ath_getmbuf(M_DONTWAIT, MT_DATA, MCLBYTES); 1759 if (m == NULL) { 1760 DPRINTF(ATH_DEBUG_ANY, 1761 ("%s: no mbuf/cluster\n", __func__)); 1762 sc->sc_stats.ast_rx_nombuf++; 1763 return ENOMEM; 1764 } 1765 bf->bf_m = m; 1766 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 1767 1768 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m, 1769 BUS_DMA_NOWAIT); 1770 if (error != 0) { 1771 DPRINTF(ATH_DEBUG_ANY, 1772 ("%s: ath_bus_dmamap_load_mbuf failed;" 1773 " error %d\n", __func__, error)); 1774 sc->sc_stats.ast_rx_busdma++; 1775 return error; 1776 } 1777 KASSERT(bf->bf_nseg == 1, 1778 ("ath_rxbuf_init: multi-segment packet; nseg %u", 1779 bf->bf_nseg)); 1780 } 1781 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 1782 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1783 1784 /* 1785 * Setup descriptors. For receive we always terminate 1786 * the descriptor list with a self-linked entry so we'll 1787 * not get overrun under high load (as can happen with a 1788 * 5212 when ANI processing enables PHY errors). 
1789 * 1790 * To insure the last descriptor is self-linked we create 1791 * each descriptor as self-linked and add it to the end. As 1792 * each additional descriptor is added the previous self-linked 1793 * entry is ``fixed'' naturally. This should be safe even 1794 * if DMA is happening. When processing RX interrupts we 1795 * never remove/process the last, self-linked, entry on the 1796 * descriptor list. This insures the hardware always has 1797 * someplace to write a new frame. 1798 */ 1799 ds = bf->bf_desc; 1800 bzero(ds, sizeof(struct ath_desc)); 1801 #ifndef IEEE80211_STA_ONLY 1802 if (sc->sc_ic.ic_opmode != IEEE80211_M_HOSTAP) 1803 ds->ds_link = bf->bf_daddr; /* link to self */ 1804 #endif 1805 ds->ds_data = bf->bf_segs[0].ds_addr; 1806 ath_hal_setup_rx_desc(ah, ds 1807 , m->m_len /* buffer size */ 1808 , 0 1809 ); 1810 1811 if (sc->sc_rxlink != NULL) 1812 *sc->sc_rxlink = bf->bf_daddr; 1813 sc->sc_rxlink = &ds->ds_link; 1814 return 0; 1815 } 1816 1817 void 1818 ath_rx_proc(void *arg, int npending) 1819 { 1820 #define PA2DESC(_sc, _pa) \ 1821 ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \ 1822 ((_pa) - (_sc)->sc_desc_paddr))) 1823 struct ath_softc *sc = arg; 1824 struct ath_buf *bf; 1825 struct ieee80211com *ic = &sc->sc_ic; 1826 struct ifnet *ifp = &ic->ic_if; 1827 struct ath_hal *ah = sc->sc_ah; 1828 struct ath_desc *ds; 1829 struct mbuf *m; 1830 struct ieee80211_frame *wh; 1831 struct ieee80211_frame whbuf; 1832 struct ieee80211_rxinfo rxi; 1833 struct ieee80211_node *ni; 1834 struct ath_node *an; 1835 struct ath_recv_hist *rh; 1836 int len; 1837 u_int phyerr; 1838 HAL_STATUS status; 1839 1840 DPRINTF(ATH_DEBUG_RX_PROC, ("%s: pending %u\n", __func__, npending)); 1841 do { 1842 bf = TAILQ_FIRST(&sc->sc_rxbuf); 1843 if (bf == NULL) { /* NB: shouldn't happen */ 1844 printf("%s: ath_rx_proc: no buffer!\n", ifp->if_xname); 1845 break; 1846 } 1847 ds = bf->bf_desc; 1848 if (ds->ds_link == bf->bf_daddr) { 1849 /* NB: never process the self-linked entry at the end */ 1850 break; 1851 } 1852 m = bf->bf_m; 1853 if (m == NULL) { /* NB: shouldn't happen */ 1854 printf("%s: ath_rx_proc: no mbuf!\n", ifp->if_xname); 1855 continue; 1856 } 1857 /* XXX sync descriptor memory */ 1858 /* 1859 * Must provide the virtual address of the current 1860 * descriptor, the physical address, and the virtual 1861 * address of the next descriptor in the h/w chain. 1862 * This allows the HAL to look ahead to see if the 1863 * hardware is done with a descriptor by checking the 1864 * done bit in the following descriptor and the address 1865 * of the current descriptor the DMA engine is working 1866 * on. All this is necessary because of our use of 1867 * a self-linked list to avoid rx overruns. 1868 */ 1869 status = ath_hal_proc_rx_desc(ah, ds, 1870 bf->bf_daddr, PA2DESC(sc, ds->ds_link)); 1871 #ifdef AR_DEBUG 1872 if (ath_debug & ATH_DEBUG_RECV_DESC) 1873 ath_printrxbuf(bf, status == HAL_OK); 1874 #endif 1875 if (status == HAL_EINPROGRESS) 1876 break; 1877 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 1878 1879 if (ds->ds_rxstat.rs_more) { 1880 /* 1881 * Frame spans multiple descriptors; this 1882 * cannot happen yet as we don't support 1883 * jumbograms. If not in monitor mode, 1884 * discard the frame. 1885 */ 1886 1887 /* 1888 * Enable this if you want to see error 1889 * frames in Monitor mode. 1890 */ 1891 #ifdef ERROR_FRAMES 1892 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 1893 /* XXX statistic */ 1894 goto rx_next; 1895 } 1896 #endif 1897 /* fall thru for monitor mode handling... 
*/ 1898 1899 } else if (ds->ds_rxstat.rs_status != 0) { 1900 if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC) 1901 sc->sc_stats.ast_rx_crcerr++; 1902 if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO) 1903 sc->sc_stats.ast_rx_fifoerr++; 1904 if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) 1905 sc->sc_stats.ast_rx_badcrypt++; 1906 if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) { 1907 sc->sc_stats.ast_rx_phyerr++; 1908 phyerr = ds->ds_rxstat.rs_phyerr & 0x1f; 1909 sc->sc_stats.ast_rx_phy[phyerr]++; 1910 } 1911 1912 /* 1913 * reject error frames, we normally don't want 1914 * to see them in monitor mode. 1915 */ 1916 if ((ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT ) || 1917 (ds->ds_rxstat.rs_status & HAL_RXERR_PHY)) 1918 goto rx_next; 1919 1920 /* 1921 * In monitor mode, allow through packets that 1922 * cannot be decrypted 1923 */ 1924 if ((ds->ds_rxstat.rs_status & ~HAL_RXERR_DECRYPT) || 1925 sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR) 1926 goto rx_next; 1927 } 1928 1929 len = ds->ds_rxstat.rs_datalen; 1930 if (len < IEEE80211_MIN_LEN) { 1931 DPRINTF(ATH_DEBUG_RECV, ("%s: short packet %d\n", 1932 __func__, len)); 1933 sc->sc_stats.ast_rx_tooshort++; 1934 goto rx_next; 1935 } 1936 1937 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 1938 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1939 1940 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1941 bf->bf_m = NULL; 1942 m->m_pkthdr.len = m->m_len = len; 1943 1944 #if NBPFILTER > 0 1945 if (sc->sc_drvbpf) { 1946 struct mbuf mb; 1947 1948 sc->sc_rxtap.wr_flags = IEEE80211_RADIOTAP_F_FCS; 1949 sc->sc_rxtap.wr_rate = 1950 sc->sc_hwmap[ds->ds_rxstat.rs_rate] & 1951 IEEE80211_RATE_VAL; 1952 sc->sc_rxtap.wr_antenna = ds->ds_rxstat.rs_antenna; 1953 sc->sc_rxtap.wr_rssi = ds->ds_rxstat.rs_rssi; 1954 sc->sc_rxtap.wr_max_rssi = ic->ic_max_rssi; 1955 1956 mb.m_data = (caddr_t)&sc->sc_rxtap; 1957 mb.m_len = sc->sc_rxtap_len; 1958 mb.m_next = m; 1959 mb.m_nextpkt = NULL; 1960 mb.m_type = 0; 1961 mb.m_flags = 0; 1962 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN); 1963 } 1964 #endif 1965 m_adj(m, -IEEE80211_CRC_LEN); 1966 wh = mtod(m, struct ieee80211_frame *); 1967 rxi.rxi_flags = 0; 1968 if (!ath_softcrypto && (wh->i_fc[1] & IEEE80211_FC1_WEP)) { 1969 /* 1970 * WEP is decrypted by hardware. Clear WEP bit 1971 * and trim WEP header for ieee80211_input(). 1972 */ 1973 wh->i_fc[1] &= ~IEEE80211_FC1_WEP; 1974 bcopy(wh, &whbuf, sizeof(whbuf)); 1975 m_adj(m, IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN); 1976 wh = mtod(m, struct ieee80211_frame *); 1977 bcopy(&whbuf, wh, sizeof(whbuf)); 1978 /* 1979 * Also trim WEP ICV from the tail. 1980 */ 1981 m_adj(m, -IEEE80211_WEP_CRCLEN); 1982 /* 1983 * The header has probably moved. 1984 */ 1985 wh = mtod(m, struct ieee80211_frame *); 1986 1987 rxi.rxi_flags |= IEEE80211_RXI_HWDEC; 1988 } 1989 1990 /* 1991 * Locate the node for sender, track state, and 1992 * then pass this node (referenced) up to the 802.11 1993 * layer for its use. 1994 */ 1995 ni = ieee80211_find_rxnode(ic, wh); 1996 1997 /* 1998 * Record driver-specific state. 1999 */ 2000 an = ATH_NODE(ni); 2001 if (++(an->an_rx_hist_next) == ATH_RHIST_SIZE) 2002 an->an_rx_hist_next = 0; 2003 rh = &an->an_rx_hist[an->an_rx_hist_next]; 2004 rh->arh_ticks = ATH_TICKS(); 2005 rh->arh_rssi = ds->ds_rxstat.rs_rssi; 2006 rh->arh_antenna = ds->ds_rxstat.rs_antenna; 2007 2008 /* 2009 * Send frame up for processing. 
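		 *
		 * The RSSI and hardware timestamp taken from the rx
		 * descriptor are handed to net80211 through the
		 * ieee80211_rxinfo structure, together with the
		 * IEEE80211_RXI_HWDEC flag set above when the frame
		 * was already decrypted by the hardware.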
2010 */ 2011 rxi.rxi_rssi = ds->ds_rxstat.rs_rssi; 2012 rxi.rxi_tstamp = ds->ds_rxstat.rs_tstamp; 2013 ieee80211_input(ifp, m, ni, &rxi); 2014 2015 /* Handle the rate adaption */ 2016 ieee80211_rssadapt_input(ic, ni, &an->an_rssadapt, 2017 ds->ds_rxstat.rs_rssi); 2018 2019 /* 2020 * The frame may have caused the node to be marked for 2021 * reclamation (e.g. in response to a DEAUTH message) 2022 * so use release_node here instead of unref_node. 2023 */ 2024 ieee80211_release_node(ic, ni); 2025 2026 rx_next: 2027 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 2028 } while (ath_rxbuf_init(sc, bf) == 0); 2029 2030 ath_hal_set_rx_signal(ah); /* rx signal state monitoring */ 2031 ath_hal_start_rx(ah); /* in case of RXEOL */ 2032 #undef PA2DESC 2033 } 2034 2035 /* 2036 * XXX Size of an ACK control frame in bytes. 2037 */ 2038 #define IEEE80211_ACK_SIZE (2+2+IEEE80211_ADDR_LEN+4) 2039 2040 int 2041 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, 2042 struct ath_buf *bf, struct mbuf *m0) 2043 { 2044 struct ieee80211com *ic = &sc->sc_ic; 2045 struct ath_hal *ah = sc->sc_ah; 2046 struct ifnet *ifp = &sc->sc_ic.ic_if; 2047 int i, error, iswep, hdrlen, pktlen, len, s, tries; 2048 u_int8_t rix, cix, txrate, ctsrate; 2049 struct ath_desc *ds; 2050 struct ieee80211_frame *wh; 2051 struct ieee80211_key *k; 2052 u_int32_t iv; 2053 u_int8_t *ivp; 2054 u_int8_t hdrbuf[sizeof(struct ieee80211_frame) + 2055 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN]; 2056 u_int subtype, flags, ctsduration, antenna; 2057 HAL_PKT_TYPE atype; 2058 const HAL_RATE_TABLE *rt; 2059 HAL_BOOL shortPreamble; 2060 struct ath_node *an; 2061 u_int8_t hwqueue = HAL_TX_QUEUE_ID_DATA_MIN; 2062 2063 wh = mtod(m0, struct ieee80211_frame *); 2064 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; 2065 hdrlen = sizeof(struct ieee80211_frame); 2066 pktlen = m0->m_pkthdr.len; 2067 2068 if (ath_softcrypto && iswep) { 2069 k = ieee80211_get_txkey(ic, wh, ni); 2070 if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL) 2071 return ENOMEM; 2072 wh = mtod(m0, struct ieee80211_frame *); 2073 2074 /* reset len in case we got a new mbuf */ 2075 pktlen = m0->m_pkthdr.len; 2076 } else if (!ath_softcrypto && iswep) { 2077 bcopy(mtod(m0, caddr_t), hdrbuf, hdrlen); 2078 m_adj(m0, hdrlen); 2079 M_PREPEND(m0, sizeof(hdrbuf), M_DONTWAIT); 2080 if (m0 == NULL) { 2081 sc->sc_stats.ast_tx_nombuf++; 2082 return ENOMEM; 2083 } 2084 ivp = hdrbuf + hdrlen; 2085 wh = mtod(m0, struct ieee80211_frame *); 2086 /* 2087 * XXX 2088 * IV must not duplicate during the lifetime of the key. 2089 * But no mechanism to renew keys is defined in IEEE 802.11 2090 * for WEP. And the IV may be duplicated at other stations 2091 * because the session key itself is shared. So we use a 2092 * pseudo random IV for now, though it is not the right way. 2093 * 2094 * NB: Rather than use a strictly random IV we select a 2095 * random one to start and then increment the value for 2096 * each frame. This is an explicit tradeoff between 2097 * overhead and security. Given the basic insecurity of 2098 * WEP this seems worthwhile. 2099 */ 2100 2101 /* 2102 * Skip 'bad' IVs from Fluhrer/Mantin/Shamir: 2103 * (B, 255, N) with 3 <= B < 16 and 0 <= N <= 255 2104 */ 2105 iv = ic->ic_iv; 2106 if ((iv & 0xff00) == 0xff00) { 2107 int B = (iv & 0xff0000) >> 16; 2108 if (3 <= B && B < 16) 2109 iv = (B+1) << 16; 2110 } 2111 ic->ic_iv = iv + 1; 2112 2113 /* 2114 * NB: Preserve byte order of IV for packet 2115 * sniffers; it doesn't matter otherwise. 
2116 */ 2117 #if BYTE_ORDER == BIG_ENDIAN 2118 ivp[0] = iv >> 0; 2119 ivp[1] = iv >> 8; 2120 ivp[2] = iv >> 16; 2121 #else 2122 ivp[2] = iv >> 0; 2123 ivp[1] = iv >> 8; 2124 ivp[0] = iv >> 16; 2125 #endif 2126 ivp[3] = ic->ic_wep_txkey << 6; /* Key ID and pad */ 2127 bcopy(hdrbuf, mtod(m0, caddr_t), sizeof(hdrbuf)); 2128 /* 2129 * The length of hdrlen and pktlen must be increased for WEP 2130 */ 2131 len = IEEE80211_WEP_IVLEN + 2132 IEEE80211_WEP_KIDLEN + 2133 IEEE80211_WEP_CRCLEN; 2134 hdrlen += len; 2135 pktlen += len; 2136 } 2137 pktlen += IEEE80211_CRC_LEN; 2138 2139 /* 2140 * Load the DMA map so any coalescing is done. This 2141 * also calculates the number of descriptors we need. 2142 */ 2143 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, 2144 BUS_DMA_NOWAIT); 2145 /* 2146 * Discard null packets and check for packets that 2147 * require too many TX descriptors. We try to convert 2148 * the latter to a cluster. 2149 */ 2150 if (error == EFBIG) { /* too many desc's, linearize */ 2151 sc->sc_stats.ast_tx_linear++; 2152 if (m_defrag(m0, M_DONTWAIT)) { 2153 sc->sc_stats.ast_tx_nomcl++; 2154 m_freem(m0); 2155 return ENOMEM; 2156 } 2157 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, 2158 BUS_DMA_NOWAIT); 2159 if (error != 0) { 2160 sc->sc_stats.ast_tx_busdma++; 2161 m_freem(m0); 2162 return error; 2163 } 2164 KASSERT(bf->bf_nseg == 1, 2165 ("ath_tx_start: packet not one segment; nseg %u", 2166 bf->bf_nseg)); 2167 } else if (error != 0) { 2168 sc->sc_stats.ast_tx_busdma++; 2169 m_freem(m0); 2170 return error; 2171 } else if (bf->bf_nseg == 0) { /* null packet, discard */ 2172 sc->sc_stats.ast_tx_nodata++; 2173 m_freem(m0); 2174 return EIO; 2175 } 2176 DPRINTF(ATH_DEBUG_XMIT, ("%s: m %p len %u\n", __func__, m0, pktlen)); 2177 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 2178 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 2179 bf->bf_m = m0; 2180 bf->bf_node = ni; /* NB: held reference */ 2181 an = ATH_NODE(ni); 2182 2183 /* setup descriptors */ 2184 ds = bf->bf_desc; 2185 rt = sc->sc_currates; 2186 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 2187 2188 /* 2189 * Calculate Atheros packet type from IEEE80211 packet header 2190 * and setup for rate calculations. 
2191 */ 2192 bf->bf_id.id_node = NULL; 2193 atype = HAL_PKT_TYPE_NORMAL; /* default */ 2194 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 2195 case IEEE80211_FC0_TYPE_MGT: 2196 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2197 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) { 2198 atype = HAL_PKT_TYPE_BEACON; 2199 } else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) { 2200 atype = HAL_PKT_TYPE_PROBE_RESP; 2201 } else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) { 2202 atype = HAL_PKT_TYPE_ATIM; 2203 } 2204 rix = 0; /* XXX lowest rate */ 2205 break; 2206 case IEEE80211_FC0_TYPE_CTL: 2207 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2208 if (subtype == IEEE80211_FC0_SUBTYPE_PS_POLL) 2209 atype = HAL_PKT_TYPE_PSPOLL; 2210 rix = 0; /* XXX lowest rate */ 2211 break; 2212 default: 2213 /* remember link conditions for rate adaptation algorithm */ 2214 if (ic->ic_fixed_rate == -1) { 2215 bf->bf_id.id_len = m0->m_pkthdr.len; 2216 bf->bf_id.id_rateidx = ni->ni_txrate; 2217 bf->bf_id.id_node = ni; 2218 bf->bf_id.id_rssi = ath_node_getrssi(ic, ni); 2219 } 2220 ni->ni_txrate = ieee80211_rssadapt_choose(&an->an_rssadapt, 2221 &ni->ni_rates, wh, m0->m_pkthdr.len, ic->ic_fixed_rate, 2222 ifp->if_xname, 0); 2223 rix = sc->sc_rixmap[ni->ni_rates.rs_rates[ni->ni_txrate] & 2224 IEEE80211_RATE_VAL]; 2225 if (rix == 0xff) { 2226 printf("%s: bogus xmit rate 0x%x (idx 0x%x)\n", 2227 ifp->if_xname, ni->ni_rates.rs_rates[ni->ni_txrate], 2228 ni->ni_txrate); 2229 sc->sc_stats.ast_tx_badrate++; 2230 m_freem(m0); 2231 return EIO; 2232 } 2233 break; 2234 } 2235 2236 /* 2237 * NB: the 802.11 layer marks whether or not we should 2238 * use short preamble based on the current mode and 2239 * negotiated parameters. 2240 */ 2241 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 2242 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 2243 txrate = rt->info[rix].rateCode | rt->info[rix].shortPreamble; 2244 shortPreamble = AH_TRUE; 2245 sc->sc_stats.ast_tx_shortpre++; 2246 } else { 2247 txrate = rt->info[rix].rateCode; 2248 shortPreamble = AH_FALSE; 2249 } 2250 2251 /* 2252 * Calculate miscellaneous flags. 2253 */ 2254 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for wep errors */ 2255 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2256 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ 2257 sc->sc_stats.ast_tx_noack++; 2258 } else if (pktlen > ic->ic_rtsthreshold) { 2259 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ 2260 sc->sc_stats.ast_tx_rts++; 2261 } 2262 2263 /* 2264 * Calculate duration. This logically belongs in the 802.11 2265 * layer but it lacks sufficient information to calculate it. 2266 */ 2267 if ((flags & HAL_TXDESC_NOACK) == 0 && 2268 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 2269 u_int16_t dur; 2270 /* 2271 * XXX not right with fragmentation. 2272 */ 2273 dur = ath_hal_computetxtime(ah, rt, IEEE80211_ACK_SIZE, 2274 rix, shortPreamble); 2275 *((u_int16_t*) wh->i_dur) = htole16(dur); 2276 } 2277 2278 /* 2279 * Calculate RTS/CTS rate and duration if needed. 2280 */ 2281 ctsduration = 0; 2282 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) { 2283 /* 2284 * CTS transmit rate is derived from the transmit rate 2285 * by looking in the h/w rate table. We must also factor 2286 * in whether or not a short preamble is to be used. 
2287 */ 2288 cix = rt->info[rix].controlRate; 2289 ctsrate = rt->info[cix].rateCode; 2290 if (shortPreamble) 2291 ctsrate |= rt->info[cix].shortPreamble; 2292 /* 2293 * Compute the transmit duration based on the size 2294 * of an ACK frame. We call into the HAL to do the 2295 * computation since it depends on the characteristics 2296 * of the actual PHY being used. 2297 */ 2298 if (flags & HAL_TXDESC_RTSENA) { /* SIFS + CTS */ 2299 ctsduration += ath_hal_computetxtime(ah, 2300 rt, IEEE80211_ACK_SIZE, cix, shortPreamble); 2301 } 2302 /* SIFS + data */ 2303 ctsduration += ath_hal_computetxtime(ah, 2304 rt, pktlen, rix, shortPreamble); 2305 if ((flags & HAL_TXDESC_NOACK) == 0) { /* SIFS + ACK */ 2306 ctsduration += ath_hal_computetxtime(ah, 2307 rt, IEEE80211_ACK_SIZE, cix, shortPreamble); 2308 } 2309 } else 2310 ctsrate = 0; 2311 2312 /* 2313 * For now use the antenna on which the last good 2314 * frame was received on. We assume this field is 2315 * initialized to 0 which gives us ``auto'' or the 2316 * ``default'' antenna. 2317 */ 2318 if (an->an_tx_antenna) { 2319 antenna = an->an_tx_antenna; 2320 } else { 2321 antenna = an->an_rx_hist[an->an_rx_hist_next].arh_antenna; 2322 } 2323 2324 #if NBPFILTER > 0 2325 if (ic->ic_rawbpf) 2326 bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT); 2327 2328 if (sc->sc_drvbpf) { 2329 struct mbuf mb; 2330 2331 sc->sc_txtap.wt_flags = 0; 2332 if (shortPreamble) 2333 sc->sc_txtap.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2334 if (!ath_softcrypto && iswep) 2335 sc->sc_txtap.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2336 sc->sc_txtap.wt_rate = ni->ni_rates.rs_rates[ni->ni_txrate] & 2337 IEEE80211_RATE_VAL; 2338 sc->sc_txtap.wt_txpower = 30; 2339 sc->sc_txtap.wt_antenna = antenna; 2340 sc->sc_txtap.wt_hwqueue = hwqueue; 2341 2342 mb.m_data = (caddr_t)&sc->sc_txtap; 2343 mb.m_len = sc->sc_txtap_len; 2344 mb.m_next = m0; 2345 mb.m_nextpkt = NULL; 2346 mb.m_type = 0; 2347 mb.m_flags = 0; 2348 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT); 2349 } 2350 #endif 2351 2352 /* 2353 * Formulate first tx descriptor with tx controls. 2354 */ 2355 tries = IEEE80211_IS_MULTICAST(wh->i_addr1) ? 1 : 15; 2356 /* XXX check return value? */ 2357 ath_hal_setup_tx_desc(ah, ds 2358 , pktlen /* packet length */ 2359 , hdrlen /* header length */ 2360 , atype /* Atheros packet type */ 2361 , 60 /* txpower XXX */ 2362 , txrate, tries /* series 0 rate/tries */ 2363 , iswep ? sc->sc_ic.ic_wep_txkey : HAL_TXKEYIX_INVALID 2364 , antenna /* antenna mode */ 2365 , flags /* flags */ 2366 , ctsrate /* rts/cts rate */ 2367 , ctsduration /* rts/cts duration */ 2368 ); 2369 #ifdef notyet 2370 ath_hal_setup_xtx_desc(ah, ds 2371 , AH_FALSE /* short preamble */ 2372 , 0, 0 /* series 1 rate/tries */ 2373 , 0, 0 /* series 2 rate/tries */ 2374 , 0, 0 /* series 3 rate/tries */ 2375 ); 2376 #endif 2377 /* 2378 * Fillin the remainder of the descriptor info. 
2379 */ 2380 for (i = 0; i < bf->bf_nseg; i++, ds++) { 2381 ds->ds_data = bf->bf_segs[i].ds_addr; 2382 if (i == bf->bf_nseg - 1) { 2383 ds->ds_link = 0; 2384 } else { 2385 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1); 2386 } 2387 ath_hal_fill_tx_desc(ah, ds 2388 , bf->bf_segs[i].ds_len /* segment length */ 2389 , i == 0 /* first segment */ 2390 , i == bf->bf_nseg - 1 /* last segment */ 2391 ); 2392 DPRINTF(ATH_DEBUG_XMIT, 2393 ("%s: %d: %08x %08x %08x %08x %08x %08x\n", 2394 __func__, i, ds->ds_link, ds->ds_data, 2395 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1])); 2396 } 2397 2398 /* 2399 * Insert the frame on the outbound list and 2400 * pass it on to the hardware. 2401 */ 2402 s = splnet(); 2403 TAILQ_INSERT_TAIL(&sc->sc_txq, bf, bf_list); 2404 if (sc->sc_txlink == NULL) { 2405 ath_hal_put_tx_buf(ah, sc->sc_txhalq[hwqueue], bf->bf_daddr); 2406 DPRINTF(ATH_DEBUG_XMIT, ("%s: TXDP0 = %p (%p)\n", __func__, 2407 (caddr_t)bf->bf_daddr, bf->bf_desc)); 2408 } else { 2409 *sc->sc_txlink = bf->bf_daddr; 2410 DPRINTF(ATH_DEBUG_XMIT, ("%s: link(%p)=%p (%p)\n", __func__, 2411 sc->sc_txlink, (caddr_t)bf->bf_daddr, bf->bf_desc)); 2412 } 2413 sc->sc_txlink = &bf->bf_desc[bf->bf_nseg - 1].ds_link; 2414 splx(s); 2415 2416 ath_hal_tx_start(ah, sc->sc_txhalq[hwqueue]); 2417 return 0; 2418 } 2419 2420 void 2421 ath_tx_proc(void *arg, int npending) 2422 { 2423 struct ath_softc *sc = arg; 2424 struct ath_hal *ah = sc->sc_ah; 2425 struct ath_buf *bf; 2426 struct ieee80211com *ic = &sc->sc_ic; 2427 struct ifnet *ifp = &ic->ic_if; 2428 struct ath_desc *ds; 2429 struct ieee80211_node *ni; 2430 struct ath_node *an; 2431 int sr, lr, s; 2432 HAL_STATUS status; 2433 2434 for (;;) { 2435 s = splnet(); 2436 bf = TAILQ_FIRST(&sc->sc_txq); 2437 if (bf == NULL) { 2438 sc->sc_txlink = NULL; 2439 splx(s); 2440 break; 2441 } 2442 /* only the last descriptor is needed */ 2443 ds = &bf->bf_desc[bf->bf_nseg - 1]; 2444 status = ath_hal_proc_tx_desc(ah, ds); 2445 #ifdef AR_DEBUG 2446 if (ath_debug & ATH_DEBUG_XMIT_DESC) 2447 ath_printtxbuf(bf, status == HAL_OK); 2448 #endif 2449 if (status == HAL_EINPROGRESS) { 2450 splx(s); 2451 break; 2452 } 2453 TAILQ_REMOVE(&sc->sc_txq, bf, bf_list); 2454 splx(s); 2455 2456 ni = bf->bf_node; 2457 if (ni != NULL) { 2458 an = (struct ath_node *) ni; 2459 if (ds->ds_txstat.ts_status == 0) { 2460 if (bf->bf_id.id_node != NULL) 2461 ieee80211_rssadapt_raise_rate(ic, 2462 &an->an_rssadapt, &bf->bf_id); 2463 an->an_tx_antenna = ds->ds_txstat.ts_antenna; 2464 } else { 2465 if (bf->bf_id.id_node != NULL) 2466 ieee80211_rssadapt_lower_rate(ic, ni, 2467 &an->an_rssadapt, &bf->bf_id); 2468 if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY) 2469 sc->sc_stats.ast_tx_xretries++; 2470 if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO) 2471 sc->sc_stats.ast_tx_fifoerr++; 2472 if (ds->ds_txstat.ts_status & HAL_TXERR_FILT) 2473 sc->sc_stats.ast_tx_filtered++; 2474 an->an_tx_antenna = 0; /* invalidate */ 2475 } 2476 sr = ds->ds_txstat.ts_shortretry; 2477 lr = ds->ds_txstat.ts_longretry; 2478 sc->sc_stats.ast_tx_shortretry += sr; 2479 sc->sc_stats.ast_tx_longretry += lr; 2480 /* 2481 * Reclaim reference to node. 2482 * 2483 * NB: the node may be reclaimed here if, for example 2484 * this is a DEAUTH message that was sent and the 2485 * node was timed out due to inactivity. 
2486 */ 2487 ieee80211_release_node(ic, ni); 2488 } 2489 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, 2490 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2491 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2492 m_freem(bf->bf_m); 2493 bf->bf_m = NULL; 2494 bf->bf_node = NULL; 2495 2496 s = splnet(); 2497 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 2498 splx(s); 2499 } 2500 ifq_clr_oactive(&ifp->if_snd); 2501 sc->sc_tx_timer = 0; 2502 2503 ath_start(ifp); 2504 } 2505 2506 /* 2507 * Drain the transmit queue and reclaim resources. 2508 */ 2509 void 2510 ath_draintxq(struct ath_softc *sc) 2511 { 2512 struct ath_hal *ah = sc->sc_ah; 2513 struct ieee80211com *ic = &sc->sc_ic; 2514 struct ifnet *ifp = &ic->ic_if; 2515 struct ieee80211_node *ni; 2516 struct ath_buf *bf; 2517 int s, i; 2518 2519 /* XXX return value */ 2520 if (!sc->sc_invalid) { 2521 for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) { 2522 /* don't touch the hardware if marked invalid */ 2523 (void) ath_hal_stop_tx_dma(ah, sc->sc_txhalq[i]); 2524 DPRINTF(ATH_DEBUG_RESET, 2525 ("%s: tx queue %d (%p), link %p\n", __func__, i, 2526 (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah, 2527 sc->sc_txhalq[i]), sc->sc_txlink)); 2528 } 2529 (void) ath_hal_stop_tx_dma(ah, sc->sc_bhalq); 2530 DPRINTF(ATH_DEBUG_RESET, 2531 ("%s: beacon queue (%p)\n", __func__, 2532 (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah, sc->sc_bhalq))); 2533 } 2534 for (;;) { 2535 s = splnet(); 2536 bf = TAILQ_FIRST(&sc->sc_txq); 2537 if (bf == NULL) { 2538 sc->sc_txlink = NULL; 2539 splx(s); 2540 break; 2541 } 2542 TAILQ_REMOVE(&sc->sc_txq, bf, bf_list); 2543 splx(s); 2544 #ifdef AR_DEBUG 2545 if (ath_debug & ATH_DEBUG_RESET) { 2546 ath_printtxbuf(bf, 2547 ath_hal_proc_tx_desc(ah, bf->bf_desc) == HAL_OK); 2548 } 2549 #endif /* AR_DEBUG */ 2550 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2551 m_freem(bf->bf_m); 2552 bf->bf_m = NULL; 2553 ni = bf->bf_node; 2554 bf->bf_node = NULL; 2555 s = splnet(); 2556 if (ni != NULL) { 2557 /* 2558 * Reclaim node reference. 2559 */ 2560 ieee80211_release_node(ic, ni); 2561 } 2562 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 2563 splx(s); 2564 } 2565 ifq_clr_oactive(&ifp->if_snd); 2566 sc->sc_tx_timer = 0; 2567 } 2568 2569 /* 2570 * Disable the receive h/w in preparation for a reset. 2571 */ 2572 void 2573 ath_stoprecv(struct ath_softc *sc) 2574 { 2575 #define PA2DESC(_sc, _pa) \ 2576 ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \ 2577 ((_pa) - (_sc)->sc_desc_paddr))) 2578 struct ath_hal *ah = sc->sc_ah; 2579 2580 ath_hal_stop_pcu_recv(ah); /* disable PCU */ 2581 ath_hal_set_rx_filter(ah, 0); /* clear recv filter */ 2582 ath_hal_stop_rx_dma(ah); /* disable DMA engine */ 2583 #ifdef AR_DEBUG 2584 if (ath_debug & ATH_DEBUG_RESET) { 2585 struct ath_buf *bf; 2586 2587 printf("%s: rx queue %p, link %p\n", __func__, 2588 (caddr_t)(u_intptr_t)ath_hal_get_rx_buf(ah), sc->sc_rxlink); 2589 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 2590 struct ath_desc *ds = bf->bf_desc; 2591 if (ath_hal_proc_rx_desc(ah, ds, bf->bf_daddr, 2592 PA2DESC(sc, ds->ds_link)) == HAL_OK) 2593 ath_printrxbuf(bf, 1); 2594 } 2595 } 2596 #endif 2597 sc->sc_rxlink = NULL; /* just in case */ 2598 #undef PA2DESC 2599 } 2600 2601 /* 2602 * Enable the receive h/w following a reset. 
2603 */ 2604 int 2605 ath_startrecv(struct ath_softc *sc) 2606 { 2607 struct ath_hal *ah = sc->sc_ah; 2608 struct ath_buf *bf; 2609 2610 sc->sc_rxlink = NULL; 2611 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 2612 int error = ath_rxbuf_init(sc, bf); 2613 if (error != 0) { 2614 DPRINTF(ATH_DEBUG_RECV, 2615 ("%s: ath_rxbuf_init failed %d\n", 2616 __func__, error)); 2617 return error; 2618 } 2619 } 2620 2621 bf = TAILQ_FIRST(&sc->sc_rxbuf); 2622 ath_hal_put_rx_buf(ah, bf->bf_daddr); 2623 ath_hal_start_rx(ah); /* enable recv descriptors */ 2624 ath_mode_init(sc); /* set filters, etc. */ 2625 ath_hal_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ 2626 return 0; 2627 } 2628 2629 /* 2630 * Set/change channels. If the channel is really being changed, 2631 * it's done by resetting the chip. To accomplish this we must 2632 * first cleanup any pending DMA, then restart stuff after a la 2633 * ath_init. 2634 */ 2635 int 2636 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 2637 { 2638 struct ath_hal *ah = sc->sc_ah; 2639 struct ieee80211com *ic = &sc->sc_ic; 2640 struct ifnet *ifp = &ic->ic_if; 2641 2642 DPRINTF(ATH_DEBUG_ANY, ("%s: %u (%u MHz) -> %u (%u MHz)\n", __func__, 2643 ieee80211_chan2ieee(ic, ic->ic_ibss_chan), 2644 ic->ic_ibss_chan->ic_freq, 2645 ieee80211_chan2ieee(ic, chan), chan->ic_freq)); 2646 if (chan != ic->ic_ibss_chan) { 2647 HAL_STATUS status; 2648 HAL_CHANNEL hchan; 2649 enum ieee80211_phymode mode; 2650 2651 /* 2652 * To switch channels clear any pending DMA operations; 2653 * wait long enough for the RX fifo to drain, reset the 2654 * hardware at the new frequency, and then re-enable 2655 * the relevant bits of the h/w. 2656 */ 2657 ath_hal_set_intr(ah, 0); /* disable interrupts */ 2658 ath_draintxq(sc); /* clear pending tx frames */ 2659 ath_stoprecv(sc); /* turn off frame recv */ 2660 /* 2661 * Convert to a HAL channel description with 2662 * the flags constrained to reflect the current 2663 * operating mode. 2664 */ 2665 hchan.channel = chan->ic_freq; 2666 hchan.channelFlags = ath_chan2flags(ic, chan); 2667 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, 2668 &status)) { 2669 printf("%s: ath_chan_set: unable to reset " 2670 "channel %u (%u MHz)\n", ifp->if_xname, 2671 ieee80211_chan2ieee(ic, chan), chan->ic_freq); 2672 return EIO; 2673 } 2674 ath_set_slot_time(sc); 2675 /* 2676 * Re-enable rx framework. 2677 */ 2678 if (ath_startrecv(sc) != 0) { 2679 printf("%s: ath_chan_set: unable to restart recv " 2680 "logic\n", ifp->if_xname); 2681 return EIO; 2682 } 2683 2684 #if NBPFILTER > 0 2685 /* 2686 * Update BPF state. 2687 */ 2688 sc->sc_txtap.wt_chan_freq = sc->sc_rxtap.wr_chan_freq = 2689 htole16(chan->ic_freq); 2690 sc->sc_txtap.wt_chan_flags = sc->sc_rxtap.wr_chan_flags = 2691 htole16(chan->ic_flags); 2692 #endif 2693 2694 /* 2695 * Change channels and update the h/w rate map 2696 * if we're switching; e.g. 11a to 11b/g. 2697 */ 2698 ic->ic_ibss_chan = chan; 2699 mode = ieee80211_chan2mode(ic, chan); 2700 if (mode != sc->sc_curmode) 2701 ath_setcurmode(sc, mode); 2702 2703 /* 2704 * Re-enable interrupts. 
2705 */ 2706 ath_hal_set_intr(ah, sc->sc_imask); 2707 } 2708 return 0; 2709 } 2710 2711 void 2712 ath_next_scan(void *arg) 2713 { 2714 struct ath_softc *sc = arg; 2715 struct ieee80211com *ic = &sc->sc_ic; 2716 struct ifnet *ifp = &ic->ic_if; 2717 int s; 2718 2719 /* don't call ath_start w/o network interrupts blocked */ 2720 s = splnet(); 2721 2722 if (ic->ic_state == IEEE80211_S_SCAN) 2723 ieee80211_next_scan(ifp); 2724 splx(s); 2725 } 2726 2727 int 2728 ath_set_slot_time(struct ath_softc *sc) 2729 { 2730 struct ath_hal *ah = sc->sc_ah; 2731 struct ieee80211com *ic = &sc->sc_ic; 2732 2733 if (ic->ic_flags & IEEE80211_F_SHSLOT) 2734 return (ath_hal_set_slot_time(ah, HAL_SLOT_TIME_9)); 2735 2736 return (0); 2737 } 2738 2739 /* 2740 * Periodically recalibrate the PHY to account 2741 * for temperature/environment changes. 2742 */ 2743 void 2744 ath_calibrate(void *arg) 2745 { 2746 struct ath_softc *sc = arg; 2747 struct ath_hal *ah = sc->sc_ah; 2748 struct ieee80211com *ic = &sc->sc_ic; 2749 struct ieee80211_channel *c; 2750 HAL_CHANNEL hchan; 2751 int s; 2752 2753 sc->sc_stats.ast_per_cal++; 2754 2755 /* 2756 * Convert to a HAL channel description with the flags 2757 * constrained to reflect the current operating mode. 2758 */ 2759 c = ic->ic_ibss_chan; 2760 hchan.channel = c->ic_freq; 2761 hchan.channelFlags = ath_chan2flags(ic, c); 2762 2763 s = splnet(); 2764 DPRINTF(ATH_DEBUG_CALIBRATE, 2765 ("%s: channel %u/%x\n", __func__, c->ic_freq, c->ic_flags)); 2766 2767 if (ath_hal_get_rf_gain(ah) == HAL_RFGAIN_NEED_CHANGE) { 2768 /* 2769 * Rfgain is out of bounds, reset the chip 2770 * to load new gain values. 2771 */ 2772 sc->sc_stats.ast_per_rfgain++; 2773 ath_reset(sc, 1); 2774 } 2775 if (!ath_hal_calibrate(ah, &hchan)) { 2776 DPRINTF(ATH_DEBUG_ANY, 2777 ("%s: calibration of channel %u failed\n", 2778 __func__, c->ic_freq)); 2779 sc->sc_stats.ast_per_calfail++; 2780 } 2781 timeout_add_sec(&sc->sc_cal_to, ath_calinterval); 2782 splx(s); 2783 } 2784 2785 void 2786 ath_ledstate(struct ath_softc *sc, enum ieee80211_state state) 2787 { 2788 HAL_LED_STATE led = HAL_LED_INIT; 2789 u_int32_t softled = AR5K_SOFTLED_OFF; 2790 2791 switch (state) { 2792 case IEEE80211_S_INIT: 2793 break; 2794 case IEEE80211_S_SCAN: 2795 led = HAL_LED_SCAN; 2796 break; 2797 case IEEE80211_S_AUTH: 2798 led = HAL_LED_AUTH; 2799 break; 2800 case IEEE80211_S_ASSOC: 2801 led = HAL_LED_ASSOC; 2802 softled = AR5K_SOFTLED_ON; 2803 break; 2804 case IEEE80211_S_RUN: 2805 led = HAL_LED_RUN; 2806 softled = AR5K_SOFTLED_ON; 2807 break; 2808 } 2809 2810 ath_hal_set_ledstate(sc->sc_ah, led); 2811 if (sc->sc_softled) { 2812 ath_hal_set_gpio_output(sc->sc_ah, AR5K_SOFTLED_PIN); 2813 ath_hal_set_gpio(sc->sc_ah, AR5K_SOFTLED_PIN, softled); 2814 } 2815 } 2816 2817 int 2818 ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 2819 { 2820 struct ifnet *ifp = &ic->ic_if; 2821 struct ath_softc *sc = ifp->if_softc; 2822 struct ath_hal *ah = sc->sc_ah; 2823 struct ieee80211_node *ni; 2824 const u_int8_t *bssid; 2825 int error, i; 2826 2827 u_int32_t rfilt; 2828 2829 DPRINTF(ATH_DEBUG_ANY, ("%s: %s -> %s\n", __func__, 2830 ieee80211_state_name[ic->ic_state], 2831 ieee80211_state_name[nstate])); 2832 2833 timeout_del(&sc->sc_scan_to); 2834 timeout_del(&sc->sc_cal_to); 2835 ath_ledstate(sc, nstate); 2836 2837 if (nstate == IEEE80211_S_INIT) { 2838 timeout_del(&sc->sc_rssadapt_to); 2839 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 2840 ath_hal_set_intr(ah, sc->sc_imask); 2841 return (*sc->sc_newstate)(ic, nstate, arg); 
2842 } 2843 ni = ic->ic_bss; 2844 error = ath_chan_set(sc, ni->ni_chan); 2845 if (error != 0) 2846 goto bad; 2847 rfilt = ath_calcrxfilter(sc); 2848 if (nstate == IEEE80211_S_SCAN || 2849 ic->ic_opmode == IEEE80211_M_MONITOR) { 2850 bssid = sc->sc_broadcast_addr; 2851 } else { 2852 bssid = ni->ni_bssid; 2853 } 2854 ath_hal_set_rx_filter(ah, rfilt); 2855 DPRINTF(ATH_DEBUG_ANY, ("%s: RX filter 0x%x bssid %s\n", 2856 __func__, rfilt, ether_sprintf((u_char*)bssid))); 2857 2858 if (nstate == IEEE80211_S_RUN && ic->ic_opmode == IEEE80211_M_STA) { 2859 ath_hal_set_associd(ah, bssid, ni->ni_associd); 2860 } else { 2861 ath_hal_set_associd(ah, bssid, 0); 2862 } 2863 2864 if (!ath_softcrypto && (ic->ic_flags & IEEE80211_F_WEPON)) { 2865 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 2866 if (ath_hal_is_key_valid(ah, i)) 2867 ath_hal_set_key_lladdr(ah, i, bssid); 2868 } 2869 } 2870 2871 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 2872 /* nothing to do */ 2873 } else if (nstate == IEEE80211_S_RUN) { 2874 DPRINTF(ATH_DEBUG_ANY, ("%s(RUN): " 2875 "ic_flags=0x%08x iv=%d bssid=%s " 2876 "capinfo=0x%04x chan=%d\n", 2877 __func__, 2878 ic->ic_flags, 2879 ni->ni_intval, 2880 ether_sprintf(ni->ni_bssid), 2881 ni->ni_capinfo, 2882 ieee80211_chan2ieee(ic, ni->ni_chan))); 2883 2884 /* 2885 * Allocate and setup the beacon frame for AP or adhoc mode. 2886 */ 2887 #ifndef IEEE80211_STA_ONLY 2888 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 2889 ic->ic_opmode == IEEE80211_M_IBSS) { 2890 error = ath_beacon_alloc(sc, ni); 2891 if (error != 0) 2892 goto bad; 2893 } 2894 #endif 2895 /* 2896 * Configure the beacon and sleep timers. 2897 */ 2898 ath_beacon_config(sc); 2899 } else { 2900 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 2901 ath_hal_set_intr(ah, sc->sc_imask); 2902 } 2903 2904 /* 2905 * Invoke the parent method to complete the work. 2906 */ 2907 error = (*sc->sc_newstate)(ic, nstate, arg); 2908 2909 if (nstate == IEEE80211_S_RUN) { 2910 /* start periodic recalibration timer */ 2911 timeout_add_sec(&sc->sc_cal_to, ath_calinterval); 2912 2913 if (ic->ic_opmode != IEEE80211_M_MONITOR) 2914 timeout_add_msec(&sc->sc_rssadapt_to, 100); 2915 } else if (nstate == IEEE80211_S_SCAN) { 2916 /* start ap/neighbor scan timer */ 2917 timeout_add_msec(&sc->sc_scan_to, ath_dwelltime); 2918 } 2919 bad: 2920 return error; 2921 } 2922 2923 #ifndef IEEE80211_STA_ONLY 2924 void 2925 ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m, 2926 struct ieee80211_node *ni, struct ieee80211_rxinfo *rxi, int subtype) 2927 { 2928 struct ath_softc *sc = (struct ath_softc*)ic->ic_softc; 2929 struct ath_hal *ah = sc->sc_ah; 2930 2931 (*sc->sc_recv_mgmt)(ic, m, ni, rxi, subtype); 2932 2933 switch (subtype) { 2934 case IEEE80211_FC0_SUBTYPE_PROBE_RESP: 2935 case IEEE80211_FC0_SUBTYPE_BEACON: 2936 if (ic->ic_opmode != IEEE80211_M_IBSS || 2937 ic->ic_state != IEEE80211_S_RUN) 2938 break; 2939 if (ieee80211_ibss_merge(ic, ni, ath_hal_get_tsf64(ah)) == 2940 ENETRESET) 2941 ath_hal_set_associd(ah, ic->ic_bss->ni_bssid, 0); 2942 break; 2943 default: 2944 break; 2945 } 2946 return; 2947 } 2948 #endif 2949 2950 /* 2951 * Setup driver-specific state for a newly associated node. 2952 * Note that we're called also on a re-associate, the isnew 2953 * param tells us if this is the first time or not. 
2954 */ 2955 void 2956 ath_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew) 2957 { 2958 if (ic->ic_opmode == IEEE80211_M_MONITOR) 2959 return; 2960 } 2961 2962 int 2963 ath_getchannels(struct ath_softc *sc, HAL_BOOL outdoor, HAL_BOOL xchanmode) 2964 { 2965 struct ieee80211com *ic = &sc->sc_ic; 2966 struct ifnet *ifp = &ic->ic_if; 2967 struct ath_hal *ah = sc->sc_ah; 2968 HAL_CHANNEL *chans; 2969 int i, ix, nchan; 2970 2971 sc->sc_nchan = 0; 2972 chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL), 2973 M_TEMP, M_NOWAIT); 2974 if (chans == NULL) { 2975 printf("%s: unable to allocate channel table\n", ifp->if_xname); 2976 return ENOMEM; 2977 } 2978 if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan, 2979 HAL_MODE_ALL, outdoor, xchanmode)) { 2980 printf("%s: unable to collect channel list from hal\n", 2981 ifp->if_xname); 2982 free(chans, M_TEMP, 0); 2983 return EINVAL; 2984 } 2985 2986 /* 2987 * Convert HAL channels to ieee80211 ones and insert 2988 * them in the table according to their channel number. 2989 */ 2990 for (i = 0; i < nchan; i++) { 2991 HAL_CHANNEL *c = &chans[i]; 2992 ix = ieee80211_mhz2ieee(c->channel, c->channelFlags); 2993 if (ix > IEEE80211_CHAN_MAX) { 2994 printf("%s: bad hal channel %u (%u/%x) ignored\n", 2995 ifp->if_xname, ix, c->channel, c->channelFlags); 2996 continue; 2997 } 2998 DPRINTF(ATH_DEBUG_ANY, 2999 ("%s: HAL channel %d/%d freq %d flags %#04x idx %d\n", 3000 sc->sc_dev.dv_xname, i, nchan, c->channel, c->channelFlags, 3001 ix)); 3002 /* NB: flags are known to be compatible */ 3003 if (ic->ic_channels[ix].ic_freq == 0) { 3004 ic->ic_channels[ix].ic_freq = c->channel; 3005 ic->ic_channels[ix].ic_flags = c->channelFlags; 3006 } else { 3007 /* channels overlap; e.g. 11g and 11b */ 3008 ic->ic_channels[ix].ic_flags |= c->channelFlags; 3009 } 3010 /* count valid channels */ 3011 sc->sc_nchan++; 3012 } 3013 free(chans, M_TEMP, 0); 3014 3015 if (sc->sc_nchan < 1) { 3016 printf("%s: no valid channels for regdomain %s(%u)\n", 3017 ifp->if_xname, ieee80211_regdomain2name(ah->ah_regdomain), 3018 ah->ah_regdomain); 3019 return ENOENT; 3020 } 3021 3022 /* set an initial channel */ 3023 ic->ic_ibss_chan = &ic->ic_channels[0]; 3024 3025 return 0; 3026 } 3027 3028 int 3029 ath_rate_setup(struct ath_softc *sc, u_int mode) 3030 { 3031 struct ath_hal *ah = sc->sc_ah; 3032 struct ieee80211com *ic = &sc->sc_ic; 3033 const HAL_RATE_TABLE *rt; 3034 struct ieee80211_rateset *rs; 3035 int i, maxrates; 3036 3037 switch (mode) { 3038 case IEEE80211_MODE_11A: 3039 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11A); 3040 break; 3041 case IEEE80211_MODE_11B: 3042 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11B); 3043 break; 3044 case IEEE80211_MODE_11G: 3045 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11G); 3046 break; 3047 default: 3048 DPRINTF(ATH_DEBUG_ANY, 3049 ("%s: invalid mode %u\n", __func__, mode)); 3050 return 0; 3051 } 3052 rt = sc->sc_rates[mode]; 3053 if (rt == NULL) 3054 return 0; 3055 if (rt->rateCount > IEEE80211_RATE_MAXSIZE) { 3056 DPRINTF(ATH_DEBUG_ANY, 3057 ("%s: rate table too small (%u > %u)\n", 3058 __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE)); 3059 maxrates = IEEE80211_RATE_MAXSIZE; 3060 } else { 3061 maxrates = rt->rateCount; 3062 } 3063 rs = &ic->ic_sup_rates[mode]; 3064 for (i = 0; i < maxrates; i++) 3065 rs->rs_rates[i] = rt->info[i].dot11Rate; 3066 rs->rs_nrates = maxrates; 3067 return 1; 3068 } 3069 3070 void 3071 ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 
3072 { 3073 const HAL_RATE_TABLE *rt; 3074 struct ieee80211com *ic = &sc->sc_ic; 3075 struct ieee80211_node *ni; 3076 int i; 3077 3078 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 3079 rt = sc->sc_rates[mode]; 3080 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 3081 for (i = 0; i < rt->rateCount; i++) 3082 sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i; 3083 bzero(sc->sc_hwmap, sizeof(sc->sc_hwmap)); 3084 for (i = 0; i < 32; i++) 3085 sc->sc_hwmap[i] = rt->info[rt->rateCodeToIndex[i]].dot11Rate; 3086 sc->sc_currates = rt; 3087 sc->sc_curmode = mode; 3088 ni = ic->ic_bss; 3089 ni->ni_rates.rs_nrates = sc->sc_currates->rateCount; 3090 if (ni->ni_txrate >= ni->ni_rates.rs_nrates) 3091 ni->ni_txrate = 0; 3092 } 3093 3094 void 3095 ath_rssadapt_updatenode(void *arg, struct ieee80211_node *ni) 3096 { 3097 struct ath_node *an = ATH_NODE(ni); 3098 3099 ieee80211_rssadapt_updatestats(&an->an_rssadapt); 3100 } 3101 3102 void 3103 ath_rssadapt_updatestats(void *arg) 3104 { 3105 struct ath_softc *sc = (struct ath_softc *)arg; 3106 struct ieee80211com *ic = &sc->sc_ic; 3107 3108 if (ic->ic_opmode == IEEE80211_M_STA) { 3109 ath_rssadapt_updatenode(arg, ic->ic_bss); 3110 } else { 3111 ieee80211_iterate_nodes(ic, ath_rssadapt_updatenode, arg); 3112 } 3113 3114 timeout_add_msec(&sc->sc_rssadapt_to, 100); 3115 } 3116 3117 #ifdef AR_DEBUG 3118 void 3119 ath_printrxbuf(struct ath_buf *bf, int done) 3120 { 3121 struct ath_desc *ds; 3122 int i; 3123 3124 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { 3125 printf("R%d (%p %p) %08x %08x %08x %08x %08x %08x %c\n", 3126 i, ds, (struct ath_desc *)bf->bf_daddr + i, 3127 ds->ds_link, ds->ds_data, 3128 ds->ds_ctl0, ds->ds_ctl1, 3129 ds->ds_hw[0], ds->ds_hw[1], 3130 !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!'); 3131 } 3132 } 3133 3134 void 3135 ath_printtxbuf(struct ath_buf *bf, int done) 3136 { 3137 struct ath_desc *ds; 3138 int i; 3139 3140 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { 3141 printf("T%d (%p %p) " 3142 "%08x %08x %08x %08x %08x %08x %08x %08x %c\n", 3143 i, ds, (struct ath_desc *)bf->bf_daddr + i, 3144 ds->ds_link, ds->ds_data, 3145 ds->ds_ctl0, ds->ds_ctl1, 3146 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3], 3147 !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!'); 3148 } 3149 } 3150 #endif /* AR_DEBUG */ 3151 3152 int 3153 ath_gpio_attach(struct ath_softc *sc, u_int16_t devid) 3154 { 3155 struct ath_hal *ah = sc->sc_ah; 3156 struct gpiobus_attach_args gba; 3157 int i; 3158 3159 if (ah->ah_gpio_npins < 1) 3160 return 0; 3161 3162 /* Initialize gpio pins array */ 3163 for (i = 0; i < ah->ah_gpio_npins && i < AR5K_MAX_GPIO; i++) { 3164 sc->sc_gpio_pins[i].pin_num = i; 3165 sc->sc_gpio_pins[i].pin_caps = GPIO_PIN_INPUT | 3166 GPIO_PIN_OUTPUT; 3167 3168 /* Set pin mode to input */ 3169 ath_hal_set_gpio_input(ah, i); 3170 sc->sc_gpio_pins[i].pin_flags = GPIO_PIN_INPUT; 3171 3172 /* Get pin input */ 3173 sc->sc_gpio_pins[i].pin_state = ath_hal_get_gpio(ah, i) ? 
3174 GPIO_PIN_HIGH : GPIO_PIN_LOW; 3175 } 3176 3177 /* Enable GPIO-controlled software LED if available */ 3178 if ((ah->ah_version == AR5K_AR5211) || 3179 (devid == PCI_PRODUCT_ATHEROS_AR5212_IBM)) { 3180 sc->sc_softled = 1; 3181 ath_hal_set_gpio_output(ah, AR5K_SOFTLED_PIN); 3182 ath_hal_set_gpio(ah, AR5K_SOFTLED_PIN, AR5K_SOFTLED_OFF); 3183 } 3184 3185 /* Create gpio controller tag */ 3186 sc->sc_gpio_gc.gp_cookie = sc; 3187 sc->sc_gpio_gc.gp_pin_read = ath_gpio_pin_read; 3188 sc->sc_gpio_gc.gp_pin_write = ath_gpio_pin_write; 3189 sc->sc_gpio_gc.gp_pin_ctl = ath_gpio_pin_ctl; 3190 3191 gba.gba_name = "gpio"; 3192 gba.gba_gc = &sc->sc_gpio_gc; 3193 gba.gba_pins = sc->sc_gpio_pins; 3194 gba.gba_npins = ah->ah_gpio_npins; 3195 3196 #ifdef notyet 3197 #if NGPIO > 0 3198 if (config_found(&sc->sc_dev, &gba, gpiobus_print) == NULL) 3199 return (ENODEV); 3200 #endif 3201 #endif 3202 3203 return (0); 3204 } 3205 3206 int 3207 ath_gpio_pin_read(void *arg, int pin) 3208 { 3209 struct ath_softc *sc = arg; 3210 struct ath_hal *ah = sc->sc_ah; 3211 return (ath_hal_get_gpio(ah, pin) ? GPIO_PIN_HIGH : GPIO_PIN_LOW); 3212 } 3213 3214 void 3215 ath_gpio_pin_write(void *arg, int pin, int value) 3216 { 3217 struct ath_softc *sc = arg; 3218 struct ath_hal *ah = sc->sc_ah; 3219 ath_hal_set_gpio(ah, pin, value ? GPIO_PIN_HIGH : GPIO_PIN_LOW); 3220 } 3221 3222 void 3223 ath_gpio_pin_ctl(void *arg, int pin, int flags) 3224 { 3225 struct ath_softc *sc = arg; 3226 struct ath_hal *ah = sc->sc_ah; 3227 3228 if (flags & GPIO_PIN_INPUT) { 3229 ath_hal_set_gpio_input(ah, pin); 3230 } else if (flags & GPIO_PIN_OUTPUT) { 3231 ath_hal_set_gpio_output(ah, pin); 3232 } 3233 } 3234
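/*
 * Illustrative sketch, not compiled into the driver (hence the #if 0):
 * the receive history kept per node above is a small circular buffer
 * that ath_node_getrssi() averages over roughly the last second.  The
 * stand-alone code below restates that scheme with hypothetical names
 * (RHIST_SIZE, struct rhist, rhist_*) and a caller-supplied tick count
 * so the logic can be read or unit-tested in isolation;
 * ath_node_getrssi() corresponds to rhist_avg(rh, ATH_TICKS(), hz) and
 * the history update in ath_rx_proc() to rhist_add().
 */
#if 0
#define	RHIST_SIZE	16
#define	RHIST_NOTIME	(~0U)		/* marks an unused slot */

struct rhist_entry {
	unsigned int	re_ticks;	/* time of sample, RHIST_NOTIME if unused */
	int		re_rssi;	/* sampled rssi */
};

struct rhist {
	struct rhist_entry rh_ent[RHIST_SIZE];
	int		rh_next;	/* index of most recent sample */
};

static void
rhist_init(struct rhist *rh)
{
	int i;

	for (i = 0; i < RHIST_SIZE; i++) {
		rh->rh_ent[i].re_ticks = RHIST_NOTIME;
		rh->rh_ent[i].re_rssi = 0;
	}
	rh->rh_next = RHIST_SIZE - 1;
}

/* Record a new sample, overwriting the oldest slot. */
static void
rhist_add(struct rhist *rh, unsigned int now, int rssi)
{
	if (++rh->rh_next == RHIST_SIZE)
		rh->rh_next = 0;
	rh->rh_ent[rh->rh_next].re_ticks = now;
	rh->rh_ent[rh->rh_next].re_rssi = rssi;
}

/* Average the samples no older than 'window' ticks, newest first. */
static int
rhist_avg(const struct rhist *rh, unsigned int now, unsigned int window)
{
	int i, nsamples = 0, rssi = 0;

	i = rh->rh_next;
	do {
		const struct rhist_entry *re = &rh->rh_ent[i];

		if (re->re_ticks == RHIST_NOTIME ||
		    now - re->re_ticks > window)
			break;
		rssi += re->re_rssi;
		nsamples++;
		i = (i == 0) ? RHIST_SIZE - 1 : i - 1;
	} while (i != rh->rh_next);

	/* No recent data: fall back to the sample the loop stopped at. */
	return (nsamples ? rssi / nsamples : rh->rh_ent[i].re_rssi);
}
#endif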