1 /* $OpenBSD: if_iwn.c,v 1.112 2011/09/02 18:49:36 kettenis Exp $ */ 2 3 /*- 4 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /* 20 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network 21 * adapters. 22 */ 23 24 #include "bpfilter.h" 25 26 #include <sys/param.h> 27 #include <sys/sockio.h> 28 #include <sys/mbuf.h> 29 #include <sys/kernel.h> 30 #include <sys/socket.h> 31 #include <sys/systm.h> 32 #include <sys/malloc.h> 33 #include <sys/conf.h> 34 #include <sys/device.h> 35 #include <sys/workq.h> 36 37 #include <machine/bus.h> 38 #include <machine/endian.h> 39 #include <machine/intr.h> 40 41 #include <dev/pci/pcireg.h> 42 #include <dev/pci/pcivar.h> 43 #include <dev/pci/pcidevs.h> 44 45 #if NBPFILTER > 0 46 #include <net/bpf.h> 47 #endif 48 #include <net/if.h> 49 #include <net/if_arp.h> 50 #include <net/if_dl.h> 51 #include <net/if_media.h> 52 #include <net/if_types.h> 53 54 #include <netinet/in.h> 55 #include <netinet/in_systm.h> 56 #include <netinet/in_var.h> 57 #include <netinet/if_ether.h> 58 #include <netinet/ip.h> 59 60 #include <net80211/ieee80211_var.h> 61 #include <net80211/ieee80211_amrr.h> 62 #include <net80211/ieee80211_radiotap.h> 63 64 #include <dev/pci/if_iwnreg.h> 65 #include <dev/pci/if_iwnvar.h> 66 67 static const struct pci_matchid iwn_devices[] = { 68 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_4965_1 }, 69 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_4965_2 }, 70 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5100_1 }, 71 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5100_2 }, 72 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5150_1 }, 73 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5150_2 }, 74 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5300_1 }, 75 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5300_2 }, 76 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5350_1 }, 77 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5350_2 }, 78 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1000_1 }, 79 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1000_2 }, 80 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6300_1 }, 81 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6300_2 }, 82 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6200_1 }, 83 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6200_2 }, 84 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6050_1 }, 85 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6050_2 }, 86 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6005_1 }, 87 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6005_2 }, 88 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6030_1 }, 89 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6030_2 }, 90 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1030_1 }, 91 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1030_2 }, 92 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_100_1 }, 93 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_100_2 } 94 }; 95 96 int iwn_match(struct device *, void *, void *); 
97 void iwn_attach(struct device *, struct device *, void *); 98 int iwn4965_attach(struct iwn_softc *, pci_product_id_t); 99 int iwn5000_attach(struct iwn_softc *, pci_product_id_t); 100 #if NBPFILTER > 0 101 void iwn_radiotap_attach(struct iwn_softc *); 102 #endif 103 int iwn_detach(struct device *, int); 104 int iwn_activate(struct device *, int); 105 void iwn_resume(void *, void *); 106 int iwn_nic_lock(struct iwn_softc *); 107 int iwn_eeprom_lock(struct iwn_softc *); 108 int iwn_init_otprom(struct iwn_softc *); 109 int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int); 110 int iwn_dma_contig_alloc(bus_dma_tag_t, struct iwn_dma_info *, 111 void **, bus_size_t, bus_size_t); 112 void iwn_dma_contig_free(struct iwn_dma_info *); 113 int iwn_alloc_sched(struct iwn_softc *); 114 void iwn_free_sched(struct iwn_softc *); 115 int iwn_alloc_kw(struct iwn_softc *); 116 void iwn_free_kw(struct iwn_softc *); 117 int iwn_alloc_ict(struct iwn_softc *); 118 void iwn_free_ict(struct iwn_softc *); 119 int iwn_alloc_fwmem(struct iwn_softc *); 120 void iwn_free_fwmem(struct iwn_softc *); 121 int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 122 void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 123 void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 124 int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, 125 int); 126 void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 127 void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 128 void iwn5000_ict_reset(struct iwn_softc *); 129 int iwn_read_eeprom(struct iwn_softc *); 130 void iwn4965_read_eeprom(struct iwn_softc *); 131 void iwn4965_print_power_group(struct iwn_softc *, int); 132 void iwn5000_read_eeprom(struct iwn_softc *); 133 void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t); 134 void iwn_read_eeprom_enhinfo(struct iwn_softc *); 135 struct ieee80211_node *iwn_node_alloc(struct ieee80211com *); 136 void iwn_newassoc(struct ieee80211com *, struct ieee80211_node *, 137 int); 138 int iwn_media_change(struct ifnet *); 139 int iwn_newstate(struct ieee80211com *, enum ieee80211_state, int); 140 void iwn_iter_func(void *, struct ieee80211_node *); 141 void iwn_calib_timeout(void *); 142 int iwn_ccmp_decap(struct iwn_softc *, struct mbuf *, 143 struct ieee80211_key *); 144 void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *, 145 struct iwn_rx_data *); 146 void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *, 147 struct iwn_rx_data *); 148 #ifndef IEEE80211_NO_HT 149 void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *, 150 struct iwn_rx_data *); 151 #endif 152 void iwn5000_rx_calib_results(struct iwn_softc *, 153 struct iwn_rx_desc *, struct iwn_rx_data *); 154 void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *, 155 struct iwn_rx_data *); 156 void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 157 struct iwn_rx_data *); 158 void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 159 struct iwn_rx_data *); 160 void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int, 161 uint8_t); 162 void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *); 163 void iwn_notif_intr(struct iwn_softc *); 164 void iwn_wakeup_intr(struct iwn_softc *); 165 void iwn_fatal_intr(struct iwn_softc *); 166 int iwn_intr(void *); 167 void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t, 168 uint16_t); 169 void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t, 170 uint16_t); 171 void 
iwn5000_reset_sched(struct iwn_softc *, int, int); 172 int iwn_tx(struct iwn_softc *, struct mbuf *, 173 struct ieee80211_node *); 174 void iwn_start(struct ifnet *); 175 void iwn_watchdog(struct ifnet *); 176 int iwn_ioctl(struct ifnet *, u_long, caddr_t); 177 int iwn_cmd(struct iwn_softc *, int, const void *, int, int); 178 int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *, 179 int); 180 int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *, 181 int); 182 int iwn_set_link_quality(struct iwn_softc *, 183 struct ieee80211_node *); 184 int iwn_add_broadcast_node(struct iwn_softc *, int); 185 void iwn_updateedca(struct ieee80211com *); 186 void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); 187 int iwn_set_critical_temp(struct iwn_softc *); 188 int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *); 189 void iwn4965_power_calibration(struct iwn_softc *, int); 190 int iwn4965_set_txpower(struct iwn_softc *, int); 191 int iwn5000_set_txpower(struct iwn_softc *, int); 192 int iwn4965_get_rssi(const struct iwn_rx_stat *); 193 int iwn5000_get_rssi(const struct iwn_rx_stat *); 194 int iwn_get_noise(const struct iwn_rx_general_stats *); 195 int iwn4965_get_temperature(struct iwn_softc *); 196 int iwn5000_get_temperature(struct iwn_softc *); 197 int iwn_init_sensitivity(struct iwn_softc *); 198 void iwn_collect_noise(struct iwn_softc *, 199 const struct iwn_rx_general_stats *); 200 int iwn4965_init_gains(struct iwn_softc *); 201 int iwn5000_init_gains(struct iwn_softc *); 202 int iwn4965_set_gains(struct iwn_softc *); 203 int iwn5000_set_gains(struct iwn_softc *); 204 void iwn_tune_sensitivity(struct iwn_softc *, 205 const struct iwn_rx_stats *); 206 int iwn_send_sensitivity(struct iwn_softc *); 207 int iwn_set_pslevel(struct iwn_softc *, int, int, int); 208 int iwn_send_temperature_offset(struct iwn_softc *); 209 int iwn_send_btcoex(struct iwn_softc *); 210 int iwn5000_runtime_calib(struct iwn_softc *); 211 int iwn_config(struct iwn_softc *); 212 int iwn_scan(struct iwn_softc *, uint16_t); 213 int iwn_auth(struct iwn_softc *); 214 int iwn_run(struct iwn_softc *); 215 int iwn_set_key(struct ieee80211com *, struct ieee80211_node *, 216 struct ieee80211_key *); 217 void iwn_delete_key(struct ieee80211com *, struct ieee80211_node *, 218 struct ieee80211_key *); 219 #ifndef IEEE80211_NO_HT 220 int iwn_ampdu_rx_start(struct ieee80211com *, 221 struct ieee80211_node *, uint8_t); 222 void iwn_ampdu_rx_stop(struct ieee80211com *, 223 struct ieee80211_node *, uint8_t); 224 int iwn_ampdu_tx_start(struct ieee80211com *, 225 struct ieee80211_node *, uint8_t); 226 void iwn_ampdu_tx_stop(struct ieee80211com *, 227 struct ieee80211_node *, uint8_t); 228 void iwn4965_ampdu_tx_start(struct iwn_softc *, 229 struct ieee80211_node *, uint8_t, uint16_t); 230 void iwn4965_ampdu_tx_stop(struct iwn_softc *, 231 uint8_t, uint16_t); 232 void iwn5000_ampdu_tx_start(struct iwn_softc *, 233 struct ieee80211_node *, uint8_t, uint16_t); 234 void iwn5000_ampdu_tx_stop(struct iwn_softc *, 235 uint8_t, uint16_t); 236 #endif 237 int iwn5000_query_calibration(struct iwn_softc *); 238 int iwn5000_send_calibration(struct iwn_softc *); 239 int iwn5000_send_wimax_coex(struct iwn_softc *); 240 int iwn5000_crystal_calib(struct iwn_softc *); 241 int iwn5000_temp_offset_calib(struct iwn_softc *); 242 int iwn4965_post_alive(struct iwn_softc *); 243 int iwn5000_post_alive(struct iwn_softc *); 244 int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *, 245 int); 246 int 
iwn4965_load_firmware(struct iwn_softc *); 247 int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t, 248 const uint8_t *, int); 249 int iwn5000_load_firmware(struct iwn_softc *); 250 int iwn_read_firmware_leg(struct iwn_softc *, 251 struct iwn_fw_info *); 252 int iwn_read_firmware_tlv(struct iwn_softc *, 253 struct iwn_fw_info *, uint16_t); 254 int iwn_read_firmware(struct iwn_softc *); 255 int iwn_clock_wait(struct iwn_softc *); 256 int iwn_apm_init(struct iwn_softc *); 257 void iwn_apm_stop_master(struct iwn_softc *); 258 void iwn_apm_stop(struct iwn_softc *); 259 int iwn4965_nic_config(struct iwn_softc *); 260 int iwn5000_nic_config(struct iwn_softc *); 261 int iwn_hw_prepare(struct iwn_softc *); 262 int iwn_hw_init(struct iwn_softc *); 263 void iwn_hw_stop(struct iwn_softc *); 264 int iwn_init(struct ifnet *); 265 void iwn_stop(struct ifnet *, int); 266 267 #ifdef IWN_DEBUG 268 #define DPRINTF(x) do { if (iwn_debug > 0) printf x; } while (0) 269 #define DPRINTFN(n, x) do { if (iwn_debug >= (n)) printf x; } while (0) 270 int iwn_debug = 0; 271 #else 272 #define DPRINTF(x) 273 #define DPRINTFN(n, x) 274 #endif 275 276 struct cfdriver iwn_cd = { 277 NULL, "iwn", DV_IFNET 278 }; 279 280 struct cfattach iwn_ca = { 281 sizeof (struct iwn_softc), iwn_match, iwn_attach, iwn_detach, 282 iwn_activate 283 }; 284 285 int 286 iwn_match(struct device *parent, void *match, void *aux) 287 { 288 return pci_matchbyid((struct pci_attach_args *)aux, iwn_devices, 289 nitems(iwn_devices)); 290 } 291 292 void 293 iwn_attach(struct device *parent, struct device *self, void *aux) 294 { 295 struct iwn_softc *sc = (struct iwn_softc *)self; 296 struct ieee80211com *ic = &sc->sc_ic; 297 struct ifnet *ifp = &ic->ic_if; 298 struct pci_attach_args *pa = aux; 299 const char *intrstr; 300 pci_intr_handle_t ih; 301 pcireg_t memtype, reg; 302 int i, error; 303 304 sc->sc_pct = pa->pa_pc; 305 sc->sc_pcitag = pa->pa_tag; 306 sc->sc_dmat = pa->pa_dmat; 307 308 /* 309 * Get the offset of the PCI Express Capability Structure in PCI 310 * Configuration Space. 311 */ 312 error = pci_get_capability(sc->sc_pct, sc->sc_pcitag, 313 PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL); 314 if (error == 0) { 315 printf(": PCIe capability structure not found!\n"); 316 return; 317 } 318 319 /* Clear device-specific "PCI retry timeout" register (41h). */ 320 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40); 321 if (reg & 0xff00) 322 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00); 323 324 /* Hardware bug workaround. */ 325 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 326 if (reg & PCI_COMMAND_INTERRUPT_DISABLE) { 327 DPRINTF(("PCIe INTx Disable set\n")); 328 reg &= ~PCI_COMMAND_INTERRUPT_DISABLE; 329 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 330 PCI_COMMAND_STATUS_REG, reg); 331 } 332 333 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IWN_PCI_BAR0); 334 error = pci_mapreg_map(pa, IWN_PCI_BAR0, memtype, 0, &sc->sc_st, 335 &sc->sc_sh, NULL, &sc->sc_sz, 0); 336 if (error != 0) { 337 printf(": can't map mem space\n"); 338 return; 339 } 340 341 /* Install interrupt handler. 
*/ 342 if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) { 343 printf(": can't map interrupt\n"); 344 return; 345 } 346 intrstr = pci_intr_string(sc->sc_pct, ih); 347 sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwn_intr, sc, 348 sc->sc_dev.dv_xname); 349 if (sc->sc_ih == NULL) { 350 printf(": can't establish interrupt"); 351 if (intrstr != NULL) 352 printf(" at %s", intrstr); 353 printf("\n"); 354 return; 355 } 356 printf(": %s", intrstr); 357 358 /* Read hardware revision and attach. */ 359 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf; 360 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 361 error = iwn4965_attach(sc, PCI_PRODUCT(pa->pa_id)); 362 else 363 error = iwn5000_attach(sc, PCI_PRODUCT(pa->pa_id)); 364 if (error != 0) { 365 printf(": could not attach device\n"); 366 return; 367 } 368 369 if ((error = iwn_hw_prepare(sc)) != 0) { 370 printf(": hardware not ready\n"); 371 return; 372 } 373 374 /* Read MAC address, channels, etc from EEPROM. */ 375 if ((error = iwn_read_eeprom(sc)) != 0) { 376 printf(": could not read EEPROM\n"); 377 return; 378 } 379 380 /* Allocate DMA memory for firmware transfers. */ 381 if ((error = iwn_alloc_fwmem(sc)) != 0) { 382 printf(": could not allocate memory for firmware\n"); 383 return; 384 } 385 386 /* Allocate "Keep Warm" page. */ 387 if ((error = iwn_alloc_kw(sc)) != 0) { 388 printf(": could not allocate keep warm page\n"); 389 goto fail1; 390 } 391 392 /* Allocate ICT table for 5000 Series. */ 393 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 394 (error = iwn_alloc_ict(sc)) != 0) { 395 printf(": could not allocate ICT table\n"); 396 goto fail2; 397 } 398 399 /* Allocate TX scheduler "rings". */ 400 if ((error = iwn_alloc_sched(sc)) != 0) { 401 printf(": could not allocate TX scheduler rings\n"); 402 goto fail3; 403 } 404 405 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */ 406 for (i = 0; i < sc->ntxqs; i++) { 407 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 408 printf(": could not allocate TX ring %d\n", i); 409 goto fail4; 410 } 411 } 412 413 /* Allocate RX ring. */ 414 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) { 415 printf(": could not allocate RX ring\n"); 416 goto fail4; 417 } 418 419 /* Clear pending interrupts. */ 420 IWN_WRITE(sc, IWN_INT, 0xffffffff); 421 422 /* Count the number of available chains. */ 423 sc->ntxchains = 424 ((sc->txchainmask >> 2) & 1) + 425 ((sc->txchainmask >> 1) & 1) + 426 ((sc->txchainmask >> 0) & 1); 427 sc->nrxchains = 428 ((sc->rxchainmask >> 2) & 1) + 429 ((sc->rxchainmask >> 1) & 1) + 430 ((sc->rxchainmask >> 0) & 1); 431 printf(", MIMO %dT%dR, %.4s, address %s\n", sc->ntxchains, 432 sc->nrxchains, sc->eeprom_domain, ether_sprintf(ic->ic_myaddr)); 433 434 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 435 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 436 ic->ic_state = IEEE80211_S_INIT; 437 438 /* Set device capabilities. */ 439 ic->ic_caps = 440 IEEE80211_C_WEP | /* WEP */ 441 IEEE80211_C_RSN | /* WPA/RSN */ 442 IEEE80211_C_MONITOR | /* monitor mode supported */ 443 IEEE80211_C_SHSLOT | /* short slot time supported */ 444 IEEE80211_C_SHPREAMBLE | /* short preamble supported */ 445 IEEE80211_C_PMGT; /* power saving supported */ 446 447 #ifndef IEEE80211_NO_HT 448 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 449 /* Set HT capabilities. 
*/ 450 ic->ic_htcaps = 451 #if IWN_RBUF_SIZE == 8192 452 IEEE80211_HTCAP_AMSDU7935 | 453 #endif 454 IEEE80211_HTCAP_CBW20_40 | 455 IEEE80211_HTCAP_SGI20 | 456 IEEE80211_HTCAP_SGI40; 457 if (sc->hw_type != IWN_HW_REV_TYPE_4965) 458 ic->ic_htcaps |= IEEE80211_HTCAP_GF; 459 if (sc->hw_type == IWN_HW_REV_TYPE_6050) 460 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN; 461 else 462 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS; 463 } 464 #endif /* !IEEE80211_NO_HT */ 465 466 /* Set supported legacy rates. */ 467 ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b; 468 ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g; 469 if (sc->sc_flags & IWN_FLAG_HAS_5GHZ) { 470 ic->ic_sup_rates[IEEE80211_MODE_11A] = 471 ieee80211_std_rateset_11a; 472 } 473 #ifndef IEEE80211_NO_HT 474 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 475 /* Set supported HT rates. */ 476 ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */ 477 if (sc->nrxchains > 1) 478 ic->ic_sup_mcs[1] = 0xff; /* MCS 7-15 */ 479 if (sc->nrxchains > 2) 480 ic->ic_sup_mcs[2] = 0xff; /* MCS 16-23 */ 481 } 482 #endif 483 484 /* IBSS channel undefined for now. */ 485 ic->ic_ibss_chan = &ic->ic_channels[0]; 486 487 ifp->if_softc = sc; 488 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 489 ifp->if_ioctl = iwn_ioctl; 490 ifp->if_start = iwn_start; 491 ifp->if_watchdog = iwn_watchdog; 492 IFQ_SET_READY(&ifp->if_snd); 493 memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 494 495 if_attach(ifp); 496 ieee80211_ifattach(ifp); 497 ic->ic_node_alloc = iwn_node_alloc; 498 ic->ic_newassoc = iwn_newassoc; 499 ic->ic_updateedca = iwn_updateedca; 500 ic->ic_set_key = iwn_set_key; 501 ic->ic_delete_key = iwn_delete_key; 502 #ifndef IEEE80211_NO_HT 503 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start; 504 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop; 505 ic->ic_ampdu_tx_start = iwn_ampdu_tx_start; 506 ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop; 507 #endif 508 509 /* Override 802.11 state transition machine. */ 510 sc->sc_newstate = ic->ic_newstate; 511 ic->ic_newstate = iwn_newstate; 512 ieee80211_media_init(ifp, iwn_media_change, ieee80211_media_status); 513 514 sc->amrr.amrr_min_success_threshold = 1; 515 sc->amrr.amrr_max_success_threshold = 15; 516 517 #if NBPFILTER > 0 518 iwn_radiotap_attach(sc); 519 #endif 520 timeout_set(&sc->calib_to, iwn_calib_timeout, sc); 521 return; 522 523 /* Free allocated memory if something failed during attachment. 
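	   The fail labels below undo the allocations in reverse order: TX rings
	   and the scheduler, then the ICT table, the keep warm page and finally
	   the firmware DMA memory.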
*/ 524 fail4: while (--i >= 0) 525 iwn_free_tx_ring(sc, &sc->txq[i]); 526 iwn_free_sched(sc); 527 fail3: if (sc->ict != NULL) 528 iwn_free_ict(sc); 529 fail2: iwn_free_kw(sc); 530 fail1: iwn_free_fwmem(sc); 531 } 532 533 int 534 iwn4965_attach(struct iwn_softc *sc, pci_product_id_t pid) 535 { 536 struct iwn_ops *ops = &sc->ops; 537 538 ops->load_firmware = iwn4965_load_firmware; 539 ops->read_eeprom = iwn4965_read_eeprom; 540 ops->post_alive = iwn4965_post_alive; 541 ops->nic_config = iwn4965_nic_config; 542 ops->update_sched = iwn4965_update_sched; 543 ops->get_temperature = iwn4965_get_temperature; 544 ops->get_rssi = iwn4965_get_rssi; 545 ops->set_txpower = iwn4965_set_txpower; 546 ops->init_gains = iwn4965_init_gains; 547 ops->set_gains = iwn4965_set_gains; 548 ops->add_node = iwn4965_add_node; 549 ops->tx_done = iwn4965_tx_done; 550 #ifndef IEEE80211_NO_HT 551 ops->ampdu_tx_start = iwn4965_ampdu_tx_start; 552 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop; 553 #endif 554 sc->ntxqs = IWN4965_NTXQUEUES; 555 sc->ndmachnls = IWN4965_NDMACHNLS; 556 sc->broadcast_id = IWN4965_ID_BROADCAST; 557 sc->rxonsz = IWN4965_RXONSZ; 558 sc->schedsz = IWN4965_SCHEDSZ; 559 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ; 560 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ; 561 sc->fwsz = IWN4965_FWSZ; 562 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT; 563 sc->limits = &iwn4965_sensitivity_limits; 564 sc->fwname = "iwn-4965"; 565 /* Override chains masks, ROM is known to be broken. */ 566 sc->txchainmask = IWN_ANT_AB; 567 sc->rxchainmask = IWN_ANT_ABC; 568 569 return 0; 570 } 571 572 int 573 iwn5000_attach(struct iwn_softc *sc, pci_product_id_t pid) 574 { 575 struct iwn_ops *ops = &sc->ops; 576 577 ops->load_firmware = iwn5000_load_firmware; 578 ops->read_eeprom = iwn5000_read_eeprom; 579 ops->post_alive = iwn5000_post_alive; 580 ops->nic_config = iwn5000_nic_config; 581 ops->update_sched = iwn5000_update_sched; 582 ops->get_temperature = iwn5000_get_temperature; 583 ops->get_rssi = iwn5000_get_rssi; 584 ops->set_txpower = iwn5000_set_txpower; 585 ops->init_gains = iwn5000_init_gains; 586 ops->set_gains = iwn5000_set_gains; 587 ops->add_node = iwn5000_add_node; 588 ops->tx_done = iwn5000_tx_done; 589 #ifndef IEEE80211_NO_HT 590 ops->ampdu_tx_start = iwn5000_ampdu_tx_start; 591 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop; 592 #endif 593 sc->ntxqs = IWN5000_NTXQUEUES; 594 sc->ndmachnls = IWN5000_NDMACHNLS; 595 sc->broadcast_id = IWN5000_ID_BROADCAST; 596 sc->rxonsz = IWN5000_RXONSZ; 597 sc->schedsz = IWN5000_SCHEDSZ; 598 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ; 599 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ; 600 sc->fwsz = IWN5000_FWSZ; 601 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT; 602 603 switch (sc->hw_type) { 604 case IWN_HW_REV_TYPE_5100: 605 sc->limits = &iwn5000_sensitivity_limits; 606 sc->fwname = "iwn-5000"; 607 /* Override chains masks, ROM is known to be broken. 
*/ 608 sc->txchainmask = IWN_ANT_B; 609 sc->rxchainmask = IWN_ANT_AB; 610 break; 611 case IWN_HW_REV_TYPE_5150: 612 sc->limits = &iwn5150_sensitivity_limits; 613 sc->fwname = "iwn-5150"; 614 break; 615 case IWN_HW_REV_TYPE_5300: 616 case IWN_HW_REV_TYPE_5350: 617 sc->limits = &iwn5000_sensitivity_limits; 618 sc->fwname = "iwn-5000"; 619 break; 620 case IWN_HW_REV_TYPE_1000: 621 sc->limits = &iwn1000_sensitivity_limits; 622 sc->fwname = "iwn-1000"; 623 break; 624 case IWN_HW_REV_TYPE_6000: 625 sc->limits = &iwn6000_sensitivity_limits; 626 sc->fwname = "iwn-6000"; 627 if (pid == PCI_PRODUCT_INTEL_WL_6200_1 || 628 pid == PCI_PRODUCT_INTEL_WL_6200_2) { 629 sc->sc_flags |= IWN_FLAG_INTERNAL_PA; 630 /* Override chains masks, ROM is known to be broken. */ 631 sc->txchainmask = IWN_ANT_BC; 632 sc->rxchainmask = IWN_ANT_BC; 633 } 634 break; 635 case IWN_HW_REV_TYPE_6050: 636 sc->limits = &iwn6000_sensitivity_limits; 637 sc->fwname = "iwn-6050"; 638 break; 639 case IWN_HW_REV_TYPE_6005: 640 sc->limits = &iwn6000_sensitivity_limits; 641 sc->fwname = "iwn-6005"; 642 break; 643 default: 644 printf(": adapter type %d not supported\n", sc->hw_type); 645 return ENOTSUP; 646 } 647 return 0; 648 } 649 650 #if NBPFILTER > 0 651 /* 652 * Attach the interface to 802.11 radiotap. 653 */ 654 void 655 iwn_radiotap_attach(struct iwn_softc *sc) 656 { 657 bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO, 658 sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN); 659 660 sc->sc_rxtap_len = sizeof sc->sc_rxtapu; 661 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); 662 sc->sc_rxtap.wr_ihdr.it_present = htole32(IWN_RX_RADIOTAP_PRESENT); 663 664 sc->sc_txtap_len = sizeof sc->sc_txtapu; 665 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); 666 sc->sc_txtap.wt_ihdr.it_present = htole32(IWN_TX_RADIOTAP_PRESENT); 667 } 668 #endif 669 670 int 671 iwn_detach(struct device *self, int flags) 672 { 673 struct iwn_softc *sc = (struct iwn_softc *)self; 674 struct ifnet *ifp = &sc->sc_ic.ic_if; 675 int qid; 676 677 timeout_del(&sc->calib_to); 678 679 /* Uninstall interrupt handler. */ 680 if (sc->sc_ih != NULL) 681 pci_intr_disestablish(sc->sc_pct, sc->sc_ih); 682 683 /* Free DMA resources. */ 684 iwn_free_rx_ring(sc, &sc->rxq); 685 for (qid = 0; qid < sc->ntxqs; qid++) 686 iwn_free_tx_ring(sc, &sc->txq[qid]); 687 iwn_free_sched(sc); 688 iwn_free_kw(sc); 689 if (sc->ict != NULL) 690 iwn_free_ict(sc); 691 iwn_free_fwmem(sc); 692 693 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz); 694 695 ieee80211_ifdetach(ifp); 696 if_detach(ifp); 697 698 return 0; 699 } 700 701 int 702 iwn_activate(struct device *self, int act) 703 { 704 struct iwn_softc *sc = (struct iwn_softc *)self; 705 struct ifnet *ifp = &sc->sc_ic.ic_if; 706 707 switch (act) { 708 case DVACT_SUSPEND: 709 if (ifp->if_flags & IFF_RUNNING) 710 iwn_stop(ifp, 0); 711 break; 712 case DVACT_RESUME: 713 workq_queue_task(NULL, &sc->sc_resume_wqt, 0, 714 iwn_resume, sc, NULL); 715 break; 716 } 717 718 return 0; 719 } 720 721 void 722 iwn_resume(void *arg1, void *arg2) 723 { 724 struct iwn_softc *sc = arg1; 725 struct ifnet *ifp = &sc->sc_ic.ic_if; 726 pcireg_t reg; 727 int s; 728 729 /* Clear device-specific "PCI retry timeout" register (41h). 
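	   The register lives in the upper byte of the 32-bit configuration word
	   at offset 0x40, so only bits 15:8 are cleared here.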
*/ 730 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40); 731 if (reg & 0xff00) 732 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00); 733 734 s = splnet(); 735 while (sc->sc_flags & IWN_FLAG_BUSY) 736 tsleep(&sc->sc_flags, 0, "iwnpwr", 0); 737 sc->sc_flags |= IWN_FLAG_BUSY; 738 739 if (ifp->if_flags & IFF_UP) 740 iwn_init(ifp); 741 742 sc->sc_flags &= ~IWN_FLAG_BUSY; 743 wakeup(&sc->sc_flags); 744 splx(s); 745 } 746 747 int 748 iwn_nic_lock(struct iwn_softc *sc) 749 { 750 int ntries; 751 752 /* Request exclusive access to NIC. */ 753 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 754 755 /* Spin until we actually get the lock. */ 756 for (ntries = 0; ntries < 1000; ntries++) { 757 if ((IWN_READ(sc, IWN_GP_CNTRL) & 758 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) == 759 IWN_GP_CNTRL_MAC_ACCESS_ENA) 760 return 0; 761 DELAY(10); 762 } 763 return ETIMEDOUT; 764 } 765 766 static __inline void 767 iwn_nic_unlock(struct iwn_softc *sc) 768 { 769 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 770 } 771 772 static __inline uint32_t 773 iwn_prph_read(struct iwn_softc *sc, uint32_t addr) 774 { 775 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr); 776 IWN_BARRIER_READ_WRITE(sc); 777 return IWN_READ(sc, IWN_PRPH_RDATA); 778 } 779 780 static __inline void 781 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 782 { 783 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr); 784 IWN_BARRIER_WRITE(sc); 785 IWN_WRITE(sc, IWN_PRPH_WDATA, data); 786 } 787 788 static __inline void 789 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 790 { 791 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask); 792 } 793 794 static __inline void 795 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 796 { 797 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask); 798 } 799 800 static __inline void 801 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr, 802 const uint32_t *data, int count) 803 { 804 for (; count > 0; count--, data++, addr += 4) 805 iwn_prph_write(sc, addr, *data); 806 } 807 808 static __inline uint32_t 809 iwn_mem_read(struct iwn_softc *sc, uint32_t addr) 810 { 811 IWN_WRITE(sc, IWN_MEM_RADDR, addr); 812 IWN_BARRIER_READ_WRITE(sc); 813 return IWN_READ(sc, IWN_MEM_RDATA); 814 } 815 816 static __inline void 817 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 818 { 819 IWN_WRITE(sc, IWN_MEM_WADDR, addr); 820 IWN_BARRIER_WRITE(sc); 821 IWN_WRITE(sc, IWN_MEM_WDATA, data); 822 } 823 824 static __inline void 825 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data) 826 { 827 uint32_t tmp; 828 829 tmp = iwn_mem_read(sc, addr & ~3); 830 if (addr & 3) 831 tmp = (tmp & 0x0000ffff) | data << 16; 832 else 833 tmp = (tmp & 0xffff0000) | data; 834 iwn_mem_write(sc, addr & ~3, tmp); 835 } 836 837 static __inline void 838 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data, 839 int count) 840 { 841 for (; count > 0; count--, addr += 4) 842 *data++ = iwn_mem_read(sc, addr); 843 } 844 845 static __inline void 846 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val, 847 int count) 848 { 849 for (; count > 0; count--, addr += 4) 850 iwn_mem_write(sc, addr, val); 851 } 852 853 int 854 iwn_eeprom_lock(struct iwn_softc *sc) 855 { 856 int i, ntries; 857 858 for (i = 0; i < 100; i++) { 859 /* Request exclusive access to EEPROM. 
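	   Setting the LOCKED bit asks the hardware for the ROM semaphore; each of
	   the 100 requests below polls for up to 1ms (100 x 10us) waiting for the
	   bit to read back as set.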
*/ 860 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 861 IWN_HW_IF_CONFIG_EEPROM_LOCKED); 862 863 /* Spin until we actually get the lock. */ 864 for (ntries = 0; ntries < 100; ntries++) { 865 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 866 IWN_HW_IF_CONFIG_EEPROM_LOCKED) 867 return 0; 868 DELAY(10); 869 } 870 } 871 return ETIMEDOUT; 872 } 873 874 static __inline void 875 iwn_eeprom_unlock(struct iwn_softc *sc) 876 { 877 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED); 878 } 879 880 /* 881 * Initialize access by host to One Time Programmable ROM. 882 * NB: This kind of ROM can be found on 1000 or 6000 Series only. 883 */ 884 int 885 iwn_init_otprom(struct iwn_softc *sc) 886 { 887 uint16_t prev, base, next; 888 int count, error; 889 890 /* Wait for clock stabilization before accessing prph. */ 891 if ((error = iwn_clock_wait(sc)) != 0) 892 return error; 893 894 if ((error = iwn_nic_lock(sc)) != 0) 895 return error; 896 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 897 DELAY(5); 898 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 899 iwn_nic_unlock(sc); 900 901 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */ 902 if (sc->hw_type != IWN_HW_REV_TYPE_1000) { 903 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT, 904 IWN_RESET_LINK_PWR_MGMT_DIS); 905 } 906 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER); 907 /* Clear ECC status. */ 908 IWN_SETBITS(sc, IWN_OTP_GP, 909 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS); 910 911 /* 912 * Find the block before last block (contains the EEPROM image) 913 * for HW without OTP shadow RAM. 914 */ 915 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 916 /* Switch to absolute addressing mode. */ 917 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS); 918 base = 0; 919 for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) { 920 error = iwn_read_prom_data(sc, base, &next, 2); 921 if (error != 0) 922 return error; 923 if (next == 0) /* End of linked-list. */ 924 break; 925 prev = base; 926 base = letoh16(next); 927 } 928 if (count == 0 || count == IWN1000_OTP_NBLOCKS) 929 return EIO; 930 /* Skip "next" word. */ 931 sc->prom_base = prev + 1; 932 } 933 return 0; 934 } 935 936 int 937 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count) 938 { 939 uint8_t *out = data; 940 uint32_t val, tmp; 941 int ntries; 942 943 addr += sc->prom_base; 944 for (; count > 0; count -= 2, addr++) { 945 IWN_WRITE(sc, IWN_EEPROM, addr << 2); 946 for (ntries = 0; ntries < 10; ntries++) { 947 val = IWN_READ(sc, IWN_EEPROM); 948 if (val & IWN_EEPROM_READ_VALID) 949 break; 950 DELAY(5); 951 } 952 if (ntries == 10) { 953 printf("%s: timeout reading ROM at 0x%x\n", 954 sc->sc_dev.dv_xname, addr); 955 return ETIMEDOUT; 956 } 957 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 958 /* OTPROM, check for ECC errors. */ 959 tmp = IWN_READ(sc, IWN_OTP_GP); 960 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) { 961 printf("%s: OTPROM ECC error at 0x%x\n", 962 sc->sc_dev.dv_xname, addr); 963 return EIO; 964 } 965 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) { 966 /* Correctable ECC error, clear bit. 
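	   The IWN_OTP_GP_ECC_CORR_STTS bit is written back to acknowledge the
	   corrected error so the status starts clean for the next word read.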
*/ 967 IWN_SETBITS(sc, IWN_OTP_GP, 968 IWN_OTP_GP_ECC_CORR_STTS); 969 } 970 } 971 *out++ = val >> 16; 972 if (count > 1) 973 *out++ = val >> 24; 974 } 975 return 0; 976 } 977 978 int 979 iwn_dma_contig_alloc(bus_dma_tag_t tag, struct iwn_dma_info *dma, void **kvap, 980 bus_size_t size, bus_size_t alignment) 981 { 982 int nsegs, error; 983 984 dma->tag = tag; 985 dma->size = size; 986 987 error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT, 988 &dma->map); 989 if (error != 0) 990 goto fail; 991 992 error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs, 993 BUS_DMA_NOWAIT | BUS_DMA_ZERO); 994 if (error != 0) 995 goto fail; 996 997 error = bus_dmamem_map(tag, &dma->seg, 1, size, &dma->vaddr, 998 BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 999 if (error != 0) 1000 goto fail; 1001 1002 error = bus_dmamap_load_raw(tag, dma->map, &dma->seg, 1, size, 1003 BUS_DMA_NOWAIT); 1004 if (error != 0) 1005 goto fail; 1006 1007 bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE); 1008 1009 dma->paddr = dma->map->dm_segs[0].ds_addr; 1010 if (kvap != NULL) 1011 *kvap = dma->vaddr; 1012 1013 return 0; 1014 1015 fail: iwn_dma_contig_free(dma); 1016 return error; 1017 } 1018 1019 void 1020 iwn_dma_contig_free(struct iwn_dma_info *dma) 1021 { 1022 if (dma->map != NULL) { 1023 if (dma->vaddr != NULL) { 1024 bus_dmamap_sync(dma->tag, dma->map, 0, dma->size, 1025 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1026 bus_dmamap_unload(dma->tag, dma->map); 1027 bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size); 1028 bus_dmamem_free(dma->tag, &dma->seg, 1); 1029 dma->vaddr = NULL; 1030 } 1031 bus_dmamap_destroy(dma->tag, dma->map); 1032 dma->map = NULL; 1033 } 1034 } 1035 1036 int 1037 iwn_alloc_sched(struct iwn_softc *sc) 1038 { 1039 /* TX scheduler rings must be aligned on a 1KB boundary. */ 1040 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma, 1041 (void **)&sc->sched, sc->schedsz, 1024); 1042 } 1043 1044 void 1045 iwn_free_sched(struct iwn_softc *sc) 1046 { 1047 iwn_dma_contig_free(&sc->sched_dma); 1048 } 1049 1050 int 1051 iwn_alloc_kw(struct iwn_softc *sc) 1052 { 1053 /* "Keep Warm" page must be aligned on a 4KB boundary. */ 1054 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, NULL, 4096, 1055 4096); 1056 } 1057 1058 void 1059 iwn_free_kw(struct iwn_softc *sc) 1060 { 1061 iwn_dma_contig_free(&sc->kw_dma); 1062 } 1063 1064 int 1065 iwn_alloc_ict(struct iwn_softc *sc) 1066 { 1067 /* ICT table must be aligned on a 4KB boundary. */ 1068 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, 1069 (void **)&sc->ict, IWN_ICT_SIZE, 4096); 1070 } 1071 1072 void 1073 iwn_free_ict(struct iwn_softc *sc) 1074 { 1075 iwn_dma_contig_free(&sc->ict_dma); 1076 } 1077 1078 int 1079 iwn_alloc_fwmem(struct iwn_softc *sc) 1080 { 1081 /* Must be aligned on a 16-byte boundary. */ 1082 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, NULL, 1083 sc->fwsz, 16); 1084 } 1085 1086 void 1087 iwn_free_fwmem(struct iwn_softc *sc) 1088 { 1089 iwn_dma_contig_free(&sc->fw_dma); 1090 } 1091 1092 int 1093 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1094 { 1095 bus_size_t size; 1096 int i, error; 1097 1098 ring->cur = 0; 1099 1100 /* Allocate RX descriptors (256-byte aligned). 
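	   Each descriptor is a single 32-bit word holding the DMA address of its
	   RX buffer shifted right by 8 bits, hence the
	   IWN_RX_RING_COUNT * sizeof (uint32_t) sizing below.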
	 */
	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
	    (void **)&ring->desc, size, 256);
	if (error != 0) {
		printf("%s: could not allocate RX ring DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* Allocate RX status area (16-byte aligned). */
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    (void **)&ring->stat, sizeof (struct iwn_rx_status), 16);
	if (error != 0) {
		printf("%s: could not allocate RX status DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];

		error = bus_dmamap_create(sc->sc_dmat, IWN_RBUF_SIZE, 1,
		    IWN_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (error != 0) {
			printf("%s: could not create RX buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		data->m = MCLGETI(NULL, M_DONTWAIT, NULL, IWN_RBUF_SIZE);
		if (data->m == NULL) {
			printf("%s: could not allocate RX mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOBUFS;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), IWN_RBUF_SIZE, NULL,
		    BUS_DMA_NOWAIT | BUS_DMA_READ);
		if (error != 0) {
			printf("%s: can't map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			goto fail;
		}

		/* Set physical address of RX buffer (256-byte aligned). */
		ring->desc[i] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	}

	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0, size,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	iwn_free_rx_ring(sc, ring);
	return error;
}

void
iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	int ntries;

	if (iwn_nic_lock(sc) == 0) {
		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
			    IWN_FH_RX_STATUS_IDLE)
				break;
			DELAY(10);
		}
		iwn_nic_unlock(sc);
	}
	ring->cur = 0;
	sc->last_rx_valid = 0;
}

void
iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	int i;

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned).
*/ 1217 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc); 1218 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, 1219 (void **)&ring->desc, size, 256); 1220 if (error != 0) { 1221 printf("%s: could not allocate TX ring DMA memory\n", 1222 sc->sc_dev.dv_xname); 1223 goto fail; 1224 } 1225 /* 1226 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need 1227 * to allocate commands space for other rings. 1228 * XXX Do we really need to allocate descriptors for other rings? 1229 */ 1230 if (qid > 4) 1231 return 0; 1232 1233 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd); 1234 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, 1235 (void **)&ring->cmd, size, 4); 1236 if (error != 0) { 1237 printf("%s: could not allocate TX cmd DMA memory\n", 1238 sc->sc_dev.dv_xname); 1239 goto fail; 1240 } 1241 1242 paddr = ring->cmd_dma.paddr; 1243 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1244 struct iwn_tx_data *data = &ring->data[i]; 1245 1246 data->cmd_paddr = paddr; 1247 data->scratch_paddr = paddr + 12; 1248 paddr += sizeof (struct iwn_tx_cmd); 1249 1250 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1251 IWN_MAX_SCATTER - 1, MCLBYTES, 0, BUS_DMA_NOWAIT, 1252 &data->map); 1253 if (error != 0) { 1254 printf("%s: could not create TX buf DMA map\n", 1255 sc->sc_dev.dv_xname); 1256 goto fail; 1257 } 1258 } 1259 return 0; 1260 1261 fail: iwn_free_tx_ring(sc, ring); 1262 return error; 1263 } 1264 1265 void 1266 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1267 { 1268 int i; 1269 1270 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1271 struct iwn_tx_data *data = &ring->data[i]; 1272 1273 if (data->m != NULL) { 1274 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 1275 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1276 bus_dmamap_unload(sc->sc_dmat, data->map); 1277 m_freem(data->m); 1278 data->m = NULL; 1279 } 1280 } 1281 /* Clear TX descriptors. */ 1282 memset(ring->desc, 0, ring->desc_dma.size); 1283 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0, 1284 ring->desc_dma.size, BUS_DMASYNC_PREWRITE); 1285 sc->qfullmsk &= ~(1 << ring->qid); 1286 ring->queued = 0; 1287 ring->cur = 0; 1288 } 1289 1290 void 1291 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1292 { 1293 int i; 1294 1295 iwn_dma_contig_free(&ring->desc_dma); 1296 iwn_dma_contig_free(&ring->cmd_dma); 1297 1298 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1299 struct iwn_tx_data *data = &ring->data[i]; 1300 1301 if (data->m != NULL) { 1302 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 1303 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1304 bus_dmamap_unload(sc->sc_dmat, data->map); 1305 m_freem(data->m); 1306 } 1307 if (data->map != NULL) 1308 bus_dmamap_destroy(sc->sc_dmat, data->map); 1309 } 1310 } 1311 1312 void 1313 iwn5000_ict_reset(struct iwn_softc *sc) 1314 { 1315 /* Disable interrupts. */ 1316 IWN_WRITE(sc, IWN_INT_MASK, 0); 1317 1318 /* Reset ICT table. */ 1319 memset(sc->ict, 0, IWN_ICT_SIZE); 1320 sc->ict_cur = 0; 1321 1322 /* Set physical address of ICT table (4KB aligned). */ 1323 DPRINTF(("enabling ICT\n")); 1324 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE | 1325 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12); 1326 1327 /* Enable periodic RX interrupt. */ 1328 sc->int_mask |= IWN_INT_RX_PERIODIC; 1329 /* Switch to ICT interrupt mode in driver. */ 1330 sc->sc_flags |= IWN_FLAG_USE_ICT; 1331 1332 /* Re-enable interrupts. 
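	   Writing all ones to the interrupt status register first acknowledges
	   anything still pending before the mask is restored.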
*/ 1333 IWN_WRITE(sc, IWN_INT, 0xffffffff); 1334 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 1335 } 1336 1337 int 1338 iwn_read_eeprom(struct iwn_softc *sc) 1339 { 1340 struct iwn_ops *ops = &sc->ops; 1341 struct ieee80211com *ic = &sc->sc_ic; 1342 uint16_t val; 1343 int error; 1344 1345 /* Check whether adapter has an EEPROM or an OTPROM. */ 1346 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 && 1347 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP)) 1348 sc->sc_flags |= IWN_FLAG_HAS_OTPROM; 1349 DPRINTF(("%s found\n", (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? 1350 "OTPROM" : "EEPROM")); 1351 1352 /* Adapter has to be powered on for EEPROM access to work. */ 1353 if ((error = iwn_apm_init(sc)) != 0) { 1354 printf("%s: could not power ON adapter\n", 1355 sc->sc_dev.dv_xname); 1356 return error; 1357 } 1358 1359 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) { 1360 printf("%s: bad ROM signature\n", sc->sc_dev.dv_xname); 1361 return EIO; 1362 } 1363 if ((error = iwn_eeprom_lock(sc)) != 0) { 1364 printf("%s: could not lock ROM (error=%d)\n", 1365 sc->sc_dev.dv_xname, error); 1366 return error; 1367 } 1368 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1369 if ((error = iwn_init_otprom(sc)) != 0) { 1370 printf("%s: could not initialize OTPROM\n", 1371 sc->sc_dev.dv_xname); 1372 return error; 1373 } 1374 } 1375 1376 iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2); 1377 DPRINTF(("SKU capabilities=0x%04x\n", letoh16(val))); 1378 /* Check if HT support is bonded out. */ 1379 if (val & htole16(IWN_EEPROM_SKU_CAP_11N)) 1380 sc->sc_flags |= IWN_FLAG_HAS_11N; 1381 1382 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2); 1383 sc->rfcfg = letoh16(val); 1384 DPRINTF(("radio config=0x%04x\n", sc->rfcfg)); 1385 /* Read Tx/Rx chains from ROM unless it's known to be broken. */ 1386 if (sc->txchainmask == 0) 1387 sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg); 1388 if (sc->rxchainmask == 0) 1389 sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg); 1390 1391 /* Read MAC address. */ 1392 iwn_read_prom_data(sc, IWN_EEPROM_MAC, ic->ic_myaddr, 6); 1393 1394 /* Read adapter-specific information from EEPROM. */ 1395 ops->read_eeprom(sc); 1396 1397 iwn_apm_stop(sc); /* Power OFF adapter. */ 1398 1399 iwn_eeprom_unlock(sc); 1400 return 0; 1401 } 1402 1403 void 1404 iwn4965_read_eeprom(struct iwn_softc *sc) 1405 { 1406 uint32_t addr; 1407 uint16_t val; 1408 int i; 1409 1410 /* Read regulatory domain (4 ASCII characters). */ 1411 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4); 1412 1413 /* Read the list of authorized channels (20MHz ones only). */ 1414 for (i = 0; i < 5; i++) { 1415 addr = iwn4965_regulatory_bands[i]; 1416 iwn_read_eeprom_channels(sc, i, addr); 1417 } 1418 1419 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */ 1420 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2); 1421 sc->maxpwr2GHz = val & 0xff; 1422 sc->maxpwr5GHz = val >> 8; 1423 /* Check that EEPROM values are within valid range. */ 1424 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50) 1425 sc->maxpwr5GHz = 38; 1426 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50) 1427 sc->maxpwr2GHz = 38; 1428 DPRINTF(("maxpwr 2GHz=%d 5GHz=%d\n", sc->maxpwr2GHz, sc->maxpwr5GHz)); 1429 1430 /* Read samples for each TX power group. */ 1431 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands, 1432 sizeof sc->bands); 1433 1434 /* Read voltage at which samples were taken. 
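	   The value is a signed 16-bit quantity expressed in units of 0.3V, as
	   the debug printout below notes.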
*/ 1435 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2); 1436 sc->eeprom_voltage = (int16_t)letoh16(val); 1437 DPRINTF(("voltage=%d (in 0.3V)\n", sc->eeprom_voltage)); 1438 1439 #ifdef IWN_DEBUG 1440 /* Print samples. */ 1441 if (iwn_debug > 0) { 1442 for (i = 0; i < IWN_NBANDS; i++) 1443 iwn4965_print_power_group(sc, i); 1444 } 1445 #endif 1446 } 1447 1448 #ifdef IWN_DEBUG 1449 void 1450 iwn4965_print_power_group(struct iwn_softc *sc, int i) 1451 { 1452 struct iwn4965_eeprom_band *band = &sc->bands[i]; 1453 struct iwn4965_eeprom_chan_samples *chans = band->chans; 1454 int j, c; 1455 1456 printf("===band %d===\n", i); 1457 printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi); 1458 printf("chan1 num=%d\n", chans[0].num); 1459 for (c = 0; c < 2; c++) { 1460 for (j = 0; j < IWN_NSAMPLES; j++) { 1461 printf("chain %d, sample %d: temp=%d gain=%d " 1462 "power=%d pa_det=%d\n", c, j, 1463 chans[0].samples[c][j].temp, 1464 chans[0].samples[c][j].gain, 1465 chans[0].samples[c][j].power, 1466 chans[0].samples[c][j].pa_det); 1467 } 1468 } 1469 printf("chan2 num=%d\n", chans[1].num); 1470 for (c = 0; c < 2; c++) { 1471 for (j = 0; j < IWN_NSAMPLES; j++) { 1472 printf("chain %d, sample %d: temp=%d gain=%d " 1473 "power=%d pa_det=%d\n", c, j, 1474 chans[1].samples[c][j].temp, 1475 chans[1].samples[c][j].gain, 1476 chans[1].samples[c][j].power, 1477 chans[1].samples[c][j].pa_det); 1478 } 1479 } 1480 } 1481 #endif 1482 1483 void 1484 iwn5000_read_eeprom(struct iwn_softc *sc) 1485 { 1486 struct iwn5000_eeprom_calib_hdr hdr; 1487 int32_t volt; 1488 uint32_t base, addr; 1489 uint16_t val; 1490 int i; 1491 1492 /* Read regulatory domain (4 ASCII characters). */ 1493 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 1494 base = letoh16(val); 1495 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN, 1496 sc->eeprom_domain, 4); 1497 1498 /* Read the list of authorized channels (20MHz ones only). */ 1499 for (i = 0; i < 5; i++) { 1500 addr = base + iwn5000_regulatory_bands[i]; 1501 iwn_read_eeprom_channels(sc, i, addr); 1502 } 1503 1504 /* Read enhanced TX power information for 6000 Series. */ 1505 if (sc->hw_type >= IWN_HW_REV_TYPE_6000) 1506 iwn_read_eeprom_enhinfo(sc); 1507 1508 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2); 1509 base = letoh16(val); 1510 iwn_read_prom_data(sc, base, &hdr, sizeof hdr); 1511 DPRINTF(("calib version=%u pa type=%u voltage=%u\n", 1512 hdr.version, hdr.pa_type, letoh16(hdr.volt))); 1513 sc->calib_ver = hdr.version; 1514 1515 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 1516 /* Compute temperature offset. */ 1517 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2); 1518 sc->eeprom_temp = letoh16(val); 1519 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2); 1520 volt = letoh16(val); 1521 sc->temp_off = sc->eeprom_temp - (volt / -5); 1522 DPRINTF(("temp=%d volt=%d offset=%dK\n", 1523 sc->eeprom_temp, volt, sc->temp_off)); 1524 } else { 1525 /* Read crystal calibration. 
*/ 1526 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL, 1527 &sc->eeprom_crystal, sizeof (uint32_t)); 1528 DPRINTF(("crystal calibration 0x%08x\n", 1529 letoh32(sc->eeprom_crystal))); 1530 } 1531 } 1532 1533 void 1534 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr) 1535 { 1536 struct ieee80211com *ic = &sc->sc_ic; 1537 const struct iwn_chan_band *band = &iwn_bands[n]; 1538 struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND]; 1539 uint8_t chan; 1540 int i; 1541 1542 iwn_read_prom_data(sc, addr, channels, 1543 band->nchan * sizeof (struct iwn_eeprom_chan)); 1544 1545 for (i = 0; i < band->nchan; i++) { 1546 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) 1547 continue; 1548 1549 chan = band->chan[i]; 1550 1551 if (n == 0) { /* 2GHz band */ 1552 ic->ic_channels[chan].ic_freq = 1553 ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ); 1554 ic->ic_channels[chan].ic_flags = 1555 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM | 1556 IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; 1557 1558 } else { /* 5GHz band */ 1559 /* 1560 * Some adapters support channels 7, 8, 11 and 12 1561 * both in the 2GHz and 4.9GHz bands. 1562 * Because of limitations in our net80211 layer, 1563 * we don't support them in the 4.9GHz band. 1564 */ 1565 if (chan <= 14) 1566 continue; 1567 1568 ic->ic_channels[chan].ic_freq = 1569 ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ); 1570 ic->ic_channels[chan].ic_flags = IEEE80211_CHAN_A; 1571 /* We have at least one valid 5GHz channel. */ 1572 sc->sc_flags |= IWN_FLAG_HAS_5GHZ; 1573 } 1574 1575 /* Is active scan allowed on this channel? */ 1576 if (!(channels[i].flags & IWN_EEPROM_CHAN_ACTIVE)) { 1577 ic->ic_channels[chan].ic_flags |= 1578 IEEE80211_CHAN_PASSIVE; 1579 } 1580 1581 /* Save maximum allowed TX power for this channel. */ 1582 sc->maxpwr[chan] = channels[i].maxpwr; 1583 1584 DPRINTF(("adding chan %d flags=0x%x maxpwr=%d\n", 1585 chan, channels[i].flags, sc->maxpwr[chan])); 1586 } 1587 } 1588 1589 void 1590 iwn_read_eeprom_enhinfo(struct iwn_softc *sc) 1591 { 1592 struct iwn_eeprom_enhinfo enhinfo[35]; 1593 uint16_t val, base; 1594 int8_t maxpwr; 1595 int i; 1596 1597 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 1598 base = letoh16(val); 1599 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO, 1600 enhinfo, sizeof enhinfo); 1601 1602 memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr); 1603 for (i = 0; i < nitems(enhinfo); i++) { 1604 if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0) 1605 continue; /* Skip invalid entries. */ 1606 1607 maxpwr = 0; 1608 if (sc->txchainmask & IWN_ANT_A) 1609 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]); 1610 if (sc->txchainmask & IWN_ANT_B) 1611 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]); 1612 if (sc->txchainmask & IWN_ANT_C) 1613 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]); 1614 if (sc->ntxchains == 2) 1615 maxpwr = MAX(maxpwr, enhinfo[i].mimo2); 1616 else if (sc->ntxchains == 3) 1617 maxpwr = MAX(maxpwr, enhinfo[i].mimo3); 1618 maxpwr /= 2; /* Convert half-dBm to dBm. 
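	   For example, a stored value of 48 half-dBm units becomes 24 dBm.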
*/ 1619 1620 DPRINTF(("enhinfo %d, maxpwr=%d\n", i, maxpwr)); 1621 sc->enh_maxpwr[i] = maxpwr; 1622 } 1623 } 1624 1625 struct ieee80211_node * 1626 iwn_node_alloc(struct ieee80211com *ic) 1627 { 1628 return malloc(sizeof (struct iwn_node), M_DEVBUF, M_NOWAIT | M_ZERO); 1629 } 1630 1631 void 1632 iwn_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew) 1633 { 1634 struct iwn_softc *sc = ic->ic_if.if_softc; 1635 struct iwn_node *wn = (void *)ni; 1636 uint8_t rate; 1637 int ridx, i; 1638 1639 ieee80211_amrr_node_init(&sc->amrr, &wn->amn); 1640 /* Start at lowest available bit-rate, AMRR will raise. */ 1641 ni->ni_txrate = 0; 1642 1643 for (i = 0; i < ni->ni_rates.rs_nrates; i++) { 1644 rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL; 1645 /* Map 802.11 rate to HW rate index. */ 1646 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) 1647 if (iwn_rates[ridx].rate == rate) 1648 break; 1649 wn->ridx[i] = ridx; 1650 } 1651 } 1652 1653 int 1654 iwn_media_change(struct ifnet *ifp) 1655 { 1656 struct iwn_softc *sc = ifp->if_softc; 1657 struct ieee80211com *ic = &sc->sc_ic; 1658 uint8_t rate, ridx; 1659 int error; 1660 1661 error = ieee80211_media_change(ifp); 1662 if (error != ENETRESET) 1663 return error; 1664 1665 if (ic->ic_fixed_rate != -1) { 1666 rate = ic->ic_sup_rates[ic->ic_curmode]. 1667 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL; 1668 /* Map 802.11 rate to HW rate index. */ 1669 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) 1670 if (iwn_rates[ridx].rate == rate) 1671 break; 1672 sc->fixed_ridx = ridx; 1673 } 1674 1675 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 1676 (IFF_UP | IFF_RUNNING)) { 1677 iwn_stop(ifp, 0); 1678 error = iwn_init(ifp); 1679 } 1680 return error; 1681 } 1682 1683 int 1684 iwn_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 1685 { 1686 struct ifnet *ifp = &ic->ic_if; 1687 struct iwn_softc *sc = ifp->if_softc; 1688 int error; 1689 1690 timeout_del(&sc->calib_to); 1691 1692 switch (nstate) { 1693 case IEEE80211_S_SCAN: 1694 /* Make the link LED blink while we're scanning. */ 1695 iwn_set_led(sc, IWN_LED_LINK, 10, 10); 1696 1697 if ((error = iwn_scan(sc, IEEE80211_CHAN_2GHZ)) != 0) { 1698 printf("%s: could not initiate scan\n", 1699 sc->sc_dev.dv_xname); 1700 return error; 1701 } 1702 ic->ic_state = nstate; 1703 return 0; 1704 1705 case IEEE80211_S_ASSOC: 1706 if (ic->ic_state != IEEE80211_S_RUN) 1707 break; 1708 /* FALLTHROUGH */ 1709 case IEEE80211_S_AUTH: 1710 /* Reset state to handle reassociations correctly. 
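	   The association ID and the IWN_FILTER_BSS bit are cleared and the
	   calibration state machine is reset before iwn_auth() reprograms the
	   firmware.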
		 */
		sc->rxon.associd = 0;
		sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
		sc->calib.state = IWN_CALIB_STATE_INIT;

		if ((error = iwn_auth(sc)) != 0) {
			printf("%s: could not move to auth state\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
		break;

	case IEEE80211_S_RUN:
		if ((error = iwn_run(sc)) != 0) {
			printf("%s: could not move to run state\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
		break;

	case IEEE80211_S_INIT:
		sc->calib.state = IWN_CALIB_STATE_INIT;
		break;
	}

	return sc->sc_newstate(ic, nstate, arg);
}

void
iwn_iter_func(void *arg, struct ieee80211_node *ni)
{
	struct iwn_softc *sc = arg;
	struct iwn_node *wn = (struct iwn_node *)ni;

	ieee80211_amrr_choose(&sc->amrr, ni, &wn->amn);
}

void
iwn_calib_timeout(void *arg)
{
	struct iwn_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	int s;

	s = splnet();
	if (ic->ic_fixed_rate == -1) {
		if (ic->ic_opmode == IEEE80211_M_STA)
			iwn_iter_func(sc, ic->ic_bss);
		else
			ieee80211_iterate_nodes(ic, iwn_iter_func, sc);
	}
	/* Force automatic TX power calibration every 60 secs. */
	if (++sc->calib_cnt >= 120) {
		uint32_t flags = 0;

		DPRINTF(("sending request for statistics\n"));
		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
		    sizeof flags, 1);
		sc->calib_cnt = 0;
	}
	splx(s);

	/* Automatic rate control triggered every 500ms. */
	timeout_add_msec(&sc->calib_to, 500);
}

int
iwn_ccmp_decap(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_key *k)
{
	struct ieee80211_frame *wh;
	uint64_t pn, *prsc;
	uint8_t *ivp;
	uint8_t tid;
	int hdrlen;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_get_hdrlen(wh);
	ivp = (uint8_t *)wh + hdrlen;

	/* Check that the ExtIV bit is set. */
	if (!(ivp[3] & IEEE80211_WEP_EXTIV)) {
		DPRINTF(("CCMP decap ExtIV not set\n"));
		return 1;
	}
	tid = ieee80211_has_qos(wh) ?
	    ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
	prsc = &k->k_rsc[tid];

	/* Extract the 48-bit PN from the CCMP header. */
	pn = (uint64_t)ivp[0] |
	    (uint64_t)ivp[1] << 8 |
	    (uint64_t)ivp[4] << 16 |
	    (uint64_t)ivp[5] << 24 |
	    (uint64_t)ivp[6] << 32 |
	    (uint64_t)ivp[7] << 40;
	if (pn <= *prsc) {
		/*
		 * Not necessarily a replayed frame since we did not check
		 * the sequence number of the 802.11 header yet.
		 */
		DPRINTF(("CCMP replayed\n"));
		return 1;
	}
	/* Update last seen packet number. */
	*prsc = pn;

	/* Clear Protected bit and strip IV. */
	wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
	ovbcopy(wh, mtod(m, caddr_t) + IEEE80211_CCMP_HDRLEN, hdrlen);
	m_adj(m, IEEE80211_CCMP_HDRLEN);
	/* Strip MIC. */
	m_adj(m, -IEEE80211_CCMP_MICLEN);
	return 0;
}

/*
 * Process an RX_PHY firmware notification. This is usually immediately
 * followed by an MPDU_RX_DONE notification.
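 * The PHY statistics are cached in sc->last_rx_stat and consumed when the
 * matching MPDU_RX_DONE notification is processed.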
1828 */ 1829 void 1830 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, 1831 struct iwn_rx_data *data) 1832 { 1833 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); 1834 1835 DPRINTFN(2, ("received PHY stats\n")); 1836 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 1837 sizeof (*stat), BUS_DMASYNC_POSTREAD); 1838 1839 /* Save RX statistics, they will be used on MPDU_RX_DONE. */ 1840 memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); 1841 sc->last_rx_valid = 1; 1842 } 1843 1844 /* 1845 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 1846 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 1847 */ 1848 void 1849 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 1850 struct iwn_rx_data *data) 1851 { 1852 struct iwn_ops *ops = &sc->ops; 1853 struct ieee80211com *ic = &sc->sc_ic; 1854 struct ifnet *ifp = &ic->ic_if; 1855 struct iwn_rx_ring *ring = &sc->rxq; 1856 struct ieee80211_frame *wh; 1857 struct ieee80211_rxinfo rxi; 1858 struct ieee80211_node *ni; 1859 struct mbuf *m, *m1; 1860 struct iwn_rx_stat *stat; 1861 caddr_t head; 1862 uint32_t flags; 1863 int error, len, rssi; 1864 1865 if (desc->type == IWN_MPDU_RX_DONE) { 1866 /* Check for prior RX_PHY notification. */ 1867 if (!sc->last_rx_valid) { 1868 DPRINTF(("missing RX_PHY\n")); 1869 return; 1870 } 1871 sc->last_rx_valid = 0; 1872 stat = &sc->last_rx_stat; 1873 } else 1874 stat = (struct iwn_rx_stat *)(desc + 1); 1875 1876 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWN_RBUF_SIZE, 1877 BUS_DMASYNC_POSTREAD); 1878 1879 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 1880 printf("%s: invalid RX statistic header\n", 1881 sc->sc_dev.dv_xname); 1882 return; 1883 } 1884 if (desc->type == IWN_MPDU_RX_DONE) { 1885 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 1886 head = (caddr_t)(mpdu + 1); 1887 len = letoh16(mpdu->len); 1888 } else { 1889 head = (caddr_t)(stat + 1) + stat->cfg_phy_len; 1890 len = letoh16(stat->len); 1891 } 1892 1893 flags = letoh32(*(uint32_t *)(head + len)); 1894 1895 /* Discard frames with a bad FCS early. */ 1896 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 1897 DPRINTFN(2, ("RX flags error %x\n", flags)); 1898 ifp->if_ierrors++; 1899 return; 1900 } 1901 /* Discard frames that are too short. */ 1902 if (len < sizeof (*wh)) { 1903 DPRINTF(("frame too short: %d\n", len)); 1904 ic->ic_stats.is_rx_tooshort++; 1905 ifp->if_ierrors++; 1906 return; 1907 } 1908 1909 m1 = MCLGETI(NULL, M_DONTWAIT, NULL, IWN_RBUF_SIZE); 1910 if (m1 == NULL) { 1911 ic->ic_stats.is_rx_nombuf++; 1912 ifp->if_ierrors++; 1913 return; 1914 } 1915 bus_dmamap_unload(sc->sc_dmat, data->map); 1916 1917 error = bus_dmamap_load(sc->sc_dmat, data->map, mtod(m1, void *), 1918 IWN_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_READ); 1919 if (error != 0) { 1920 m_freem(m1); 1921 1922 /* Try to reload the old mbuf. */ 1923 error = bus_dmamap_load(sc->sc_dmat, data->map, 1924 mtod(data->m, void *), IWN_RBUF_SIZE, NULL, 1925 BUS_DMA_NOWAIT | BUS_DMA_READ); 1926 if (error != 0) { 1927 panic("%s: could not load old RX mbuf", 1928 sc->sc_dev.dv_xname); 1929 } 1930 /* Physical address may have changed. */ 1931 ring->desc[ring->cur] = 1932 htole32(data->map->dm_segs[0].ds_addr >> 8); 1933 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 1934 ring->cur * sizeof (uint32_t), sizeof (uint32_t), 1935 BUS_DMASYNC_PREWRITE); 1936 ifp->if_ierrors++; 1937 return; 1938 } 1939 1940 m = data->m; 1941 data->m = m1; 1942 /* Update RX descriptor. 
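	 * The descriptor holds the buffer's DMA address in 256-byte units
	 * (hence the shift by 8): a buffer at bus address 0x1234500, for
	 * example, is stored as 0x12345.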
*/ 1943 ring->desc[ring->cur] = htole32(data->map->dm_segs[0].ds_addr >> 8); 1944 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 1945 ring->cur * sizeof (uint32_t), sizeof (uint32_t), 1946 BUS_DMASYNC_PREWRITE); 1947 1948 /* Finalize mbuf. */ 1949 m->m_pkthdr.rcvif = ifp; 1950 m->m_data = head; 1951 m->m_pkthdr.len = m->m_len = len; 1952 1953 /* Grab a reference to the source node. */ 1954 wh = mtod(m, struct ieee80211_frame *); 1955 ni = ieee80211_find_rxnode(ic, wh); 1956 1957 rxi.rxi_flags = 0; 1958 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 1959 !IEEE80211_IS_MULTICAST(wh->i_addr1) && 1960 (ni->ni_flags & IEEE80211_NODE_RXPROT) && 1961 ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) { 1962 if ((flags & IWN_RX_CIPHER_MASK) != IWN_RX_CIPHER_CCMP) { 1963 ic->ic_stats.is_ccmp_dec_errs++; 1964 ifp->if_ierrors++; 1965 m_freem(m); 1966 return; 1967 } 1968 /* Check whether decryption was successful or not. */ 1969 if ((desc->type == IWN_MPDU_RX_DONE && 1970 (flags & (IWN_RX_MPDU_DEC | IWN_RX_MPDU_MIC_OK)) != 1971 (IWN_RX_MPDU_DEC | IWN_RX_MPDU_MIC_OK)) || 1972 (desc->type != IWN_MPDU_RX_DONE && 1973 (flags & IWN_RX_DECRYPT_MASK) != IWN_RX_DECRYPT_OK)) { 1974 DPRINTF(("CCMP decryption failed 0x%x\n", flags)); 1975 ic->ic_stats.is_ccmp_dec_errs++; 1976 ifp->if_ierrors++; 1977 m_freem(m); 1978 return; 1979 } 1980 if (iwn_ccmp_decap(sc, m, &ni->ni_pairwise_key) != 0) { 1981 ifp->if_ierrors++; 1982 m_freem(m); 1983 return; 1984 } 1985 rxi.rxi_flags |= IEEE80211_RXI_HWDEC; 1986 } 1987 1988 rssi = ops->get_rssi(stat); 1989 1990 #if NBPFILTER > 0 1991 if (sc->sc_drvbpf != NULL) { 1992 struct mbuf mb; 1993 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 1994 1995 tap->wr_flags = 0; 1996 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 1997 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 1998 tap->wr_chan_freq = 1999 htole16(ic->ic_channels[stat->chan].ic_freq); 2000 tap->wr_chan_flags = 2001 htole16(ic->ic_channels[stat->chan].ic_flags); 2002 tap->wr_dbm_antsignal = (int8_t)rssi; 2003 tap->wr_dbm_antnoise = (int8_t)sc->noise; 2004 tap->wr_tsft = stat->tstamp; 2005 switch (stat->rate) { 2006 /* CCK rates. */ 2007 case 10: tap->wr_rate = 2; break; 2008 case 20: tap->wr_rate = 4; break; 2009 case 55: tap->wr_rate = 11; break; 2010 case 110: tap->wr_rate = 22; break; 2011 /* OFDM rates. */ 2012 case 0xd: tap->wr_rate = 12; break; 2013 case 0xf: tap->wr_rate = 18; break; 2014 case 0x5: tap->wr_rate = 24; break; 2015 case 0x7: tap->wr_rate = 36; break; 2016 case 0x9: tap->wr_rate = 48; break; 2017 case 0xb: tap->wr_rate = 72; break; 2018 case 0x1: tap->wr_rate = 96; break; 2019 case 0x3: tap->wr_rate = 108; break; 2020 /* Unknown rate: should not happen. */ 2021 default: tap->wr_rate = 0; 2022 } 2023 2024 mb.m_data = (caddr_t)tap; 2025 mb.m_len = sc->sc_rxtap_len; 2026 mb.m_next = m; 2027 mb.m_nextpkt = NULL; 2028 mb.m_type = 0; 2029 mb.m_flags = 0; 2030 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN); 2031 } 2032 #endif 2033 2034 /* Send the frame to the 802.11 layer. */ 2035 rxi.rxi_rssi = rssi; 2036 rxi.rxi_tstamp = 0; /* unused */ 2037 ieee80211_input(ifp, m, ni, &rxi); 2038 2039 /* Node is no longer needed. */ 2040 ieee80211_release_node(ic, ni); 2041 } 2042 2043 #ifndef IEEE80211_NO_HT 2044 /* Process an incoming Compressed BlockAck. 
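 * The notification carries the block-ack bitmap for an aggregated
 * transmission.  TX aggregation is not implemented by this driver yet, so
 * the data is only synced and then ignored (XXX TBD below).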
*/ 2045 void 2046 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2047 struct iwn_rx_data *data) 2048 { 2049 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 2050 struct iwn_tx_ring *txq; 2051 2052 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), sizeof (*ba), 2053 BUS_DMASYNC_POSTREAD); 2054 2055 txq = &sc->txq[letoh16(ba->qid)]; 2056 /* XXX TBD */ 2057 } 2058 #endif 2059 2060 /* 2061 * Process a CALIBRATION_RESULT notification sent by the initialization 2062 * firmware on response to a CMD_CALIB_CONFIG command (5000 only). 2063 */ 2064 void 2065 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2066 struct iwn_rx_data *data) 2067 { 2068 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 2069 int len, idx = -1; 2070 2071 /* Runtime firmware should not send such a notification. */ 2072 if (sc->sc_flags & IWN_FLAG_CALIB_DONE) 2073 return; 2074 2075 len = (letoh32(desc->len) & 0x3fff) - 4; 2076 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), len, 2077 BUS_DMASYNC_POSTREAD); 2078 2079 switch (calib->code) { 2080 case IWN5000_PHY_CALIB_DC: 2081 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 2082 idx = 0; 2083 break; 2084 case IWN5000_PHY_CALIB_LO: 2085 idx = 1; 2086 break; 2087 case IWN5000_PHY_CALIB_TX_IQ: 2088 idx = 2; 2089 break; 2090 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 2091 if (sc->hw_type < IWN_HW_REV_TYPE_6000 && 2092 sc->hw_type != IWN_HW_REV_TYPE_5150) 2093 idx = 3; 2094 break; 2095 case IWN5000_PHY_CALIB_BASE_BAND: 2096 idx = 4; 2097 break; 2098 } 2099 if (idx == -1) /* Ignore other results. */ 2100 return; 2101 2102 /* Save calibration result. */ 2103 if (sc->calibcmd[idx].buf != NULL) 2104 free(sc->calibcmd[idx].buf, M_DEVBUF); 2105 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT); 2106 if (sc->calibcmd[idx].buf == NULL) { 2107 DPRINTF(("not enough memory for calibration result %d\n", 2108 calib->code)); 2109 return; 2110 } 2111 DPRINTF(("saving calibration result code=%d len=%d\n", 2112 calib->code, len)); 2113 sc->calibcmd[idx].len = len; 2114 memcpy(sc->calibcmd[idx].buf, calib, len); 2115 } 2116 2117 /* 2118 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 2119 * The latter is sent by the firmware after each received beacon. 2120 */ 2121 void 2122 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2123 struct iwn_rx_data *data) 2124 { 2125 struct iwn_ops *ops = &sc->ops; 2126 struct ieee80211com *ic = &sc->sc_ic; 2127 struct iwn_calib_state *calib = &sc->calib; 2128 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 2129 int temp; 2130 2131 /* Ignore statistics received during a scan. */ 2132 if (ic->ic_state != IEEE80211_S_RUN) 2133 return; 2134 2135 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2136 sizeof (*stats), BUS_DMASYNC_POSTREAD); 2137 2138 DPRINTFN(3, ("received statistics (cmd=%d)\n", desc->type)); 2139 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */ 2140 2141 /* Test if temperature has changed. */ 2142 if (stats->general.temp != sc->rawtemp) { 2143 /* Convert "raw" temperature to degC. */ 2144 sc->rawtemp = stats->general.temp; 2145 temp = ops->get_temperature(sc); 2146 DPRINTFN(2, ("temperature=%dC\n", temp)); 2147 2148 /* Update TX power if need be (4965AGN only). */ 2149 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 2150 iwn4965_power_calibration(sc, temp); 2151 } 2152 2153 if (desc->type != IWN_BEACON_STATISTICS) 2154 return; /* Reply to a statistics request. 
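	 * A BEACON_STATISTICS notification, on the other hand, feeds the
	 * noise-floor estimate and the calibration state machine below.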
*/ 2155 2156 sc->noise = iwn_get_noise(&stats->rx.general); 2157 2158 /* Test that RSSI and noise are present in stats report. */ 2159 if (letoh32(stats->rx.general.flags) != 1) { 2160 DPRINTF(("received statistics without RSSI\n")); 2161 return; 2162 } 2163 2164 /* 2165 * XXX Differential gain calibration makes the 6005 firmware 2166 * crap out, so skip it for now. This effectively disables 2167 * sensitivity tuning as well. 2168 */ 2169 if (sc->hw_type == IWN_HW_REV_TYPE_6005) 2170 return; 2171 2172 if (calib->state == IWN_CALIB_STATE_ASSOC) 2173 iwn_collect_noise(sc, &stats->rx.general); 2174 else if (calib->state == IWN_CALIB_STATE_RUN) 2175 iwn_tune_sensitivity(sc, &stats->rx); 2176 } 2177 2178 /* 2179 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 2180 * and 5000 adapters have different incompatible TX status formats. 2181 */ 2182 void 2183 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2184 struct iwn_rx_data *data) 2185 { 2186 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 2187 2188 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2189 sizeof (*stat), BUS_DMASYNC_POSTREAD); 2190 iwn_tx_done(sc, desc, stat->ackfailcnt, letoh32(stat->status) & 0xff); 2191 } 2192 2193 void 2194 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2195 struct iwn_rx_data *data) 2196 { 2197 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 2198 2199 #ifdef notyet 2200 /* Reset TX scheduler slot. */ 2201 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); 2202 #endif 2203 2204 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2205 sizeof (*stat), BUS_DMASYNC_POSTREAD); 2206 iwn_tx_done(sc, desc, stat->ackfailcnt, letoh16(stat->status) & 0xff); 2207 } 2208 2209 /* 2210 * Adapter-independent backend for TX_DONE firmware notifications. 2211 */ 2212 void 2213 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt, 2214 uint8_t status) 2215 { 2216 struct ieee80211com *ic = &sc->sc_ic; 2217 struct ifnet *ifp = &ic->ic_if; 2218 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2219 struct iwn_tx_data *data = &ring->data[desc->idx]; 2220 struct iwn_node *wn = (struct iwn_node *)data->ni; 2221 2222 /* Update rate control statistics. */ 2223 wn->amn.amn_txcnt++; 2224 if (ackfailcnt > 0) 2225 wn->amn.amn_retrycnt++; 2226 2227 if (status != 1 && status != 2) 2228 ifp->if_oerrors++; 2229 else 2230 ifp->if_opackets++; 2231 2232 /* Unmap and free mbuf. */ 2233 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize, 2234 BUS_DMASYNC_POSTWRITE); 2235 bus_dmamap_unload(sc->sc_dmat, data->map); 2236 m_freem(data->m); 2237 data->m = NULL; 2238 ieee80211_release_node(ic, data->ni); 2239 data->ni = NULL; 2240 2241 sc->sc_tx_timer = 0; 2242 if (--ring->queued < IWN_TX_RING_LOMARK) { 2243 sc->qfullmsk &= ~(1 << ring->qid); 2244 if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) { 2245 ifp->if_flags &= ~IFF_OACTIVE; 2246 (*ifp->if_start)(ifp); 2247 } 2248 } 2249 } 2250 2251 /* 2252 * Process a "command done" firmware notification. This is where we wakeup 2253 * processes waiting for a synchronous command completion. 2254 */ 2255 void 2256 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 2257 { 2258 struct iwn_tx_ring *ring = &sc->txq[4]; 2259 struct iwn_tx_data *data; 2260 2261 if ((desc->qid & 0xf) != 4) 2262 return; /* Not a command ack. */ 2263 2264 data = &ring->data[desc->idx]; 2265 2266 /* If the command was mapped in an mbuf, free it. 
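	 * Only commands larger than the preallocated ring->cmd[] slot are
	 * loaded into a cluster mbuf by iwn_cmd(); the common small commands
	 * need no cleanup here.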
*/ 2267 if (data->m != NULL) { 2268 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 2269 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2270 bus_dmamap_unload(sc->sc_dmat, data->map); 2271 m_freem(data->m); 2272 data->m = NULL; 2273 } 2274 wakeup(&ring->desc[desc->idx]); 2275 } 2276 2277 /* 2278 * Process an INT_FH_RX or INT_SW_RX interrupt. 2279 */ 2280 void 2281 iwn_notif_intr(struct iwn_softc *sc) 2282 { 2283 struct iwn_ops *ops = &sc->ops; 2284 struct ieee80211com *ic = &sc->sc_ic; 2285 struct ifnet *ifp = &ic->ic_if; 2286 uint16_t hw; 2287 2288 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map, 2289 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD); 2290 2291 hw = letoh16(sc->rxq.stat->closed_count) & 0xfff; 2292 while (sc->rxq.cur != hw) { 2293 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2294 struct iwn_rx_desc *desc; 2295 2296 bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof (*desc), 2297 BUS_DMASYNC_POSTREAD); 2298 desc = mtod(data->m, struct iwn_rx_desc *); 2299 2300 DPRINTFN(4, ("notification qid=%d idx=%d flags=%x type=%d\n", 2301 desc->qid & 0xf, desc->idx, desc->flags, desc->type)); 2302 2303 if (!(desc->qid & 0x80)) /* Reply to a command. */ 2304 iwn_cmd_done(sc, desc); 2305 2306 switch (desc->type) { 2307 case IWN_RX_PHY: 2308 iwn_rx_phy(sc, desc, data); 2309 break; 2310 2311 case IWN_RX_DONE: /* 4965AGN only. */ 2312 case IWN_MPDU_RX_DONE: 2313 /* An 802.11 frame has been received. */ 2314 iwn_rx_done(sc, desc, data); 2315 break; 2316 #ifndef IEEE80211_NO_HT 2317 case IWN_RX_COMPRESSED_BA: 2318 /* A Compressed BlockAck has been received. */ 2319 iwn_rx_compressed_ba(sc, desc, data); 2320 break; 2321 #endif 2322 case IWN_TX_DONE: 2323 /* An 802.11 frame has been transmitted. */ 2324 ops->tx_done(sc, desc, data); 2325 break; 2326 2327 case IWN_RX_STATISTICS: 2328 case IWN_BEACON_STATISTICS: 2329 iwn_rx_statistics(sc, desc, data); 2330 break; 2331 2332 case IWN_BEACON_MISSED: 2333 { 2334 struct iwn_beacon_missed *miss = 2335 (struct iwn_beacon_missed *)(desc + 1); 2336 2337 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2338 sizeof (*miss), BUS_DMASYNC_POSTREAD); 2339 /* 2340 * If more than 5 consecutive beacons are missed, 2341 * reinitialize the sensitivity state machine. 2342 */ 2343 DPRINTF(("beacons missed %d/%d\n", 2344 letoh32(miss->consecutive), letoh32(miss->total))); 2345 if (ic->ic_state == IEEE80211_S_RUN && 2346 letoh32(miss->consecutive) > 5) 2347 (void)iwn_init_sensitivity(sc); 2348 break; 2349 } 2350 case IWN_UC_READY: 2351 { 2352 struct iwn_ucode_info *uc = 2353 (struct iwn_ucode_info *)(desc + 1); 2354 2355 /* The microcontroller is ready. */ 2356 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2357 sizeof (*uc), BUS_DMASYNC_POSTREAD); 2358 DPRINTF(("microcode alive notification version=%d.%d " 2359 "subtype=%x alive=%x\n", uc->major, uc->minor, 2360 uc->subtype, letoh32(uc->valid))); 2361 2362 if (letoh32(uc->valid) != 1) { 2363 printf("%s: microcontroller initialization " 2364 "failed\n", sc->sc_dev.dv_xname); 2365 break; 2366 } 2367 if (uc->subtype == IWN_UCODE_INIT) { 2368 /* Save microcontroller report. */ 2369 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 2370 } 2371 /* Save the address of the error log in SRAM. */ 2372 sc->errptr = letoh32(uc->errptr); 2373 break; 2374 } 2375 case IWN_STATE_CHANGED: 2376 { 2377 uint32_t *status = (uint32_t *)(desc + 1); 2378 2379 /* Enabled/disabled notification. 
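			 * Bit 0 of the status word reflects the RF kill
			 * switch: when it is set the radio is disabled, so
			 * the interface is marked down and stopped below.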
*/ 2380 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2381 sizeof (*status), BUS_DMASYNC_POSTREAD); 2382 DPRINTF(("state changed to %x\n", letoh32(*status))); 2383 2384 if (letoh32(*status) & 1) { 2385 /* The radio button has to be pushed. */ 2386 printf("%s: Radio transmitter is off\n", 2387 sc->sc_dev.dv_xname); 2388 /* Turn the interface down. */ 2389 ifp->if_flags &= ~IFF_UP; 2390 iwn_stop(ifp, 1); 2391 return; /* No further processing. */ 2392 } 2393 break; 2394 } 2395 case IWN_START_SCAN: 2396 { 2397 struct iwn_start_scan *scan = 2398 (struct iwn_start_scan *)(desc + 1); 2399 2400 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2401 sizeof (*scan), BUS_DMASYNC_POSTREAD); 2402 DPRINTFN(2, ("scanning channel %d status %x\n", 2403 scan->chan, letoh32(scan->status))); 2404 2405 /* Fix current channel. */ 2406 ic->ic_bss->ni_chan = &ic->ic_channels[scan->chan]; 2407 break; 2408 } 2409 case IWN_STOP_SCAN: 2410 { 2411 struct iwn_stop_scan *scan = 2412 (struct iwn_stop_scan *)(desc + 1); 2413 2414 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2415 sizeof (*scan), BUS_DMASYNC_POSTREAD); 2416 DPRINTF(("scan finished nchan=%d status=%d chan=%d\n", 2417 scan->nchan, scan->status, scan->chan)); 2418 2419 if (scan->status == 1 && scan->chan <= 14 && 2420 (sc->sc_flags & IWN_FLAG_HAS_5GHZ)) { 2421 /* 2422 * We just finished scanning 2GHz channels, 2423 * start scanning 5GHz ones. 2424 */ 2425 if (iwn_scan(sc, IEEE80211_CHAN_5GHZ) == 0) 2426 break; 2427 } 2428 ieee80211_end_scan(ifp); 2429 break; 2430 } 2431 case IWN5000_CALIBRATION_RESULT: 2432 iwn5000_rx_calib_results(sc, desc, data); 2433 break; 2434 2435 case IWN5000_CALIBRATION_DONE: 2436 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 2437 wakeup(sc); 2438 break; 2439 } 2440 2441 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 2442 } 2443 2444 /* Tell the firmware what we have processed. */ 2445 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; 2446 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 2447 } 2448 2449 /* 2450 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2451 * from power-down sleep mode. 2452 */ 2453 void 2454 iwn_wakeup_intr(struct iwn_softc *sc) 2455 { 2456 int qid; 2457 2458 DPRINTF(("ucode wakeup from power-down sleep\n")); 2459 2460 /* Wakeup RX and TX rings. */ 2461 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 2462 for (qid = 0; qid < sc->ntxqs; qid++) { 2463 struct iwn_tx_ring *ring = &sc->txq[qid]; 2464 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 2465 } 2466 } 2467 2468 /* 2469 * Dump the error log of the firmware when a firmware panic occurs. Although 2470 * we can't debug the firmware because it is neither open source nor free, it 2471 * can help us to identify certain classes of problems. 2472 */ 2473 void 2474 iwn_fatal_intr(struct iwn_softc *sc) 2475 { 2476 struct iwn_fw_dump dump; 2477 int i; 2478 2479 /* Force a complete recalibration on next init. */ 2480 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 2481 2482 /* Check that the error log address is valid. */ 2483 if (sc->errptr < IWN_FW_DATA_BASE || 2484 sc->errptr + sizeof (dump) > 2485 IWN_FW_DATA_BASE + sc->fw_data_maxsz) { 2486 printf("%s: bad firmware error log address 0x%08x\n", 2487 sc->sc_dev.dv_xname, sc->errptr); 2488 return; 2489 } 2490 if (iwn_nic_lock(sc) != 0) { 2491 printf("%s: could not read firmware error log\n", 2492 sc->sc_dev.dv_xname); 2493 return; 2494 } 2495 /* Read firmware error log from SRAM. 
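	 * sc->errptr was recorded from the UC_READY notification when the
	 * firmware announced itself (see iwn_notif_intr() above); the log is
	 * copied out as sizeof (dump) / sizeof (uint32_t) 32-bit words.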
*/ 2496 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 2497 sizeof (dump) / sizeof (uint32_t)); 2498 iwn_nic_unlock(sc); 2499 2500 if (dump.valid == 0) { 2501 printf("%s: firmware error log is empty\n", 2502 sc->sc_dev.dv_xname); 2503 return; 2504 } 2505 printf("firmware error log:\n"); 2506 printf(" error type = \"%s\" (0x%08X)\n", 2507 (dump.id < nitems(iwn_fw_errmsg)) ? 2508 iwn_fw_errmsg[dump.id] : "UNKNOWN", 2509 dump.id); 2510 printf(" program counter = 0x%08X\n", dump.pc); 2511 printf(" source line = 0x%08X\n", dump.src_line); 2512 printf(" error data = 0x%08X%08X\n", 2513 dump.error_data[0], dump.error_data[1]); 2514 printf(" branch link = 0x%08X%08X\n", 2515 dump.branch_link[0], dump.branch_link[1]); 2516 printf(" interrupt link = 0x%08X%08X\n", 2517 dump.interrupt_link[0], dump.interrupt_link[1]); 2518 printf(" time = %u\n", dump.time[0]); 2519 2520 /* Dump driver status (TX and RX rings) while we're here. */ 2521 printf("driver status:\n"); 2522 for (i = 0; i < sc->ntxqs; i++) { 2523 struct iwn_tx_ring *ring = &sc->txq[i]; 2524 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2525 i, ring->qid, ring->cur, ring->queued); 2526 } 2527 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2528 printf(" 802.11 state %d\n", sc->sc_ic.ic_state); 2529 } 2530 2531 int 2532 iwn_intr(void *arg) 2533 { 2534 struct iwn_softc *sc = arg; 2535 struct ifnet *ifp = &sc->sc_ic.ic_if; 2536 uint32_t r1, r2, tmp; 2537 2538 /* Disable interrupts. */ 2539 IWN_WRITE(sc, IWN_INT_MASK, 0); 2540 2541 /* Read interrupts from ICT (fast) or from registers (slow). */ 2542 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2543 tmp = 0; 2544 while (sc->ict[sc->ict_cur] != 0) { 2545 tmp |= sc->ict[sc->ict_cur]; 2546 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 2547 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 2548 } 2549 tmp = letoh32(tmp); 2550 if (tmp == 0xffffffff) /* Shouldn't happen. */ 2551 tmp = 0; 2552 else if (tmp & 0xc0000) /* Workaround a HW bug. */ 2553 tmp |= 0x8000; 2554 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 2555 r2 = 0; /* Unused. */ 2556 } else { 2557 r1 = IWN_READ(sc, IWN_INT); 2558 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 2559 return 0; /* Hardware gone! */ 2560 r2 = IWN_READ(sc, IWN_FH_INT); 2561 } 2562 if (r1 == 0 && r2 == 0) { 2563 if (ifp->if_flags & IFF_UP) 2564 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 2565 return 0; /* Interrupt not for us. */ 2566 } 2567 2568 /* Acknowledge interrupts. */ 2569 IWN_WRITE(sc, IWN_INT, r1); 2570 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 2571 IWN_WRITE(sc, IWN_FH_INT, r2); 2572 2573 if (r1 & IWN_INT_RF_TOGGLED) { 2574 tmp = IWN_READ(sc, IWN_GP_CNTRL); 2575 printf("%s: RF switch: radio %s\n", sc->sc_dev.dv_xname, 2576 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); 2577 } 2578 if (r1 & IWN_INT_CT_REACHED) { 2579 printf("%s: critical temperature reached!\n", 2580 sc->sc_dev.dv_xname); 2581 } 2582 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 2583 printf("%s: fatal firmware error\n", sc->sc_dev.dv_xname); 2584 /* Dump firmware error log and stop. 
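		 * There is no automatic recovery from a firmware panic: the
		 * interface is marked down and must be brought back up to
		 * reload the firmware.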
*/ 2585 iwn_fatal_intr(sc); 2586 ifp->if_flags &= ~IFF_UP; 2587 iwn_stop(ifp, 1); 2588 return 1; 2589 } 2590 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 2591 (r2 & IWN_FH_INT_RX)) { 2592 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2593 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 2594 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 2595 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 2596 IWN_INT_PERIODIC_DIS); 2597 iwn_notif_intr(sc); 2598 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 2599 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 2600 IWN_INT_PERIODIC_ENA); 2601 } 2602 } else 2603 iwn_notif_intr(sc); 2604 } 2605 2606 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 2607 if (sc->sc_flags & IWN_FLAG_USE_ICT) 2608 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 2609 wakeup(sc); /* FH DMA transfer completed. */ 2610 } 2611 2612 if (r1 & IWN_INT_ALIVE) 2613 wakeup(sc); /* Firmware is alive. */ 2614 2615 if (r1 & IWN_INT_WAKEUP) 2616 iwn_wakeup_intr(sc); 2617 2618 /* Re-enable interrupts. */ 2619 if (ifp->if_flags & IFF_UP) 2620 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 2621 2622 return 1; 2623 } 2624 2625 /* 2626 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 2627 * 5000 adapters use a slightly different format). 2628 */ 2629 void 2630 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2631 uint16_t len) 2632 { 2633 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 2634 2635 *w = htole16(len + 8); 2636 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2637 (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t), 2638 BUS_DMASYNC_PREWRITE); 2639 if (idx < IWN_SCHED_WINSZ) { 2640 *(w + IWN_TX_RING_COUNT) = *w; 2641 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2642 (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr, 2643 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2644 } 2645 } 2646 2647 void 2648 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2649 uint16_t len) 2650 { 2651 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2652 2653 *w = htole16(id << 12 | (len + 8)); 2654 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2655 (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t), 2656 BUS_DMASYNC_PREWRITE); 2657 if (idx < IWN_SCHED_WINSZ) { 2658 *(w + IWN_TX_RING_COUNT) = *w; 2659 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2660 (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr, 2661 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2662 } 2663 } 2664 2665 void 2666 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 2667 { 2668 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2669 2670 *w = (*w & htole16(0xf000)) | htole16(1); 2671 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2672 (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t), 2673 BUS_DMASYNC_PREWRITE); 2674 if (idx < IWN_SCHED_WINSZ) { 2675 *(w + IWN_TX_RING_COUNT) = *w; 2676 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2677 (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr, 2678 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2679 } 2680 } 2681 2682 int 2683 iwn_tx(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2684 { 2685 struct ieee80211com *ic = &sc->sc_ic; 2686 struct iwn_node *wn = (void *)ni; 2687 struct iwn_tx_ring *ring; 2688 struct iwn_tx_desc *desc; 2689 struct iwn_tx_data *data; 2690 struct iwn_tx_cmd *cmd; 2691 struct iwn_cmd_data *tx; 2692 const struct iwn_rate *rinfo; 2693 struct ieee80211_frame *wh; 2694 struct ieee80211_key *k = NULL; 2695 struct mbuf *m1; 2696 enum ieee80211_edca_ac ac; 2697 uint32_t 
flags; 2698 uint16_t qos; 2699 u_int hdrlen; 2700 bus_dma_segment_t *seg; 2701 uint8_t *ivp, tid, ridx, txant, type; 2702 int i, totlen, hasqos, error, pad; 2703 2704 wh = mtod(m, struct ieee80211_frame *); 2705 hdrlen = ieee80211_get_hdrlen(wh); 2706 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2707 2708 /* Select EDCA Access Category and TX ring for this frame. */ 2709 if ((hasqos = ieee80211_has_qos(wh))) { 2710 qos = ieee80211_get_qos(wh); 2711 tid = qos & IEEE80211_QOS_TID; 2712 ac = ieee80211_up_to_ac(ic, tid); 2713 } else { 2714 tid = 0; 2715 ac = EDCA_AC_BE; 2716 } 2717 2718 ring = &sc->txq[ac]; 2719 desc = &ring->desc[ring->cur]; 2720 data = &ring->data[ring->cur]; 2721 2722 /* Choose a TX rate index. */ 2723 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 2724 type != IEEE80211_FC0_TYPE_DATA) { 2725 ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ? 2726 IWN_RIDX_OFDM6 : IWN_RIDX_CCK1; 2727 } else if (ic->ic_fixed_rate != -1) { 2728 ridx = sc->fixed_ridx; 2729 } else 2730 ridx = wn->ridx[ni->ni_txrate]; 2731 rinfo = &iwn_rates[ridx]; 2732 2733 #if NBPFILTER > 0 2734 if (sc->sc_drvbpf != NULL) { 2735 struct mbuf mb; 2736 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 2737 2738 tap->wt_flags = 0; 2739 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq); 2740 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags); 2741 tap->wt_rate = rinfo->rate; 2742 tap->wt_hwqueue = ac; 2743 if ((ic->ic_flags & IEEE80211_F_WEPON) && 2744 (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) 2745 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2746 2747 mb.m_data = (caddr_t)tap; 2748 mb.m_len = sc->sc_txtap_len; 2749 mb.m_next = m; 2750 mb.m_nextpkt = NULL; 2751 mb.m_type = 0; 2752 mb.m_flags = 0; 2753 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT); 2754 } 2755 #endif 2756 2757 totlen = m->m_pkthdr.len; 2758 2759 /* Encrypt the frame if need be. */ 2760 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2761 /* Retrieve key for TX. */ 2762 k = ieee80211_get_txkey(ic, wh, ni); 2763 if (k->k_cipher != IEEE80211_CIPHER_CCMP) { 2764 /* Do software encryption. */ 2765 if ((m = ieee80211_encrypt(ic, m, k)) == NULL) 2766 return ENOBUFS; 2767 /* 802.11 header may have moved. */ 2768 wh = mtod(m, struct ieee80211_frame *); 2769 totlen = m->m_pkthdr.len; 2770 2771 } else /* HW appends CCMP MIC. */ 2772 totlen += IEEE80211_CCMP_HDRLEN; 2773 } 2774 2775 /* Prepare TX firmware command. */ 2776 cmd = &ring->cmd[ring->cur]; 2777 cmd->code = IWN_CMD_TX_DATA; 2778 cmd->flags = 0; 2779 cmd->qid = ring->qid; 2780 cmd->idx = ring->cur; 2781 2782 tx = (struct iwn_cmd_data *)cmd->data; 2783 /* NB: No need to clear tx, all fields are reinitialized here. */ 2784 tx->scratch = 0; /* clear "scratch" area */ 2785 2786 flags = 0; 2787 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2788 /* Unicast frame, check if an ACK is expected. */ 2789 if (!hasqos || (qos & IEEE80211_QOS_ACK_POLICY_MASK) != 2790 IEEE80211_QOS_ACK_POLICY_NOACK) 2791 flags |= IWN_TX_NEED_ACK; 2792 } 2793 if ((wh->i_fc[0] & 2794 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 2795 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) 2796 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */ 2797 2798 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2799 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */ 2800 2801 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2802 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2803 /* NB: Group frames are sent using CCK in 802.11b/g. 
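		 * Unicast frames longer than the RTS threshold always get
		 * RTS/CTS; shorter ones get RTS/CTS or CTS-to-self only when
		 * 802.11g protection is in effect and the frame goes out at
		 * an OFDM rate.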
*/ 2804 if (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) { 2805 flags |= IWN_TX_NEED_RTS; 2806 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2807 ridx >= IWN_RIDX_OFDM6) { 2808 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2809 flags |= IWN_TX_NEED_CTS; 2810 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2811 flags |= IWN_TX_NEED_RTS; 2812 } 2813 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { 2814 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 2815 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 2816 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); 2817 flags |= IWN_TX_NEED_PROTECTION; 2818 } else 2819 flags |= IWN_TX_FULL_TXOP; 2820 } 2821 } 2822 2823 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 2824 type != IEEE80211_FC0_TYPE_DATA) 2825 tx->id = sc->broadcast_id; 2826 else 2827 tx->id = wn->id; 2828 2829 if (type == IEEE80211_FC0_TYPE_MGT) { 2830 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2831 2832 #ifndef IEEE80211_STA_ONLY 2833 /* Tell HW to set timestamp in probe responses. */ 2834 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2835 flags |= IWN_TX_INSERT_TSTAMP; 2836 #endif 2837 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2838 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2839 tx->timeout = htole16(3); 2840 else 2841 tx->timeout = htole16(2); 2842 } else 2843 tx->timeout = htole16(0); 2844 2845 if (hdrlen & 3) { 2846 /* First segment length must be a multiple of 4. */ 2847 flags |= IWN_TX_NEED_PADDING; 2848 pad = 4 - (hdrlen & 3); 2849 } else 2850 pad = 0; 2851 2852 tx->len = htole16(totlen); 2853 tx->tid = tid; 2854 tx->rts_ntries = 60; 2855 tx->data_ntries = 15; 2856 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 2857 tx->plcp = rinfo->plcp; 2858 tx->rflags = rinfo->flags; 2859 if (tx->id == sc->broadcast_id) { 2860 /* Group or management frame. */ 2861 tx->linkq = 0; 2862 /* XXX Alternate between antenna A and B? */ 2863 txant = IWN_LSB(sc->txchainmask); 2864 tx->rflags |= IWN_RFLAG_ANT(txant); 2865 } else { 2866 tx->linkq = ni->ni_rates.rs_nrates - ni->ni_txrate - 1; 2867 flags |= IWN_TX_LINKQ; /* enable MRR */ 2868 } 2869 /* Set physical address of "scratch area". */ 2870 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 2871 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 2872 2873 /* Copy 802.11 header in TX command. */ 2874 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 2875 2876 if (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) { 2877 /* Trim 802.11 header and prepend CCMP IV. */ 2878 m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN); 2879 ivp = mtod(m, uint8_t *); 2880 k->k_tsc++; 2881 ivp[0] = k->k_tsc; 2882 ivp[1] = k->k_tsc >> 8; 2883 ivp[2] = 0; 2884 ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV; 2885 ivp[4] = k->k_tsc >> 16; 2886 ivp[5] = k->k_tsc >> 24; 2887 ivp[6] = k->k_tsc >> 32; 2888 ivp[7] = k->k_tsc >> 40; 2889 2890 tx->security = IWN_CIPHER_CCMP; 2891 /* XXX flags |= IWN_TX_AMPDU_CCMP; */ 2892 memcpy(tx->key, k->k_key, k->k_len); 2893 2894 /* TX scheduler includes CCMP MIC len w/5000 Series. */ 2895 if (sc->hw_type != IWN_HW_REV_TYPE_4965) 2896 totlen += IEEE80211_CCMP_MICLEN; 2897 } else { 2898 /* Trim 802.11 header. */ 2899 m_adj(m, hdrlen); 2900 tx->security = 0; 2901 } 2902 tx->flags = htole32(flags); 2903 2904 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 2905 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 2906 if (error != 0) { 2907 if (error != EFBIG) { 2908 printf("%s: can't map mbuf (error %d)\n", 2909 sc->sc_dev.dv_xname, error); 2910 m_freem(m); 2911 return error; 2912 } 2913 /* Too many DMA segments, linearize mbuf. 
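		 * EFBIG means the chain has more segments than the TX DMA map
		 * allows; the frame is copied into a single contiguous mbuf
		 * (cluster-backed if it does not fit in a header mbuf) and the
		 * load is retried once.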
*/ 2914 MGETHDR(m1, M_DONTWAIT, MT_DATA); 2915 if (m1 == NULL) { 2916 m_freem(m); 2917 return ENOBUFS; 2918 } 2919 if (m->m_pkthdr.len > MHLEN) { 2920 MCLGET(m1, M_DONTWAIT); 2921 if (!(m1->m_flags & M_EXT)) { 2922 m_freem(m); 2923 m_freem(m1); 2924 return ENOBUFS; 2925 } 2926 } 2927 m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, caddr_t)); 2928 m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len; 2929 m_freem(m); 2930 m = m1; 2931 2932 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 2933 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 2934 if (error != 0) { 2935 printf("%s: can't map mbuf (error %d)\n", 2936 sc->sc_dev.dv_xname, error); 2937 m_freem(m); 2938 return error; 2939 } 2940 } 2941 2942 data->m = m; 2943 data->ni = ni; 2944 2945 DPRINTFN(4, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n", 2946 ring->qid, ring->cur, m->m_pkthdr.len, data->map->dm_nsegs)); 2947 2948 /* Fill TX descriptor. */ 2949 desc->nsegs = 1 + data->map->dm_nsegs; 2950 /* First DMA segment is used by the TX command. */ 2951 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 2952 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 2953 (4 + sizeof (*tx) + hdrlen + pad) << 4); 2954 /* Other DMA segments are for data payload. */ 2955 seg = data->map->dm_segs; 2956 for (i = 1; i <= data->map->dm_nsegs; i++) { 2957 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 2958 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 2959 seg->ds_len << 4); 2960 seg++; 2961 } 2962 2963 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize, 2964 BUS_DMASYNC_PREWRITE); 2965 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map, 2966 (caddr_t)cmd - ring->cmd_dma.vaddr, sizeof (*cmd), 2967 BUS_DMASYNC_PREWRITE); 2968 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 2969 (caddr_t)desc - ring->desc_dma.vaddr, sizeof (*desc), 2970 BUS_DMASYNC_PREWRITE); 2971 2972 #ifdef notyet 2973 /* Update TX scheduler. */ 2974 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 2975 #endif 2976 2977 /* Kick TX ring. */ 2978 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 2979 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 2980 2981 /* Mark TX ring as full if we reach a certain threshold. */ 2982 if (++ring->queued > IWN_TX_RING_HIMARK) 2983 sc->qfullmsk |= 1 << ring->qid; 2984 2985 return 0; 2986 } 2987 2988 void 2989 iwn_start(struct ifnet *ifp) 2990 { 2991 struct iwn_softc *sc = ifp->if_softc; 2992 struct ieee80211com *ic = &sc->sc_ic; 2993 struct ieee80211_node *ni; 2994 struct mbuf *m; 2995 2996 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 2997 return; 2998 2999 for (;;) { 3000 if (sc->qfullmsk != 0) { 3001 ifp->if_flags |= IFF_OACTIVE; 3002 break; 3003 } 3004 /* Send pending management frames first. */ 3005 IF_DEQUEUE(&ic->ic_mgtq, m); 3006 if (m != NULL) { 3007 ni = (void *)m->m_pkthdr.rcvif; 3008 goto sendit; 3009 } 3010 if (ic->ic_state != IEEE80211_S_RUN) 3011 break; 3012 3013 /* Encapsulate and send data frames. 
*/ 3014 IFQ_DEQUEUE(&ifp->if_snd, m); 3015 if (m == NULL) 3016 break; 3017 #if NBPFILTER > 0 3018 if (ifp->if_bpf != NULL) 3019 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 3020 #endif 3021 if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) 3022 continue; 3023 sendit: 3024 #if NBPFILTER > 0 3025 if (ic->ic_rawbpf != NULL) 3026 bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT); 3027 #endif 3028 if (iwn_tx(sc, m, ni) != 0) { 3029 ieee80211_release_node(ic, ni); 3030 ifp->if_oerrors++; 3031 continue; 3032 } 3033 3034 sc->sc_tx_timer = 5; 3035 ifp->if_timer = 1; 3036 } 3037 } 3038 3039 void 3040 iwn_watchdog(struct ifnet *ifp) 3041 { 3042 struct iwn_softc *sc = ifp->if_softc; 3043 3044 ifp->if_timer = 0; 3045 3046 if (sc->sc_tx_timer > 0) { 3047 if (--sc->sc_tx_timer == 0) { 3048 printf("%s: device timeout\n", sc->sc_dev.dv_xname); 3049 ifp->if_flags &= ~IFF_UP; 3050 iwn_stop(ifp, 1); 3051 ifp->if_oerrors++; 3052 return; 3053 } 3054 ifp->if_timer = 1; 3055 } 3056 3057 ieee80211_watchdog(ifp); 3058 } 3059 3060 int 3061 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 3062 { 3063 struct iwn_softc *sc = ifp->if_softc; 3064 struct ieee80211com *ic = &sc->sc_ic; 3065 struct ifaddr *ifa; 3066 struct ifreq *ifr; 3067 int s, error = 0; 3068 3069 s = splnet(); 3070 /* 3071 * Prevent processes from entering this function while another 3072 * process is tsleep'ing in it. 3073 */ 3074 while ((sc->sc_flags & IWN_FLAG_BUSY) && error == 0) 3075 error = tsleep(&sc->sc_flags, PCATCH, "iwnioc", 0); 3076 if (error != 0) { 3077 splx(s); 3078 return error; 3079 } 3080 sc->sc_flags |= IWN_FLAG_BUSY; 3081 3082 switch (cmd) { 3083 case SIOCSIFADDR: 3084 ifa = (struct ifaddr *)data; 3085 ifp->if_flags |= IFF_UP; 3086 #ifdef INET 3087 if (ifa->ifa_addr->sa_family == AF_INET) 3088 arp_ifinit(&ic->ic_ac, ifa); 3089 #endif 3090 /* FALLTHROUGH */ 3091 case SIOCSIFFLAGS: 3092 if (ifp->if_flags & IFF_UP) { 3093 if (!(ifp->if_flags & IFF_RUNNING)) 3094 error = iwn_init(ifp); 3095 } else { 3096 if (ifp->if_flags & IFF_RUNNING) 3097 iwn_stop(ifp, 1); 3098 } 3099 break; 3100 3101 case SIOCADDMULTI: 3102 case SIOCDELMULTI: 3103 ifr = (struct ifreq *)data; 3104 error = (cmd == SIOCADDMULTI) ? 3105 ether_addmulti(ifr, &ic->ic_ac) : 3106 ether_delmulti(ifr, &ic->ic_ac); 3107 3108 if (error == ENETRESET) 3109 error = 0; 3110 break; 3111 3112 case SIOCS80211POWER: 3113 error = ieee80211_ioctl(ifp, cmd, data); 3114 if (error != ENETRESET) 3115 break; 3116 if (ic->ic_state == IEEE80211_S_RUN && 3117 sc->calib.state == IWN_CALIB_STATE_RUN) { 3118 if (ic->ic_flags & IEEE80211_F_PMGTON) 3119 error = iwn_set_pslevel(sc, 0, 3, 0); 3120 else /* back to CAM */ 3121 error = iwn_set_pslevel(sc, 0, 0, 0); 3122 } else { 3123 /* Defer until transition to IWN_CALIB_STATE_RUN. */ 3124 error = 0; 3125 } 3126 break; 3127 3128 default: 3129 error = ieee80211_ioctl(ifp, cmd, data); 3130 } 3131 3132 if (error == ENETRESET) { 3133 error = 0; 3134 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 3135 (IFF_UP | IFF_RUNNING)) { 3136 iwn_stop(ifp, 0); 3137 error = iwn_init(ifp); 3138 } 3139 } 3140 3141 sc->sc_flags &= ~IWN_FLAG_BUSY; 3142 wakeup(&sc->sc_flags); 3143 splx(s); 3144 return error; 3145 } 3146 3147 /* 3148 * Send a command to the firmware. 
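 *
 * A usage sketch (illustration only, mirroring callers elsewhere in this
 * file): commands are small structures copied into the command ring, and
 * passing async == 0 puts the caller to sleep until iwn_cmd_done() issues
 * the wakeup for the corresponding ring entry.
 */
#if 0	/* Example only, not compiled into the driver. */
static int
iwn_cmd_usage_sketch(struct iwn_softc *sc)
{
	uint32_t flags = 0;

	/* Synchronous statistics request; returns 0 or a tsleep(9) error. */
	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 0);
}
#endif

/*
 * Commands larger than the preallocated slot are bounced through a cluster
 * mbuf; see iwn_cmd_done() for the matching cleanup.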
3149 */ 3150 int 3151 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) 3152 { 3153 struct iwn_tx_ring *ring = &sc->txq[4]; 3154 struct iwn_tx_desc *desc; 3155 struct iwn_tx_data *data; 3156 struct iwn_tx_cmd *cmd; 3157 struct mbuf *m; 3158 bus_addr_t paddr; 3159 int totlen, error; 3160 3161 desc = &ring->desc[ring->cur]; 3162 data = &ring->data[ring->cur]; 3163 totlen = 4 + size; 3164 3165 if (size > sizeof cmd->data) { 3166 /* Command is too large to fit in a descriptor. */ 3167 if (totlen > MCLBYTES) 3168 return EINVAL; 3169 MGETHDR(m, M_DONTWAIT, MT_DATA); 3170 if (m == NULL) 3171 return ENOMEM; 3172 if (totlen > MHLEN) { 3173 MCLGET(m, M_DONTWAIT); 3174 if (!(m->m_flags & M_EXT)) { 3175 m_freem(m); 3176 return ENOMEM; 3177 } 3178 } 3179 cmd = mtod(m, struct iwn_tx_cmd *); 3180 error = bus_dmamap_load(sc->sc_dmat, data->map, cmd, totlen, 3181 NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3182 if (error != 0) { 3183 m_freem(m); 3184 return error; 3185 } 3186 data->m = m; 3187 paddr = data->map->dm_segs[0].ds_addr; 3188 } else { 3189 cmd = &ring->cmd[ring->cur]; 3190 paddr = data->cmd_paddr; 3191 } 3192 3193 cmd->code = code; 3194 cmd->flags = 0; 3195 cmd->qid = ring->qid; 3196 cmd->idx = ring->cur; 3197 memcpy(cmd->data, buf, size); 3198 3199 desc->nsegs = 1; 3200 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 3201 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); 3202 3203 if (size > sizeof cmd->data) { 3204 bus_dmamap_sync(sc->sc_dmat, data->map, 0, totlen, 3205 BUS_DMASYNC_PREWRITE); 3206 } else { 3207 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map, 3208 (caddr_t)cmd - ring->cmd_dma.vaddr, totlen, 3209 BUS_DMASYNC_PREWRITE); 3210 } 3211 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 3212 (caddr_t)desc - ring->desc_dma.vaddr, sizeof (*desc), 3213 BUS_DMASYNC_PREWRITE); 3214 3215 #ifdef notyet 3216 /* Update TX scheduler. */ 3217 ops->update_sched(sc, ring->qid, ring->cur, 0, 0); 3218 #endif 3219 3220 /* Kick command ring. */ 3221 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3222 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3223 3224 return async ? 0 : tsleep(desc, PCATCH, "iwncmd", hz); 3225 } 3226 3227 int 3228 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3229 { 3230 struct iwn4965_node_info hnode; 3231 caddr_t src, dst; 3232 3233 /* 3234 * We use the node structure for 5000 Series internally (it is 3235 * a superset of the one for 4965AGN). We thus copy the common 3236 * fields before sending the command. 3237 */ 3238 src = (caddr_t)node; 3239 dst = (caddr_t)&hnode; 3240 memcpy(dst, src, 48); 3241 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ 3242 memcpy(dst + 48, src + 72, 20); 3243 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); 3244 } 3245 3246 int 3247 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3248 { 3249 /* Direct mapping. */ 3250 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); 3251 } 3252 3253 int 3254 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni) 3255 { 3256 struct iwn_node *wn = (void *)ni; 3257 struct ieee80211_rateset *rs = &ni->ni_rates; 3258 struct iwn_cmd_link_quality linkq; 3259 const struct iwn_rate *rinfo; 3260 uint8_t txant; 3261 int i, txrate; 3262 3263 /* Use the first valid TX antenna. 
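	 * The retry table built below starts at the node's highest negotiated
	 * rate and steps down one rate per entry; once the lowest rate is
	 * reached it is repeated for the remaining retries.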
*/ 3264 txant = IWN_LSB(sc->txchainmask); 3265 3266 memset(&linkq, 0, sizeof linkq); 3267 linkq.id = wn->id; 3268 linkq.antmsk_1stream = txant; 3269 linkq.antmsk_2stream = IWN_ANT_AB; 3270 linkq.ampdu_max = 31; 3271 linkq.ampdu_threshold = 3; 3272 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3273 3274 /* Start at highest available bit-rate. */ 3275 txrate = rs->rs_nrates - 1; 3276 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) { 3277 rinfo = &iwn_rates[wn->ridx[txrate]]; 3278 linkq.retry[i].plcp = rinfo->plcp; 3279 linkq.retry[i].rflags = rinfo->flags; 3280 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant); 3281 /* Next retry at immediate lower bit-rate. */ 3282 if (txrate > 0) 3283 txrate--; 3284 } 3285 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1); 3286 } 3287 3288 /* 3289 * Broadcast node is used to send group-addressed and management frames. 3290 */ 3291 int 3292 iwn_add_broadcast_node(struct iwn_softc *sc, int async) 3293 { 3294 struct iwn_ops *ops = &sc->ops; 3295 struct iwn_node_info node; 3296 struct iwn_cmd_link_quality linkq; 3297 const struct iwn_rate *rinfo; 3298 uint8_t txant; 3299 int i, error; 3300 3301 memset(&node, 0, sizeof node); 3302 IEEE80211_ADDR_COPY(node.macaddr, etherbroadcastaddr); 3303 node.id = sc->broadcast_id; 3304 DPRINTF(("adding broadcast node\n")); 3305 if ((error = ops->add_node(sc, &node, async)) != 0) 3306 return error; 3307 3308 /* Use the first valid TX antenna. */ 3309 txant = IWN_LSB(sc->txchainmask); 3310 3311 memset(&linkq, 0, sizeof linkq); 3312 linkq.id = sc->broadcast_id; 3313 linkq.antmsk_1stream = txant; 3314 linkq.antmsk_2stream = IWN_ANT_AB; 3315 linkq.ampdu_max = 64; 3316 linkq.ampdu_threshold = 3; 3317 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3318 3319 /* Use lowest mandatory bit-rate. */ 3320 rinfo = (sc->sc_ic.ic_curmode != IEEE80211_MODE_11A) ? 3321 &iwn_rates[IWN_RIDX_CCK1] : &iwn_rates[IWN_RIDX_OFDM6]; 3322 linkq.retry[0].plcp = rinfo->plcp; 3323 linkq.retry[0].rflags = rinfo->flags; 3324 linkq.retry[0].rflags |= IWN_RFLAG_ANT(txant); 3325 /* Use same bit-rate for all TX retries. */ 3326 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) { 3327 linkq.retry[i].plcp = linkq.retry[0].plcp; 3328 linkq.retry[i].rflags = linkq.retry[0].rflags; 3329 } 3330 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); 3331 } 3332 3333 void 3334 iwn_updateedca(struct ieee80211com *ic) 3335 { 3336 #define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3337 struct iwn_softc *sc = ic->ic_softc; 3338 struct iwn_edca_params cmd; 3339 int aci; 3340 3341 memset(&cmd, 0, sizeof cmd); 3342 cmd.flags = htole32(IWN_EDCA_UPDATE); 3343 for (aci = 0; aci < EDCA_NUM_AC; aci++) { 3344 const struct ieee80211_edca_ac_params *ac = 3345 &ic->ic_edca_ac[aci]; 3346 cmd.ac[aci].aifsn = ac->ac_aifsn; 3347 cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->ac_ecwmin)); 3348 cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->ac_ecwmax)); 3349 cmd.ac[aci].txoplimit = 3350 htole16(IEEE80211_TXOP_TO_US(ac->ac_txoplimit)); 3351 } 3352 (void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3353 #undef IWN_EXP2 3354 } 3355 3356 void 3357 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3358 { 3359 struct iwn_cmd_led led; 3360 3361 /* Clear microcode LED ownership. 
*/ 3362 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL); 3363 3364 led.which = which; 3365 led.unit = htole32(10000); /* on/off in unit of 100ms */ 3366 led.off = off; 3367 led.on = on; 3368 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); 3369 } 3370 3371 /* 3372 * Set the critical temperature at which the firmware will stop the radio 3373 * and notify us. 3374 */ 3375 int 3376 iwn_set_critical_temp(struct iwn_softc *sc) 3377 { 3378 struct iwn_critical_temp crit; 3379 int32_t temp; 3380 3381 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); 3382 3383 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 3384 temp = (IWN_CTOK(110) - sc->temp_off) * -5; 3385 else if (sc->hw_type == IWN_HW_REV_TYPE_4965) 3386 temp = IWN_CTOK(110); 3387 else 3388 temp = 110; 3389 memset(&crit, 0, sizeof crit); 3390 crit.tempR = htole32(temp); 3391 DPRINTF(("setting critical temperature to %d\n", temp)); 3392 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); 3393 } 3394 3395 int 3396 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) 3397 { 3398 struct iwn_cmd_timing cmd; 3399 uint64_t val, mod; 3400 3401 memset(&cmd, 0, sizeof cmd); 3402 memcpy(&cmd.tstamp, ni->ni_tstamp, sizeof (uint64_t)); 3403 cmd.bintval = htole16(ni->ni_intval); 3404 cmd.lintval = htole16(10); 3405 3406 /* Compute remaining time until next beacon. */ 3407 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 3408 mod = letoh64(cmd.tstamp) % val; 3409 cmd.binitval = htole32((uint32_t)(val - mod)); 3410 3411 DPRINTF(("timing bintval=%u, tstamp=%llu, init=%u\n", 3412 ni->ni_intval, letoh64(cmd.tstamp), (uint32_t)(val - mod))); 3413 3414 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); 3415 } 3416 3417 void 3418 iwn4965_power_calibration(struct iwn_softc *sc, int temp) 3419 { 3420 /* Adjust TX power if need be (delta >= 3 degC). */ 3421 DPRINTF(("temperature %d->%d\n", sc->temp, temp)); 3422 if (abs(temp - sc->temp) >= 3) { 3423 /* Record temperature of last calibration. */ 3424 sc->temp = temp; 3425 (void)iwn4965_set_txpower(sc, 1); 3426 } 3427 } 3428 3429 /* 3430 * Set TX power for current channel (each rate has its own power settings). 3431 * This function takes into account the regulatory information from EEPROM, 3432 * the current temperature and the current voltage. 3433 */ 3434 int 3435 iwn4965_set_txpower(struct iwn_softc *sc, int async) 3436 { 3437 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3438 #define fdivround(a, b, n) \ 3439 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3440 /* Linear interpolation. */ 3441 #define interpolate(x, x1, y1, x2, y2, n) \ 3442 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3443 3444 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; 3445 struct ieee80211com *ic = &sc->sc_ic; 3446 struct iwn_ucode_info *uc = &sc->ucode_info; 3447 struct ieee80211_channel *ch; 3448 struct iwn4965_cmd_txpower cmd; 3449 struct iwn4965_eeprom_chan_samples *chans; 3450 const uint8_t *rf_gain, *dsp_gain; 3451 int32_t vdiff, tdiff; 3452 int i, c, grp, maxpwr; 3453 uint8_t chan; 3454 3455 /* Retrieve current channel from last RXON. */ 3456 chan = sc->rxon.chan; 3457 DPRINTF(("setting TX power for channel %d\n", chan)); 3458 ch = &ic->ic_channels[chan]; 3459 3460 memset(&cmd, 0, sizeof cmd); 3461 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 
0 : 1; 3462 cmd.chan = chan; 3463 3464 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 3465 maxpwr = sc->maxpwr5GHz; 3466 rf_gain = iwn4965_rf_gain_5ghz; 3467 dsp_gain = iwn4965_dsp_gain_5ghz; 3468 } else { 3469 maxpwr = sc->maxpwr2GHz; 3470 rf_gain = iwn4965_rf_gain_2ghz; 3471 dsp_gain = iwn4965_dsp_gain_2ghz; 3472 } 3473 3474 /* Compute voltage compensation. */ 3475 vdiff = ((int32_t)letoh32(uc->volt) - sc->eeprom_voltage) / 7; 3476 if (vdiff > 0) 3477 vdiff *= 2; 3478 if (abs(vdiff) > 2) 3479 vdiff = 0; 3480 DPRINTF(("voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", 3481 vdiff, letoh32(uc->volt), sc->eeprom_voltage)); 3482 3483 /* Get channel attenuation group. */ 3484 if (chan <= 20) /* 1-20 */ 3485 grp = 4; 3486 else if (chan <= 43) /* 34-43 */ 3487 grp = 0; 3488 else if (chan <= 70) /* 44-70 */ 3489 grp = 1; 3490 else if (chan <= 124) /* 71-124 */ 3491 grp = 2; 3492 else /* 125-200 */ 3493 grp = 3; 3494 DPRINTF(("chan %d, attenuation group=%d\n", chan, grp)); 3495 3496 /* Get channel sub-band. */ 3497 for (i = 0; i < IWN_NBANDS; i++) 3498 if (sc->bands[i].lo != 0 && 3499 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) 3500 break; 3501 if (i == IWN_NBANDS) /* Can't happen in real-life. */ 3502 return EINVAL; 3503 chans = sc->bands[i].chans; 3504 DPRINTF(("chan %d sub-band=%d\n", chan, i)); 3505 3506 for (c = 0; c < 2; c++) { 3507 uint8_t power, gain, temp; 3508 int maxchpwr, pwr, ridx, idx; 3509 3510 power = interpolate(chan, 3511 chans[0].num, chans[0].samples[c][1].power, 3512 chans[1].num, chans[1].samples[c][1].power, 1); 3513 gain = interpolate(chan, 3514 chans[0].num, chans[0].samples[c][1].gain, 3515 chans[1].num, chans[1].samples[c][1].gain, 1); 3516 temp = interpolate(chan, 3517 chans[0].num, chans[0].samples[c][1].temp, 3518 chans[1].num, chans[1].samples[c][1].temp, 1); 3519 DPRINTF(("TX chain %d: power=%d gain=%d temp=%d\n", 3520 c, power, gain, temp)); 3521 3522 /* Compute temperature compensation. */ 3523 tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; 3524 DPRINTF(("temperature compensation=%d (current=%d, " 3525 "EEPROM=%d)\n", tdiff, sc->temp, temp)); 3526 3527 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { 3528 /* Convert dBm to half-dBm. */ 3529 maxchpwr = sc->maxpwr[chan] * 2; 3530 if ((ridx / 8) & 1) 3531 maxchpwr -= 6; /* MIMO 2T: -3dB */ 3532 3533 pwr = maxpwr; 3534 3535 /* Adjust TX power based on rate. */ 3536 if ((ridx % 8) == 5) 3537 pwr -= 15; /* OFDM48: -7.5dB */ 3538 else if ((ridx % 8) == 6) 3539 pwr -= 17; /* OFDM54: -8.5dB */ 3540 else if ((ridx % 8) == 7) 3541 pwr -= 20; /* OFDM60: -10dB */ 3542 else 3543 pwr -= 10; /* Others: -5dB */ 3544 3545 /* Do not exceed channel max TX power. */ 3546 if (pwr > maxchpwr) 3547 pwr = maxchpwr; 3548 3549 idx = gain - (pwr - power) - tdiff - vdiff; 3550 if ((ridx / 8) & 1) /* MIMO */ 3551 idx += (int32_t)letoh32(uc->atten[grp][c]); 3552 3553 if (cmd.band == 0) 3554 idx += 9; /* 5GHz */ 3555 if (ridx == IWN_RIDX_MAX) 3556 idx += 5; /* CCK */ 3557 3558 /* Make sure idx stays in a valid range. 
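			 * For instance (hypothetical numbers), a calibrated
			 * gain of 100, a target 2 half-dB steps above the
			 * sampled power, tdiff = 3 and vdiff = 0 give
			 * idx = 100 - 2 - 3 - 0 = 95, which the clamp below
			 * keeps within [0, IWN4965_MAX_PWR_INDEX].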
*/ 3559 if (idx < 0) 3560 idx = 0; 3561 else if (idx > IWN4965_MAX_PWR_INDEX) 3562 idx = IWN4965_MAX_PWR_INDEX; 3563 3564 DPRINTF(("TX chain %d, rate idx %d: power=%d\n", 3565 c, ridx, idx)); 3566 cmd.power[ridx].rf_gain[c] = rf_gain[idx]; 3567 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; 3568 } 3569 } 3570 3571 DPRINTF(("setting TX power for chan %d\n", chan)); 3572 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); 3573 3574 #undef interpolate 3575 #undef fdivround 3576 } 3577 3578 int 3579 iwn5000_set_txpower(struct iwn_softc *sc, int async) 3580 { 3581 struct iwn5000_cmd_txpower cmd; 3582 3583 /* 3584 * TX power calibration is handled automatically by the firmware 3585 * for 5000 Series. 3586 */ 3587 memset(&cmd, 0, sizeof cmd); 3588 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */ 3589 cmd.flags = IWN5000_TXPOWER_NO_CLOSED; 3590 cmd.srv_limit = IWN5000_TXPOWER_AUTO; 3591 DPRINTF(("setting TX power\n")); 3592 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async); 3593 } 3594 3595 /* 3596 * Retrieve the maximum RSSI (in dBm) among receivers. 3597 */ 3598 int 3599 iwn4965_get_rssi(const struct iwn_rx_stat *stat) 3600 { 3601 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf; 3602 uint8_t mask, agc; 3603 int rssi; 3604 3605 mask = (letoh16(phy->antenna) >> 4) & IWN_ANT_ABC; 3606 agc = (letoh16(phy->agc) >> 7) & 0x7f; 3607 3608 rssi = 0; 3609 if (mask & IWN_ANT_A) 3610 rssi = MAX(rssi, phy->rssi[0]); 3611 if (mask & IWN_ANT_B) 3612 rssi = MAX(rssi, phy->rssi[2]); 3613 if (mask & IWN_ANT_C) 3614 rssi = MAX(rssi, phy->rssi[4]); 3615 3616 return rssi - agc - IWN_RSSI_TO_DBM; 3617 } 3618 3619 int 3620 iwn5000_get_rssi(const struct iwn_rx_stat *stat) 3621 { 3622 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf; 3623 uint8_t agc; 3624 int rssi; 3625 3626 agc = (letoh32(phy->agc) >> 9) & 0x7f; 3627 3628 rssi = MAX(letoh16(phy->rssi[0]) & 0xff, 3629 letoh16(phy->rssi[1]) & 0xff); 3630 rssi = MAX(letoh16(phy->rssi[2]) & 0xff, rssi); 3631 3632 return rssi - agc - IWN_RSSI_TO_DBM; 3633 } 3634 3635 /* 3636 * Retrieve the average noise (in dBm) among receivers. 3637 */ 3638 int 3639 iwn_get_noise(const struct iwn_rx_general_stats *stats) 3640 { 3641 int i, total, nbant, noise; 3642 3643 total = nbant = 0; 3644 for (i = 0; i < 3; i++) { 3645 if ((noise = letoh32(stats->noise[i]) & 0xff) == 0) 3646 continue; 3647 total += noise; 3648 nbant++; 3649 } 3650 /* There should be at least one antenna but check anyway. */ 3651 return (nbant == 0) ? -127 : (total / nbant) - 107; 3652 } 3653 3654 /* 3655 * Compute temperature (in degC) from last received statistics. 3656 */ 3657 int 3658 iwn4965_get_temperature(struct iwn_softc *sc) 3659 { 3660 struct iwn_ucode_info *uc = &sc->ucode_info; 3661 int32_t r1, r2, r3, r4, temp; 3662 3663 r1 = letoh32(uc->temp[0].chan20MHz); 3664 r2 = letoh32(uc->temp[1].chan20MHz); 3665 r3 = letoh32(uc->temp[2].chan20MHz); 3666 r4 = letoh32(sc->rawtemp); 3667 3668 if (r1 == r3) /* Prevents division by 0 (should not happen). */ 3669 return 0; 3670 3671 /* Sign-extend 23-bit R4 value to 32-bit. */ 3672 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000; 3673 /* Compute temperature in Kelvin. 
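	 * The sign extension above maps, e.g., a raw 24-bit value of 0xfffffe
	 * to -2.  The current reading r4 is then converted using the factory
	 * calibration points r1..r3 taken from the microcode alive
	 * notification, with a final 97/100 scaling and +8 offset.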
*/ 3674 temp = (259 * (r4 - r2)) / (r3 - r1); 3675 temp = (temp * 97) / 100 + 8; 3676 3677 DPRINTF(("temperature %dK/%dC\n", temp, IWN_KTOC(temp))); 3678 return IWN_KTOC(temp); 3679 } 3680 3681 int 3682 iwn5000_get_temperature(struct iwn_softc *sc) 3683 { 3684 int32_t temp; 3685 3686 /* 3687 * Temperature is not used by the driver for 5000 Series because 3688 * TX power calibration is handled by firmware. 3689 */ 3690 temp = letoh32(sc->rawtemp); 3691 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 3692 temp = (temp / -5) + sc->temp_off; 3693 temp = IWN_KTOC(temp); 3694 } 3695 return temp; 3696 } 3697 3698 /* 3699 * Initialize sensitivity calibration state machine. 3700 */ 3701 int 3702 iwn_init_sensitivity(struct iwn_softc *sc) 3703 { 3704 struct iwn_ops *ops = &sc->ops; 3705 struct iwn_calib_state *calib = &sc->calib; 3706 uint32_t flags; 3707 int error; 3708 3709 /* Reset calibration state machine. */ 3710 memset(calib, 0, sizeof (*calib)); 3711 calib->state = IWN_CALIB_STATE_INIT; 3712 calib->cck_state = IWN_CCK_STATE_HIFA; 3713 /* Set initial correlation values. */ 3714 calib->ofdm_x1 = sc->limits->min_ofdm_x1; 3715 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; 3716 calib->ofdm_x4 = sc->limits->min_ofdm_x4; 3717 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; 3718 calib->cck_x4 = 125; 3719 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; 3720 calib->energy_cck = sc->limits->energy_cck; 3721 3722 /* Write initial sensitivity. */ 3723 if ((error = iwn_send_sensitivity(sc)) != 0) 3724 return error; 3725 3726 /* Write initial gains. */ 3727 if ((error = ops->init_gains(sc)) != 0) 3728 return error; 3729 3730 /* Request statistics at each beacon interval. */ 3731 flags = 0; 3732 DPRINTF(("sending request for statistics\n")); 3733 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 3734 } 3735 3736 /* 3737 * Collect noise and RSSI statistics for the first 20 beacons received 3738 * after association and use them to determine connected antennas and 3739 * to set differential gains. 3740 */ 3741 void 3742 iwn_collect_noise(struct iwn_softc *sc, 3743 const struct iwn_rx_general_stats *stats) 3744 { 3745 struct iwn_ops *ops = &sc->ops; 3746 struct iwn_calib_state *calib = &sc->calib; 3747 uint32_t val; 3748 int i; 3749 3750 /* Accumulate RSSI and noise for all 3 antennas. */ 3751 for (i = 0; i < 3; i++) { 3752 calib->rssi[i] += letoh32(stats->rssi[i]) & 0xff; 3753 calib->noise[i] += letoh32(stats->noise[i]) & 0xff; 3754 } 3755 /* NB: We update differential gains only once after 20 beacons. */ 3756 if (++calib->nbeacons < 20) 3757 return; 3758 3759 /* Determine highest average RSSI. */ 3760 val = MAX(calib->rssi[0], calib->rssi[1]); 3761 val = MAX(calib->rssi[2], val); 3762 3763 /* Determine which antennas are connected. */ 3764 sc->chainmask = sc->rxchainmask; 3765 for (i = 0; i < 3; i++) 3766 if (val - calib->rssi[i] > 15 * 20) 3767 sc->chainmask &= ~(1 << i); 3768 DPRINTF(("RX chains mask: theoretical=0x%x, actual=0x%x\n", 3769 sc->rxchainmask, sc->chainmask)); 3770 3771 /* If none of the TX antennas are connected, keep at least one. */ 3772 if ((sc->chainmask & sc->txchainmask) == 0) 3773 sc->chainmask |= IWN_LSB(sc->txchainmask); 3774 3775 (void)ops->set_gains(sc); 3776 calib->state = IWN_CALIB_STATE_RUN; 3777 3778 #ifdef notyet 3779 /* XXX Disable RX chains with no antennas connected. 
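	 * An antenna is treated as disconnected above when its RSSI summed
	 * over the 20 calibration beacons trails the best antenna by more
	 * than 15 * 20 = 300, i.e. by more than 15 per beacon on average.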
*/ 3780 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask)); 3781 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 3782 #endif 3783 3784 /* Enable power-saving mode if requested by user. */ 3785 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) 3786 (void)iwn_set_pslevel(sc, 0, 3, 1); 3787 } 3788 3789 int 3790 iwn4965_init_gains(struct iwn_softc *sc) 3791 { 3792 struct iwn_phy_calib_gain cmd; 3793 3794 memset(&cmd, 0, sizeof cmd); 3795 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 3796 /* Differential gains initially set to 0 for all 3 antennas. */ 3797 DPRINTF(("setting initial differential gains\n")); 3798 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 3799 } 3800 3801 int 3802 iwn5000_init_gains(struct iwn_softc *sc) 3803 { 3804 struct iwn_phy_calib cmd; 3805 3806 memset(&cmd, 0, sizeof cmd); 3807 cmd.code = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 3808 cmd.ngroups = 1; 3809 cmd.isvalid = 1; 3810 DPRINTF(("setting initial differential gains\n")); 3811 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 3812 } 3813 3814 int 3815 iwn4965_set_gains(struct iwn_softc *sc) 3816 { 3817 struct iwn_calib_state *calib = &sc->calib; 3818 struct iwn_phy_calib_gain cmd; 3819 int i, delta, noise; 3820 3821 /* Get minimal noise among connected antennas. */ 3822 noise = INT_MAX; /* NB: There's at least one antenna. */ 3823 for (i = 0; i < 3; i++) 3824 if (sc->chainmask & (1 << i)) 3825 noise = MIN(calib->noise[i], noise); 3826 3827 memset(&cmd, 0, sizeof cmd); 3828 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 3829 /* Set differential gains for connected antennas. */ 3830 for (i = 0; i < 3; i++) { 3831 if (sc->chainmask & (1 << i)) { 3832 /* Compute attenuation (in unit of 1.5dB). */ 3833 delta = (noise - (int32_t)calib->noise[i]) / 30; 3834 /* NB: delta <= 0 */ 3835 /* Limit to [-4.5dB,0]. */ 3836 cmd.gain[i] = MIN(abs(delta), 3); 3837 if (delta < 0) 3838 cmd.gain[i] |= 1 << 2; /* sign bit */ 3839 } 3840 } 3841 DPRINTF(("setting differential gains Ant A/B/C: %x/%x/%x (%x)\n", 3842 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask)); 3843 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 3844 } 3845 3846 int 3847 iwn5000_set_gains(struct iwn_softc *sc) 3848 { 3849 struct iwn_calib_state *calib = &sc->calib; 3850 struct iwn_phy_calib_gain cmd; 3851 int i, ant, div, delta; 3852 3853 /* We collected 20 beacons and !=6050 need a 1.5 factor. */ 3854 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30; 3855 3856 memset(&cmd, 0, sizeof cmd); 3857 cmd.code = IWN5000_PHY_CALIB_NOISE_GAIN; 3858 cmd.ngroups = 1; 3859 cmd.isvalid = 1; 3860 /* Get first available RX antenna as referential. */ 3861 ant = IWN_LSB(sc->rxchainmask); 3862 /* Set differential gains for other antennas. */ 3863 for (i = ant + 1; i < 3; i++) { 3864 if (sc->chainmask & (1 << i)) { 3865 /* The delta is relative to antenna "ant". */ 3866 delta = ((int32_t)calib->noise[ant] - 3867 (int32_t)calib->noise[i]) / div; 3868 /* Limit to [-4.5dB,+4.5dB]. */ 3869 cmd.gain[i - 1] = MIN(abs(delta), 3); 3870 if (delta < 0) 3871 cmd.gain[i - 1] |= 1 << 2; /* sign bit */ 3872 } 3873 } 3874 DPRINTF(("setting differential gains: %x/%x (%x)\n", 3875 cmd.gain[0], cmd.gain[1], sc->chainmask)); 3876 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 3877 } 3878 3879 /* 3880 * Tune RF RX sensitivity based on the number of false alarms detected 3881 * during the last beacon period. 
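 * Roughly: the OFDM and CCK false alarm counters are normalized to a
 * 200 TU window and compared with the time the receiver was enabled
 * (stats->general.load).  When the scaled count exceeds 50 times that
 * value the correlation thresholds are raised (less sensitive); below
 * 5 times it they are walked back down, always within the per-chip
 * bounds given by sc->limits.  Any change is sent to the firmware
 * through iwn_send_sensitivity().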
3882 */ 3883 void 3884 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) 3885 { 3886 #define inc(val, inc, max) \ 3887 if ((val) < (max)) { \ 3888 if ((val) < (max) - (inc)) \ 3889 (val) += (inc); \ 3890 else \ 3891 (val) = (max); \ 3892 needs_update = 1; \ 3893 } 3894 #define dec(val, dec, min) \ 3895 if ((val) > (min)) { \ 3896 if ((val) > (min) + (dec)) \ 3897 (val) -= (dec); \ 3898 else \ 3899 (val) = (min); \ 3900 needs_update = 1; \ 3901 } 3902 3903 const struct iwn_sensitivity_limits *limits = sc->limits; 3904 struct iwn_calib_state *calib = &sc->calib; 3905 uint32_t val, rxena, fa; 3906 uint32_t energy[3], energy_min; 3907 uint8_t noise[3], noise_ref; 3908 int i, needs_update = 0; 3909 3910 /* Check that we've been enabled long enough. */ 3911 if ((rxena = letoh32(stats->general.load)) == 0) 3912 return; 3913 3914 /* Compute number of false alarms since last call for OFDM. */ 3915 fa = letoh32(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; 3916 fa += letoh32(stats->ofdm.fa) - calib->fa_ofdm; 3917 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ 3918 3919 /* Save counters values for next call. */ 3920 calib->bad_plcp_ofdm = letoh32(stats->ofdm.bad_plcp); 3921 calib->fa_ofdm = letoh32(stats->ofdm.fa); 3922 3923 if (fa > 50 * rxena) { 3924 /* High false alarm count, decrease sensitivity. */ 3925 DPRINTFN(2, ("OFDM high false alarm count: %u\n", fa)); 3926 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); 3927 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); 3928 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); 3929 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); 3930 3931 } else if (fa < 5 * rxena) { 3932 /* Low false alarm count, increase sensitivity. */ 3933 DPRINTFN(2, ("OFDM low false alarm count: %u\n", fa)); 3934 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); 3935 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); 3936 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); 3937 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); 3938 } 3939 3940 /* Compute maximum noise among 3 receivers. */ 3941 for (i = 0; i < 3; i++) 3942 noise[i] = (letoh32(stats->general.noise[i]) >> 8) & 0xff; 3943 val = MAX(noise[0], noise[1]); 3944 val = MAX(noise[2], val); 3945 /* Insert it into our samples table. */ 3946 calib->noise_samples[calib->cur_noise_sample] = val; 3947 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; 3948 3949 /* Compute maximum noise among last 20 samples. */ 3950 noise_ref = calib->noise_samples[0]; 3951 for (i = 1; i < 20; i++) 3952 noise_ref = MAX(noise_ref, calib->noise_samples[i]); 3953 3954 /* Compute maximum energy among 3 receivers. */ 3955 for (i = 0; i < 3; i++) 3956 energy[i] = letoh32(stats->general.energy[i]); 3957 val = MIN(energy[0], energy[1]); 3958 val = MIN(energy[2], val); 3959 /* Insert it into our samples table. */ 3960 calib->energy_samples[calib->cur_energy_sample] = val; 3961 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; 3962 3963 /* Compute minimum energy among last 10 samples. */ 3964 energy_min = calib->energy_samples[0]; 3965 for (i = 1; i < 10; i++) 3966 energy_min = MAX(energy_min, calib->energy_samples[i]); 3967 energy_min += 6; 3968 3969 /* Compute number of false alarms since last call for CCK. */ 3970 fa = letoh32(stats->cck.bad_plcp) - calib->bad_plcp_cck; 3971 fa += letoh32(stats->cck.fa) - calib->fa_cck; 3972 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ 3973 3974 /* Save counters values for next call. 
*/ 3975 calib->bad_plcp_cck = letoh32(stats->cck.bad_plcp); 3976 calib->fa_cck = letoh32(stats->cck.fa); 3977 3978 if (fa > 50 * rxena) { 3979 /* High false alarm count, decrease sensitivity. */ 3980 DPRINTFN(2, ("CCK high false alarm count: %u\n", fa)); 3981 calib->cck_state = IWN_CCK_STATE_HIFA; 3982 calib->low_fa = 0; 3983 3984 if (calib->cck_x4 > 160) { 3985 calib->noise_ref = noise_ref; 3986 if (calib->energy_cck > 2) 3987 dec(calib->energy_cck, 2, energy_min); 3988 } 3989 if (calib->cck_x4 < 160) { 3990 calib->cck_x4 = 161; 3991 needs_update = 1; 3992 } else 3993 inc(calib->cck_x4, 3, limits->max_cck_x4); 3994 3995 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); 3996 3997 } else if (fa < 5 * rxena) { 3998 /* Low false alarm count, increase sensitivity. */ 3999 DPRINTFN(2, ("CCK low false alarm count: %u\n", fa)); 4000 calib->cck_state = IWN_CCK_STATE_LOFA; 4001 calib->low_fa++; 4002 4003 if (calib->cck_state != IWN_CCK_STATE_INIT && 4004 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || 4005 calib->low_fa > 100)) { 4006 inc(calib->energy_cck, 2, limits->min_energy_cck); 4007 dec(calib->cck_x4, 3, limits->min_cck_x4); 4008 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); 4009 } 4010 } else { 4011 /* Not worth to increase or decrease sensitivity. */ 4012 DPRINTFN(2, ("CCK normal false alarm count: %u\n", fa)); 4013 calib->low_fa = 0; 4014 calib->noise_ref = noise_ref; 4015 4016 if (calib->cck_state == IWN_CCK_STATE_HIFA) { 4017 /* Previous interval had many false alarms. */ 4018 dec(calib->energy_cck, 8, energy_min); 4019 } 4020 calib->cck_state = IWN_CCK_STATE_INIT; 4021 } 4022 4023 if (needs_update) 4024 (void)iwn_send_sensitivity(sc); 4025 #undef dec 4026 #undef inc 4027 } 4028 4029 int 4030 iwn_send_sensitivity(struct iwn_softc *sc) 4031 { 4032 struct iwn_calib_state *calib = &sc->calib; 4033 struct iwn_enhanced_sensitivity_cmd cmd; 4034 int len; 4035 4036 memset(&cmd, 0, sizeof cmd); 4037 len = sizeof (struct iwn_sensitivity_cmd); 4038 cmd.which = IWN_SENSITIVITY_WORKTBL; 4039 /* OFDM modulation. */ 4040 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); 4041 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); 4042 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); 4043 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); 4044 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm); 4045 cmd.energy_ofdm_th = htole16(62); 4046 /* CCK modulation. */ 4047 cmd.corr_cck_x4 = htole16(calib->cck_x4); 4048 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); 4049 cmd.energy_cck = htole16(calib->energy_cck); 4050 /* Barker modulation: use default values. */ 4051 cmd.corr_barker = htole16(190); 4052 cmd.corr_barker_mrc = htole16(390); 4053 if (!(sc->sc_flags & IWN_FLAG_ENH_SENS)) 4054 goto send; 4055 /* Enhanced sensitivity settings. */ 4056 len = sizeof (struct iwn_enhanced_sensitivity_cmd); 4057 cmd.ofdm_det_slope_mrc = htole16(668); 4058 cmd.ofdm_det_icept_mrc = htole16(4); 4059 cmd.ofdm_det_slope = htole16(486); 4060 cmd.ofdm_det_icept = htole16(37); 4061 cmd.cck_det_slope_mrc = htole16(853); 4062 cmd.cck_det_icept_mrc = htole16(4); 4063 cmd.cck_det_slope = htole16(476); 4064 cmd.cck_det_icept = htole16(99); 4065 send: 4066 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1); 4067 } 4068 4069 /* 4070 * Set STA mode power saving level (between 0 and 5). 4071 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 
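 * The (dtim, level) pair selects a row of the iwn_pmgt table: one set
 * of parameters for DTIM periods of at most 2 beacons, one for at most
 * 10 and one for longer periods.  The RX/TX timeouts taken from that
 * table are scaled by 1024 before being handed to the firmware, and
 * when the table allows DTIMs to be skipped the sleep intervals are
 * capped at a value rounded down to a multiple of the DTIM period.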
4072 */ 4073 int 4074 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) 4075 { 4076 struct iwn_pmgt_cmd cmd; 4077 const struct iwn_pmgt *pmgt; 4078 uint32_t max, skip_dtim; 4079 pcireg_t reg; 4080 int i; 4081 4082 /* Select which PS parameters to use. */ 4083 if (dtim <= 2) 4084 pmgt = &iwn_pmgt[0][level]; 4085 else if (dtim <= 10) 4086 pmgt = &iwn_pmgt[1][level]; 4087 else 4088 pmgt = &iwn_pmgt[2][level]; 4089 4090 memset(&cmd, 0, sizeof cmd); 4091 if (level != 0) /* not CAM */ 4092 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); 4093 if (level == 5) 4094 cmd.flags |= htole16(IWN_PS_FAST_PD); 4095 /* Retrieve PCIe Active State Power Management (ASPM). */ 4096 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 4097 sc->sc_cap_off + PCI_PCIE_LCSR); 4098 if (!(reg & PCI_PCIE_LCSR_ASPM_L0S)) /* L0s Entry disabled. */ 4099 cmd.flags |= htole16(IWN_PS_PCI_PMGT); 4100 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); 4101 cmd.txtimeout = htole32(pmgt->txtimeout * 1024); 4102 4103 if (dtim == 0) { 4104 dtim = 1; 4105 skip_dtim = 0; 4106 } else 4107 skip_dtim = pmgt->skip_dtim; 4108 if (skip_dtim != 0) { 4109 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); 4110 max = pmgt->intval[4]; 4111 if (max == (uint32_t)-1) 4112 max = dtim * (skip_dtim + 1); 4113 else if (max > dtim) 4114 max = (max / dtim) * dtim; 4115 } else 4116 max = dtim; 4117 for (i = 0; i < 5; i++) 4118 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 4119 4120 DPRINTF(("setting power saving level to %d\n", level)); 4121 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 4122 } 4123 4124 int 4125 iwn_send_btcoex(struct iwn_softc *sc) 4126 { 4127 struct iwn_bluetooth cmd; 4128 4129 memset(&cmd, 0, sizeof cmd); 4130 cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO; 4131 cmd.lead_time = IWN_BT_LEAD_TIME_DEF; 4132 cmd.max_kill = IWN_BT_MAX_KILL_DEF; 4133 DPRINTF(("configuring bluetooth coexistence\n")); 4134 return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 4135 } 4136 4137 int 4138 iwn5000_runtime_calib(struct iwn_softc *sc) 4139 { 4140 struct iwn5000_calib_config cmd; 4141 4142 memset(&cmd, 0, sizeof cmd); 4143 cmd.ucode.once.enable = 0xffffffff; 4144 cmd.ucode.once.start = IWN5000_CALIB_DC; 4145 DPRINTF(("configuring runtime calibration\n")); 4146 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0); 4147 } 4148 4149 int 4150 iwn_config(struct iwn_softc *sc) 4151 { 4152 struct iwn_ops *ops = &sc->ops; 4153 struct ieee80211com *ic = &sc->sc_ic; 4154 struct ifnet *ifp = &ic->ic_if; 4155 uint32_t txmask; 4156 uint16_t rxchain; 4157 int error; 4158 4159 if (sc->hw_type == IWN_HW_REV_TYPE_6005) { 4160 /* Set radio temperature sensor offset. */ 4161 error = iwn5000_temp_offset_calib(sc); 4162 if (error != 0) { 4163 printf("%s: could not set temperature offset\n", 4164 sc->sc_dev.dv_xname); 4165 return error; 4166 } 4167 } 4168 4169 if (sc->hw_type == IWN_HW_REV_TYPE_6050 || 4170 sc->hw_type == IWN_HW_REV_TYPE_6005) { 4171 /* Configure runtime DC calibration. */ 4172 error = iwn5000_runtime_calib(sc); 4173 if (error != 0) { 4174 printf("%s: could not configure runtime calibration\n", 4175 sc->sc_dev.dv_xname); 4176 return error; 4177 } 4178 } 4179 4180 /* Configure valid TX chains for >=5000 Series. 
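 * sc->txchainmask is the antenna bitmask learned from the EEPROM, one
 * bit per A/B/C chain; it is passed to the firmware as a raw 32-bit
 * word through IWN5000_CMD_TX_ANT_CONFIG.  The command is simply
 * skipped on 4965 adapters.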
*/ 4181 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4182 txmask = htole32(sc->txchainmask); 4183 DPRINTF(("configuring valid TX chains 0x%x\n", txmask)); 4184 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask, 4185 sizeof txmask, 0); 4186 if (error != 0) { 4187 printf("%s: could not configure valid TX chains\n", 4188 sc->sc_dev.dv_xname); 4189 return error; 4190 } 4191 } 4192 4193 /* Configure bluetooth coexistence. */ 4194 error = iwn_send_btcoex(sc); 4195 if (error != 0) { 4196 printf("%s: could not configure bluetooth coexistence\n", 4197 sc->sc_dev.dv_xname); 4198 return error; 4199 } 4200 4201 /* Set mode, channel, RX filter and enable RX. */ 4202 memset(&sc->rxon, 0, sizeof (struct iwn_rxon)); 4203 IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl)); 4204 IEEE80211_ADDR_COPY(sc->rxon.myaddr, ic->ic_myaddr); 4205 IEEE80211_ADDR_COPY(sc->rxon.wlap, ic->ic_myaddr); 4206 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_ibss_chan); 4207 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4208 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_ibss_chan)) 4209 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4210 switch (ic->ic_opmode) { 4211 case IEEE80211_M_STA: 4212 sc->rxon.mode = IWN_MODE_STA; 4213 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST); 4214 break; 4215 case IEEE80211_M_MONITOR: 4216 sc->rxon.mode = IWN_MODE_MONITOR; 4217 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST | 4218 IWN_FILTER_CTL | IWN_FILTER_PROMISC); 4219 break; 4220 default: 4221 /* Should not get there. */ 4222 break; 4223 } 4224 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 4225 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 4226 sc->rxon.ht_single_mask = 0xff; 4227 sc->rxon.ht_dual_mask = 0xff; 4228 sc->rxon.ht_triple_mask = 0xff; 4229 rxchain = 4230 IWN_RXCHAIN_VALID(sc->rxchainmask) | 4231 IWN_RXCHAIN_MIMO_COUNT(2) | 4232 IWN_RXCHAIN_IDLE_COUNT(2); 4233 sc->rxon.rxchain = htole16(rxchain); 4234 DPRINTF(("setting configuration\n")); 4235 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0); 4236 if (error != 0) { 4237 printf("%s: RXON command failed\n", sc->sc_dev.dv_xname); 4238 return error; 4239 } 4240 4241 if ((error = iwn_add_broadcast_node(sc, 0)) != 0) { 4242 printf("%s: could not add broadcast node\n", 4243 sc->sc_dev.dv_xname); 4244 return error; 4245 } 4246 4247 /* Configuration has changed, set TX power accordingly. */ 4248 if ((error = ops->set_txpower(sc, 0)) != 0) { 4249 printf("%s: could not set TX power\n", sc->sc_dev.dv_xname); 4250 return error; 4251 } 4252 4253 if ((error = iwn_set_critical_temp(sc)) != 0) { 4254 printf("%s: could not set critical temperature\n", 4255 sc->sc_dev.dv_xname); 4256 return error; 4257 } 4258 4259 /* Set power saving level to CAM during initialization. 
*/ 4260 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) { 4261 printf("%s: could not set power saving level\n", 4262 sc->sc_dev.dv_xname); 4263 return error; 4264 } 4265 return 0; 4266 } 4267 4268 int 4269 iwn_scan(struct iwn_softc *sc, uint16_t flags) 4270 { 4271 struct ieee80211com *ic = &sc->sc_ic; 4272 struct iwn_scan_hdr *hdr; 4273 struct iwn_cmd_data *tx; 4274 struct iwn_scan_essid *essid; 4275 struct iwn_scan_chan *chan; 4276 struct ieee80211_frame *wh; 4277 struct ieee80211_rateset *rs; 4278 struct ieee80211_channel *c; 4279 uint8_t *buf, *frm; 4280 uint16_t rxchain; 4281 uint8_t txant; 4282 int buflen, error; 4283 4284 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4285 if (buf == NULL) { 4286 printf("%s: could not allocate buffer for scan command\n", 4287 sc->sc_dev.dv_xname); 4288 return ENOMEM; 4289 } 4290 hdr = (struct iwn_scan_hdr *)buf; 4291 /* 4292 * Move to the next channel if no frames are received within 10ms 4293 * after sending the probe request. 4294 */ 4295 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 4296 hdr->quiet_threshold = htole16(1); /* min # of packets */ 4297 4298 /* Select antennas for scanning. */ 4299 rxchain = 4300 IWN_RXCHAIN_VALID(sc->rxchainmask) | 4301 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) | 4302 IWN_RXCHAIN_DRIVER_FORCE; 4303 if ((flags & IEEE80211_CHAN_5GHZ) && 4304 sc->hw_type == IWN_HW_REV_TYPE_4965) { 4305 /* Ant A must be avoided in 5GHz because of an HW bug. */ 4306 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC); 4307 } else /* Use all available RX antennas. */ 4308 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); 4309 hdr->rxchain = htole16(rxchain); 4310 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); 4311 4312 tx = (struct iwn_cmd_data *)(hdr + 1); 4313 tx->flags = htole32(IWN_TX_AUTO_SEQ); 4314 tx->id = sc->broadcast_id; 4315 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4316 4317 if (flags & IEEE80211_CHAN_5GHZ) { 4318 hdr->crc_threshold = 0xffff; 4319 /* Send probe requests at 6Mbps. */ 4320 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp; 4321 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4322 } else { 4323 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); 4324 /* Send probe requests at 1Mbps. */ 4325 tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp; 4326 tx->rflags = IWN_RFLAG_CCK; 4327 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4328 } 4329 /* Use the first valid TX antenna. */ 4330 txant = IWN_LSB(sc->txchainmask); 4331 tx->rflags |= IWN_RFLAG_ANT(txant); 4332 4333 essid = (struct iwn_scan_essid *)(tx + 1); 4334 if (ic->ic_des_esslen != 0) { 4335 essid[0].id = IEEE80211_ELEMID_SSID; 4336 essid[0].len = ic->ic_des_esslen; 4337 memcpy(essid[0].data, ic->ic_des_essid, ic->ic_des_esslen); 4338 } 4339 /* 4340 * Build a probe request frame. Most of the following code is a 4341 * copy & paste of what is done in net80211. 
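 * The result is a regular management frame addressed to the broadcast
 * address: an empty SSID element (the directed SSID, if any, is
 * carried in the scan command itself), the supported rates element, an
 * extended rates element when the rate set does not fit in the basic
 * one, and optionally the HT capabilities element.  Duration and
 * sequence number are left at zero since the hardware fills them in.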
4342 */ 4343 wh = (struct ieee80211_frame *)(essid + 20); 4344 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4345 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4346 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4347 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr); 4348 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr); 4349 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr); 4350 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 4351 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 4352 4353 frm = (uint8_t *)(wh + 1); 4354 frm = ieee80211_add_ssid(frm, NULL, 0); 4355 frm = ieee80211_add_rates(frm, rs); 4356 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4357 frm = ieee80211_add_xrates(frm, rs); 4358 #ifndef IEEE80211_NO_HT 4359 if (ic->ic_flags & IEEE80211_F_HTON) 4360 frm = ieee80211_add_htcaps(frm, ic); 4361 #endif 4362 4363 /* Set length of probe request. */ 4364 tx->len = htole16(frm - (uint8_t *)wh); 4365 4366 chan = (struct iwn_scan_chan *)frm; 4367 for (c = &ic->ic_channels[1]; 4368 c <= &ic->ic_channels[IEEE80211_CHAN_MAX]; c++) { 4369 if ((c->ic_flags & flags) != flags) 4370 continue; 4371 4372 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 4373 DPRINTFN(2, ("adding channel %d\n", chan->chan)); 4374 chan->flags = 0; 4375 if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) 4376 chan->flags |= htole32(IWN_CHAN_ACTIVE); 4377 if (ic->ic_des_esslen != 0) 4378 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 4379 chan->dsp_gain = 0x6e; 4380 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4381 chan->rf_gain = 0x3b; 4382 chan->active = htole16(24); 4383 chan->passive = htole16(110); 4384 } else { 4385 chan->rf_gain = 0x28; 4386 chan->active = htole16(36); 4387 chan->passive = htole16(120); 4388 } 4389 hdr->nchan++; 4390 chan++; 4391 } 4392 4393 buflen = (uint8_t *)chan - buf; 4394 hdr->len = htole16(buflen); 4395 4396 DPRINTF(("sending scan command nchan=%d\n", hdr->nchan)); 4397 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 4398 free(buf, M_DEVBUF); 4399 return error; 4400 } 4401 4402 int 4403 iwn_auth(struct iwn_softc *sc) 4404 { 4405 struct iwn_ops *ops = &sc->ops; 4406 struct ieee80211com *ic = &sc->sc_ic; 4407 struct ieee80211_node *ni = ic->ic_bss; 4408 int error; 4409 4410 /* Update adapter configuration. */ 4411 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4412 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan); 4413 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4414 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 4415 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4416 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4417 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4418 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4419 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4420 switch (ic->ic_curmode) { 4421 case IEEE80211_MODE_11A: 4422 sc->rxon.cck_mask = 0; 4423 sc->rxon.ofdm_mask = 0x15; 4424 break; 4425 case IEEE80211_MODE_11B: 4426 sc->rxon.cck_mask = 0x03; 4427 sc->rxon.ofdm_mask = 0; 4428 break; 4429 default: /* Assume 802.11b/g. */ 4430 sc->rxon.cck_mask = 0x0f; 4431 sc->rxon.ofdm_mask = 0x15; 4432 } 4433 DPRINTF(("rxon chan %d flags %x cck %x ofdm %x\n", sc->rxon.chan, 4434 sc->rxon.flags, sc->rxon.cck_mask, sc->rxon.ofdm_mask)); 4435 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 4436 if (error != 0) { 4437 printf("%s: RXON command failed\n", sc->sc_dev.dv_xname); 4438 return error; 4439 } 4440 4441 /* Configuration has changed, set TX power accordingly. 
*/ 4442 if ((error = ops->set_txpower(sc, 1)) != 0) { 4443 printf("%s: could not set TX power\n", sc->sc_dev.dv_xname); 4444 return error; 4445 } 4446 /* 4447 * Reconfiguring RXON clears the firmware nodes table so we must 4448 * add the broadcast node again. 4449 */ 4450 if ((error = iwn_add_broadcast_node(sc, 1)) != 0) { 4451 printf("%s: could not add broadcast node\n", 4452 sc->sc_dev.dv_xname); 4453 return error; 4454 } 4455 return 0; 4456 } 4457 4458 int 4459 iwn_run(struct iwn_softc *sc) 4460 { 4461 struct iwn_ops *ops = &sc->ops; 4462 struct ieee80211com *ic = &sc->sc_ic; 4463 struct ieee80211_node *ni = ic->ic_bss; 4464 struct iwn_node_info node; 4465 int error; 4466 4467 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 4468 /* Link LED blinks while monitoring. */ 4469 iwn_set_led(sc, IWN_LED_LINK, 5, 5); 4470 return 0; 4471 } 4472 if ((error = iwn_set_timing(sc, ni)) != 0) { 4473 printf("%s: could not set timing\n", sc->sc_dev.dv_xname); 4474 return error; 4475 } 4476 4477 /* Update adapter configuration. */ 4478 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd)); 4479 /* Short preamble and slot time are negotiated when associating. */ 4480 sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT); 4481 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4482 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4483 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4484 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4485 sc->rxon.filter |= htole32(IWN_FILTER_BSS); 4486 DPRINTF(("rxon chan %d flags %x\n", sc->rxon.chan, sc->rxon.flags)); 4487 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 4488 if (error != 0) { 4489 printf("%s: could not update configuration\n", 4490 sc->sc_dev.dv_xname); 4491 return error; 4492 } 4493 4494 /* Configuration has changed, set TX power accordingly. */ 4495 if ((error = ops->set_txpower(sc, 1)) != 0) { 4496 printf("%s: could not set TX power\n", sc->sc_dev.dv_xname); 4497 return error; 4498 } 4499 4500 /* Fake a join to initialize the TX rate. */ 4501 ((struct iwn_node *)ni)->id = IWN_ID_BSS; 4502 iwn_newassoc(ic, ni, 1); 4503 4504 /* Add BSS node. */ 4505 memset(&node, 0, sizeof node); 4506 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 4507 node.id = IWN_ID_BSS; 4508 #ifdef notyet 4509 node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(3) | 4510 IWN_AMDPU_DENSITY(5)); /* 2us */ 4511 #endif 4512 DPRINTF(("adding BSS node\n")); 4513 error = ops->add_node(sc, &node, 1); 4514 if (error != 0) { 4515 printf("%s: could not add BSS node\n", sc->sc_dev.dv_xname); 4516 return error; 4517 } 4518 DPRINTF(("setting link quality for node %d\n", node.id)); 4519 if ((error = iwn_set_link_quality(sc, ni)) != 0) { 4520 printf("%s: could not setup link quality for node %d\n", 4521 sc->sc_dev.dv_xname, node.id); 4522 return error; 4523 } 4524 4525 if ((error = iwn_init_sensitivity(sc)) != 0) { 4526 printf("%s: could not set sensitivity\n", 4527 sc->sc_dev.dv_xname); 4528 return error; 4529 } 4530 /* Start periodic calibration timer. */ 4531 sc->calib.state = IWN_CALIB_STATE_ASSOC; 4532 sc->calib_cnt = 0; 4533 timeout_add_msec(&sc->calib_to, 500); 4534 4535 /* Link LED always on while associated. */ 4536 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 4537 return 0; 4538 } 4539 4540 /* 4541 * We support CCMP hardware encryption/decryption of unicast frames only. 4542 * HW support for TKIP really sucks. We should let TKIP die anyway. 
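 * Group keys and anything that is not CCMP are handed back to net80211
 * (ieee80211_set_key/ieee80211_delete_key) and processed in software;
 * pairwise CCMP keys are installed in the firmware node table with an
 * add-node command carrying IWN_FLAG_SET_KEY.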
4543 */ 4544 int 4545 iwn_set_key(struct ieee80211com *ic, struct ieee80211_node *ni, 4546 struct ieee80211_key *k) 4547 { 4548 struct iwn_softc *sc = ic->ic_softc; 4549 struct iwn_ops *ops = &sc->ops; 4550 struct iwn_node *wn = (void *)ni; 4551 struct iwn_node_info node; 4552 uint16_t kflags; 4553 4554 if ((k->k_flags & IEEE80211_KEY_GROUP) || 4555 k->k_cipher != IEEE80211_CIPHER_CCMP) 4556 return ieee80211_set_key(ic, ni, k); 4557 4558 kflags = IWN_KFLAG_CCMP | IWN_KFLAG_MAP | IWN_KFLAG_KID(k->k_id); 4559 if (k->k_flags & IEEE80211_KEY_GROUP) 4560 kflags |= IWN_KFLAG_GROUP; 4561 4562 memset(&node, 0, sizeof node); 4563 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ? 4564 sc->broadcast_id : wn->id; 4565 node.control = IWN_NODE_UPDATE; 4566 node.flags = IWN_FLAG_SET_KEY; 4567 node.kflags = htole16(kflags); 4568 node.kid = k->k_id; 4569 memcpy(node.key, k->k_key, k->k_len); 4570 DPRINTF(("set key id=%d for node %d\n", k->k_id, node.id)); 4571 return ops->add_node(sc, &node, 1); 4572 } 4573 4574 void 4575 iwn_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni, 4576 struct ieee80211_key *k) 4577 { 4578 struct iwn_softc *sc = ic->ic_softc; 4579 struct iwn_ops *ops = &sc->ops; 4580 struct iwn_node *wn = (void *)ni; 4581 struct iwn_node_info node; 4582 4583 if ((k->k_flags & IEEE80211_KEY_GROUP) || 4584 k->k_cipher != IEEE80211_CIPHER_CCMP) { 4585 /* See comment about other ciphers above. */ 4586 ieee80211_delete_key(ic, ni, k); 4587 return; 4588 } 4589 if (ic->ic_state != IEEE80211_S_RUN) 4590 return; /* Nothing to do. */ 4591 memset(&node, 0, sizeof node); 4592 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ? 4593 sc->broadcast_id : wn->id; 4594 node.control = IWN_NODE_UPDATE; 4595 node.flags = IWN_FLAG_SET_KEY; 4596 node.kflags = htole16(IWN_KFLAG_INVALID); 4597 node.kid = 0xff; 4598 DPRINTF(("delete keys for node %d\n", node.id)); 4599 (void)ops->add_node(sc, &node, 1); 4600 } 4601 4602 #ifndef IEEE80211_NO_HT 4603 /* 4604 * This function is called by upper layer when an ADDBA request is received 4605 * from another STA and before the ADDBA response is sent. 4606 */ 4607 int 4608 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 4609 uint8_t tid) 4610 { 4611 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid]; 4612 struct iwn_softc *sc = ic->ic_softc; 4613 struct iwn_ops *ops = &sc->ops; 4614 struct iwn_node *wn = (void *)ni; 4615 struct iwn_node_info node; 4616 4617 memset(&node, 0, sizeof node); 4618 node.id = wn->id; 4619 node.control = IWN_NODE_UPDATE; 4620 node.flags = IWN_FLAG_SET_ADDBA; 4621 node.addba_tid = tid; 4622 node.addba_ssn = htole16(ba->ba_winstart); 4623 DPRINTFN(2, ("ADDBA RA=%d TID=%d SSN=%d\n", wn->id, tid, 4624 ba->ba_winstart)); 4625 return ops->add_node(sc, &node, 1); 4626 } 4627 4628 /* 4629 * This function is called by upper layer on teardown of an HT-immediate 4630 * Block Ack agreement (eg. uppon receipt of a DELBA frame). 
4631 */ 4632 void 4633 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 4634 uint8_t tid) 4635 { 4636 struct iwn_softc *sc = ic->ic_softc; 4637 struct iwn_ops *ops = &sc->ops; 4638 struct iwn_node *wn = (void *)ni; 4639 struct iwn_node_info node; 4640 4641 memset(&node, 0, sizeof node); 4642 node.id = wn->id; 4643 node.control = IWN_NODE_UPDATE; 4644 node.flags = IWN_FLAG_SET_DELBA; 4645 node.delba_tid = tid; 4646 DPRINTFN(2, ("DELBA RA=%d TID=%d\n", wn->id, tid)); 4647 (void)ops->add_node(sc, &node, 1); 4648 } 4649 4650 /* 4651 * This function is called by upper layer when an ADDBA response is received 4652 * from another STA. 4653 */ 4654 int 4655 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 4656 uint8_t tid) 4657 { 4658 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 4659 struct iwn_softc *sc = ic->ic_softc; 4660 struct iwn_ops *ops = &sc->ops; 4661 struct iwn_node *wn = (void *)ni; 4662 struct iwn_node_info node; 4663 int error; 4664 4665 /* Enable TX for the specified RA/TID. */ 4666 wn->disable_tid &= ~(1 << tid); 4667 memset(&node, 0, sizeof node); 4668 node.id = wn->id; 4669 node.control = IWN_NODE_UPDATE; 4670 node.flags = IWN_FLAG_SET_DISABLE_TID; 4671 node.disable_tid = htole16(wn->disable_tid); 4672 error = ops->add_node(sc, &node, 1); 4673 if (error != 0) 4674 return error; 4675 4676 if ((error = iwn_nic_lock(sc)) != 0) 4677 return error; 4678 ops->ampdu_tx_start(sc, ni, tid, ba->ba_winstart); 4679 iwn_nic_unlock(sc); 4680 return 0; 4681 } 4682 4683 void 4684 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 4685 uint8_t tid) 4686 { 4687 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 4688 struct iwn_softc *sc = ic->ic_softc; 4689 struct iwn_ops *ops = &sc->ops; 4690 4691 if (iwn_nic_lock(sc) != 0) 4692 return; 4693 ops->ampdu_tx_stop(sc, tid, ba->ba_winstart); 4694 iwn_nic_unlock(sc); 4695 } 4696 4697 void 4698 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 4699 uint8_t tid, uint16_t ssn) 4700 { 4701 struct iwn_node *wn = (void *)ni; 4702 int qid = 7 + tid; 4703 4704 /* Stop TX scheduler while we're changing its configuration. */ 4705 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 4706 IWN4965_TXQ_STATUS_CHGACT); 4707 4708 /* Assign RA/TID translation to the queue. */ 4709 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), 4710 wn->id << 4 | tid); 4711 4712 /* Enable chain-building mode for the queue. */ 4713 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); 4714 4715 /* Set starting sequence number from the ADDBA request. */ 4716 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 4717 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 4718 4719 /* Set scheduler window size. */ 4720 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), 4721 IWN_SCHED_WINSZ); 4722 /* Set scheduler frame limit. */ 4723 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 4724 IWN_SCHED_LIMIT << 16); 4725 4726 /* Enable interrupts for the queue. */ 4727 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 4728 4729 /* Mark the queue as active. */ 4730 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 4731 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | 4732 iwn_tid2fifo[tid] << 1); 4733 } 4734 4735 void 4736 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 4737 { 4738 int qid = 7 + tid; 4739 4740 /* Stop TX scheduler while we're changing its configuration. 
*/ 4741 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 4742 IWN4965_TXQ_STATUS_CHGACT); 4743 4744 /* Set starting sequence number from the ADDBA request. */ 4745 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 4746 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 4747 4748 /* Disable interrupts for the queue. */ 4749 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 4750 4751 /* Mark the queue as inactive. */ 4752 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 4753 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 4754 } 4755 4756 void 4757 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 4758 uint8_t tid, uint16_t ssn) 4759 { 4760 struct iwn_node *wn = (void *)ni; 4761 int qid = 10 + tid; 4762 4763 /* Stop TX scheduler while we're changing its configuration. */ 4764 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 4765 IWN5000_TXQ_STATUS_CHGACT); 4766 4767 /* Assign RA/TID translation to the queue. */ 4768 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 4769 wn->id << 4 | tid); 4770 4771 /* Enable chain-building mode for the queue. */ 4772 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 4773 4774 /* Enable aggregation for the queue. */ 4775 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 4776 4777 /* Set starting sequence number from the ADDBA request. */ 4778 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 4779 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 4780 4781 /* Set scheduler window size and frame limit. */ 4782 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 4783 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 4784 4785 /* Enable interrupts for the queue. */ 4786 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 4787 4788 /* Mark the queue as active. */ 4789 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 4790 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 4791 } 4792 4793 void 4794 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 4795 { 4796 int qid = 10 + tid; 4797 4798 /* Stop TX scheduler while we're changing its configuration. */ 4799 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 4800 IWN5000_TXQ_STATUS_CHGACT); 4801 4802 /* Disable aggregation for the queue. */ 4803 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 4804 4805 /* Set starting sequence number from the ADDBA request. */ 4806 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 4807 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 4808 4809 /* Disable interrupts for the queue. */ 4810 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 4811 4812 /* Mark the queue as inactive. */ 4813 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 4814 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); 4815 } 4816 #endif /* !IEEE80211_NO_HT */ 4817 4818 /* 4819 * Query calibration tables from the initialization firmware. We do this 4820 * only once at first boot. Called from a process context. 
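 * All calibration types are requested at once (the enable, start and
 * send masks are set to all ones) and, unless IWN_FLAG_CALIB_DONE is
 * already set, the caller then sleeps for at most two seconds waiting
 * to be woken up once the results have been received.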
4821 */ 4822 int 4823 iwn5000_query_calibration(struct iwn_softc *sc) 4824 { 4825 struct iwn5000_calib_config cmd; 4826 int error; 4827 4828 memset(&cmd, 0, sizeof cmd); 4829 cmd.ucode.once.enable = 0xffffffff; 4830 cmd.ucode.once.start = 0xffffffff; 4831 cmd.ucode.once.send = 0xffffffff; 4832 cmd.ucode.flags = 0xffffffff; 4833 DPRINTF(("sending calibration query\n")); 4834 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); 4835 if (error != 0) 4836 return error; 4837 4838 /* Wait at most two seconds for calibration to complete. */ 4839 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) 4840 error = tsleep(sc, PCATCH, "iwncal", 2 * hz); 4841 return error; 4842 } 4843 4844 /* 4845 * Send calibration results to the runtime firmware. These results were 4846 * obtained on first boot from the initialization firmware. 4847 */ 4848 int 4849 iwn5000_send_calibration(struct iwn_softc *sc) 4850 { 4851 int idx, error; 4852 4853 for (idx = 0; idx < 5; idx++) { 4854 if (sc->calibcmd[idx].buf == NULL) 4855 continue; /* No results available. */ 4856 DPRINTF(("send calibration result idx=%d len=%d\n", 4857 idx, sc->calibcmd[idx].len)); 4858 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf, 4859 sc->calibcmd[idx].len, 0); 4860 if (error != 0) { 4861 printf("%s: could not send calibration result\n", 4862 sc->sc_dev.dv_xname); 4863 return error; 4864 } 4865 } 4866 return 0; 4867 } 4868 4869 int 4870 iwn5000_send_wimax_coex(struct iwn_softc *sc) 4871 { 4872 struct iwn5000_wimax_coex wimax; 4873 4874 #ifdef notyet 4875 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 4876 /* Enable WiMAX coexistence for combo adapters. */ 4877 wimax.flags = 4878 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 4879 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 4880 IWN_WIMAX_COEX_STA_TABLE_VALID | 4881 IWN_WIMAX_COEX_ENABLE; 4882 memcpy(wimax.events, iwn6050_wimax_events, 4883 sizeof iwn6050_wimax_events); 4884 } else 4885 #endif 4886 { 4887 /* Disable WiMAX coexistence. */ 4888 wimax.flags = 0; 4889 memset(wimax.events, 0, sizeof wimax.events); 4890 } 4891 DPRINTF(("Configuring WiMAX coexistence\n")); 4892 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 4893 } 4894 4895 int 4896 iwn5000_crystal_calib(struct iwn_softc *sc) 4897 { 4898 struct iwn5000_phy_calib_crystal cmd; 4899 4900 memset(&cmd, 0, sizeof cmd); 4901 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 4902 cmd.ngroups = 1; 4903 cmd.isvalid = 1; 4904 cmd.cap_pin[0] = letoh32(sc->eeprom_crystal) & 0xff; 4905 cmd.cap_pin[1] = (letoh32(sc->eeprom_crystal) >> 16) & 0xff; 4906 DPRINTF(("sending crystal calibration %d, %d\n", 4907 cmd.cap_pin[0], cmd.cap_pin[1])); 4908 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 4909 } 4910 4911 int 4912 iwn5000_temp_offset_calib(struct iwn_softc *sc) 4913 { 4914 struct iwn5000_phy_calib_temp_offset cmd; 4915 4916 memset(&cmd, 0, sizeof cmd); 4917 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET; 4918 cmd.ngroups = 1; 4919 cmd.isvalid = 1; 4920 if (sc->eeprom_temp != 0) 4921 cmd.offset = htole16(sc->eeprom_temp); 4922 else 4923 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET); 4924 DPRINTF(("setting radio sensor offset to %d\n", letoh16(cmd.offset))); 4925 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 4926 } 4927 4928 /* 4929 * This function is called after the runtime firmware notifies us of its 4930 * readiness (called in a process context). 
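 * Post-alive setup mostly programs the TX scheduler: its context area
 * in SRAM is cleared, it is pointed at the (1KB-aligned) scheduler
 * rings in host memory, every queue gets a window size and frame
 * limit, and the seven rings used for EDCA, commands and HCCA are
 * marked active with their FIFO mapping.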
4931 */ 4932 int 4933 iwn4965_post_alive(struct iwn_softc *sc) 4934 { 4935 int error, qid; 4936 4937 if ((error = iwn_nic_lock(sc)) != 0) 4938 return error; 4939 4940 /* Clear TX scheduler state in SRAM. */ 4941 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 4942 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, 4943 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); 4944 4945 /* Set physical address of TX scheduler rings (1KB aligned). */ 4946 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 4947 4948 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 4949 4950 /* Disable chain mode for all our 16 queues. */ 4951 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); 4952 4953 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { 4954 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); 4955 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 4956 4957 /* Set scheduler window size. */ 4958 iwn_mem_write(sc, sc->sched_base + 4959 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); 4960 /* Set scheduler frame limit. */ 4961 iwn_mem_write(sc, sc->sched_base + 4962 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 4963 IWN_SCHED_LIMIT << 16); 4964 } 4965 4966 /* Enable interrupts for all our 16 queues. */ 4967 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); 4968 /* Identify TX FIFO rings (0-7). */ 4969 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); 4970 4971 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 4972 for (qid = 0; qid < 7; qid++) { 4973 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 4974 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 4975 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 4976 } 4977 iwn_nic_unlock(sc); 4978 return 0; 4979 } 4980 4981 /* 4982 * This function is called after the initialization or runtime firmware 4983 * notifies us of its readiness (called in a process context). 4984 */ 4985 int 4986 iwn5000_post_alive(struct iwn_softc *sc) 4987 { 4988 int error, qid; 4989 4990 /* Switch to using ICT interrupt mode. */ 4991 iwn5000_ict_reset(sc); 4992 4993 if ((error = iwn_nic_lock(sc)) != 0) 4994 return error; 4995 4996 /* Clear TX scheduler state in SRAM. */ 4997 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 4998 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 4999 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); 5000 5001 /* Set physical address of TX scheduler rings (1KB aligned). */ 5002 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5003 5004 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5005 5006 /* Enable chain mode for all queues, except command queue. */ 5007 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); 5008 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 5009 5010 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 5011 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 5012 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5013 5014 iwn_mem_write(sc, sc->sched_base + 5015 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 5016 /* Set scheduler window size and frame limit. */ 5017 iwn_mem_write(sc, sc->sched_base + 5018 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5019 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5020 } 5021 5022 /* Enable interrupts for all our 20 queues. */ 5023 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 5024 /* Identify TX FIFO rings (0-7). */ 5025 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 5026 5027 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. 
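 * Same idea as on the 4965, except that the fifth ring is mapped to
 * FIFO 7 here instead of FIFO 4.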
*/ 5028 for (qid = 0; qid < 7; qid++) { 5029 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 5030 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5031 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 5032 } 5033 iwn_nic_unlock(sc); 5034 5035 /* Configure WiMAX coexistence for combo adapters. */ 5036 error = iwn5000_send_wimax_coex(sc); 5037 if (error != 0) { 5038 printf("%s: could not configure WiMAX coexistence\n", 5039 sc->sc_dev.dv_xname); 5040 return error; 5041 } 5042 if (sc->hw_type != IWN_HW_REV_TYPE_5150) { 5043 /* Perform crystal calibration. */ 5044 error = iwn5000_crystal_calib(sc); 5045 if (error != 0) { 5046 printf("%s: crystal calibration failed\n", 5047 sc->sc_dev.dv_xname); 5048 return error; 5049 } 5050 } 5051 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { 5052 /* Query calibration from the initialization firmware. */ 5053 if ((error = iwn5000_query_calibration(sc)) != 0) { 5054 printf("%s: could not query calibration\n", 5055 sc->sc_dev.dv_xname); 5056 return error; 5057 } 5058 /* 5059 * We have the calibration results now, reboot with the 5060 * runtime firmware (call ourselves recursively!) 5061 */ 5062 iwn_hw_stop(sc); 5063 error = iwn_hw_init(sc); 5064 } else { 5065 /* Send calibration results to runtime firmware. */ 5066 error = iwn5000_send_calibration(sc); 5067 } 5068 return error; 5069 } 5070 5071 /* 5072 * The firmware boot code is small and is intended to be copied directly into 5073 * the NIC internal memory (no DMA transfer). 5074 */ 5075 int 5076 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) 5077 { 5078 int error, ntries; 5079 5080 size /= sizeof (uint32_t); 5081 5082 if ((error = iwn_nic_lock(sc)) != 0) 5083 return error; 5084 5085 /* Copy microcode image into NIC memory. */ 5086 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, 5087 (const uint32_t *)ucode, size); 5088 5089 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); 5090 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); 5091 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); 5092 5093 /* Start boot load now. */ 5094 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); 5095 5096 /* Wait for transfer to complete. */ 5097 for (ntries = 0; ntries < 1000; ntries++) { 5098 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & 5099 IWN_BSM_WR_CTRL_START)) 5100 break; 5101 DELAY(10); 5102 } 5103 if (ntries == 1000) { 5104 printf("%s: could not load boot firmware\n", 5105 sc->sc_dev.dv_xname); 5106 iwn_nic_unlock(sc); 5107 return ETIMEDOUT; 5108 } 5109 5110 /* Enable boot after power up. */ 5111 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); 5112 5113 iwn_nic_unlock(sc); 5114 return 0; 5115 } 5116 5117 int 5118 iwn4965_load_firmware(struct iwn_softc *sc) 5119 { 5120 struct iwn_fw_info *fw = &sc->fw; 5121 struct iwn_dma_info *dma = &sc->fw_dma; 5122 int error; 5123 5124 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 5125 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 5126 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->init.datasz, 5127 BUS_DMASYNC_PREWRITE); 5128 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5129 fw->init.text, fw->init.textsz); 5130 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ, 5131 fw->init.textsz, BUS_DMASYNC_PREWRITE); 5132 5133 /* Tell adapter where to find initialization sections. 
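 * The Bootstrap State Machine registers take the physical address of
 * the DMA-safe buffer shifted right by four (16-byte units) together
 * with the section sizes; the data section sits at the start of the
 * buffer and the text section at the fixed IWN4965_FW_DATA_MAXSZ
 * offset used when the sections were copied above.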
*/ 5134 if ((error = iwn_nic_lock(sc)) != 0) 5135 return error; 5136 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5137 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); 5138 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5139 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5140 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 5141 iwn_nic_unlock(sc); 5142 5143 /* Load firmware boot code. */ 5144 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 5145 if (error != 0) { 5146 printf("%s: could not load boot firmware\n", 5147 sc->sc_dev.dv_xname); 5148 return error; 5149 } 5150 /* Now press "execute". */ 5151 IWN_WRITE(sc, IWN_RESET, 0); 5152 5153 /* Wait at most one second for first alive notification. */ 5154 if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) { 5155 printf("%s: timeout waiting for adapter to initialize\n", 5156 sc->sc_dev.dv_xname); 5157 return error; 5158 } 5159 5160 /* Retrieve current temperature for initial TX power calibration. */ 5161 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; 5162 sc->temp = iwn4965_get_temperature(sc); 5163 5164 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 5165 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 5166 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->main.datasz, 5167 BUS_DMASYNC_PREWRITE); 5168 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5169 fw->main.text, fw->main.textsz); 5170 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ, 5171 fw->main.textsz, BUS_DMASYNC_PREWRITE); 5172 5173 /* Tell adapter where to find runtime sections. */ 5174 if ((error = iwn_nic_lock(sc)) != 0) 5175 return error; 5176 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5177 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); 5178 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5179 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5180 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, 5181 IWN_FW_UPDATED | fw->main.textsz); 5182 iwn_nic_unlock(sc); 5183 5184 return 0; 5185 } 5186 5187 int 5188 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, 5189 const uint8_t *section, int size) 5190 { 5191 struct iwn_dma_info *dma = &sc->fw_dma; 5192 int error; 5193 5194 /* Copy firmware section into pre-allocated DMA-safe memory. */ 5195 memcpy(dma->vaddr, section, size); 5196 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, size, BUS_DMASYNC_PREWRITE); 5197 5198 if ((error = iwn_nic_lock(sc)) != 0) 5199 return error; 5200 5201 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5202 IWN_FH_TX_CONFIG_DMA_PAUSE); 5203 5204 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); 5205 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), 5206 IWN_LOADDR(dma->paddr)); 5207 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), 5208 IWN_HIADDR(dma->paddr) << 28 | size); 5209 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), 5210 IWN_FH_TXBUF_STATUS_TBNUM(1) | 5211 IWN_FH_TXBUF_STATUS_TBIDX(1) | 5212 IWN_FH_TXBUF_STATUS_TFBD_VALID); 5213 5214 /* Kick Flow Handler to start DMA transfer. */ 5215 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5216 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); 5217 5218 iwn_nic_unlock(sc); 5219 5220 /* Wait at most five seconds for FH DMA transfer to complete. */ 5221 return tsleep(sc, PCATCH, "iwninit", 5 * hz); 5222 } 5223 5224 int 5225 iwn5000_load_firmware(struct iwn_softc *sc) 5226 { 5227 struct iwn_fw_part *fw; 5228 int error; 5229 5230 /* Load the initialization firmware on first boot only. 
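 * On the very first boot the initialization image is loaded so that
 * calibration results can be collected; once IWN_FLAG_CALIB_DONE is
 * set, the runtime ("main") image is used instead.  Each section is
 * DMAed to its base address in NIC memory by the service channel and
 * execution starts when the reset register is cleared below.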
*/ 5231 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ? 5232 &sc->fw.main : &sc->fw.init; 5233 5234 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, 5235 fw->text, fw->textsz); 5236 if (error != 0) { 5237 printf("%s: could not load firmware %s section\n", 5238 sc->sc_dev.dv_xname, ".text"); 5239 return error; 5240 } 5241 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, 5242 fw->data, fw->datasz); 5243 if (error != 0) { 5244 printf("%s: could not load firmware %s section\n", 5245 sc->sc_dev.dv_xname, ".data"); 5246 return error; 5247 } 5248 5249 /* Now press "execute". */ 5250 IWN_WRITE(sc, IWN_RESET, 0); 5251 return 0; 5252 } 5253 5254 /* 5255 * Extract text and data sections from a legacy firmware image. 5256 */ 5257 int 5258 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) 5259 { 5260 const uint32_t *ptr; 5261 size_t hdrlen = 24; 5262 uint32_t rev; 5263 5264 ptr = (const uint32_t *)fw->data; 5265 rev = letoh32(*ptr++); 5266 5267 /* Check firmware API version. */ 5268 if (IWN_FW_API(rev) <= 1) { 5269 printf("%s: bad firmware, need API version >=2\n", 5270 sc->sc_dev.dv_xname); 5271 return EINVAL; 5272 } 5273 if (IWN_FW_API(rev) >= 3) { 5274 /* Skip build number (version 2 header). */ 5275 hdrlen += 4; 5276 ptr++; 5277 } 5278 if (fw->size < hdrlen) { 5279 printf("%s: firmware too short: %d bytes\n", 5280 sc->sc_dev.dv_xname, fw->size); 5281 return EINVAL; 5282 } 5283 fw->main.textsz = letoh32(*ptr++); 5284 fw->main.datasz = letoh32(*ptr++); 5285 fw->init.textsz = letoh32(*ptr++); 5286 fw->init.datasz = letoh32(*ptr++); 5287 fw->boot.textsz = letoh32(*ptr++); 5288 5289 /* Check that all firmware sections fit. */ 5290 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + 5291 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5292 printf("%s: firmware too short: %d bytes\n", 5293 sc->sc_dev.dv_xname, fw->size); 5294 return EINVAL; 5295 } 5296 5297 /* Get pointers to firmware sections. */ 5298 fw->main.text = (const uint8_t *)ptr; 5299 fw->main.data = fw->main.text + fw->main.textsz; 5300 fw->init.text = fw->main.data + fw->main.datasz; 5301 fw->init.data = fw->init.text + fw->init.textsz; 5302 fw->boot.text = fw->init.data + fw->init.datasz; 5303 return 0; 5304 } 5305 5306 /* 5307 * Extract text and data sections from a TLV firmware image. 5308 */ 5309 int 5310 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 5311 uint16_t alt) 5312 { 5313 const struct iwn_fw_tlv_hdr *hdr; 5314 const struct iwn_fw_tlv *tlv; 5315 const uint8_t *ptr, *end; 5316 uint64_t altmask; 5317 uint32_t len; 5318 5319 if (fw->size < sizeof (*hdr)) { 5320 printf("%s: firmware too short: %d bytes\n", 5321 sc->sc_dev.dv_xname, fw->size); 5322 return EINVAL; 5323 } 5324 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 5325 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 5326 printf("%s: bad firmware signature 0x%08x\n", 5327 sc->sc_dev.dv_xname, letoh32(hdr->signature)); 5328 return EINVAL; 5329 } 5330 DPRINTF(("FW: \"%.64s\", build 0x%x\n", hdr->descr, 5331 letoh32(hdr->build))); 5332 5333 /* 5334 * Select the closest supported alternative that is less than 5335 * or equal to the specified one. 5336 */ 5337 altmask = letoh64(hdr->altmask); 5338 while (alt > 0 && !(altmask & (1ULL << alt))) 5339 alt--; /* Downgrade. */ 5340 DPRINTF(("using alternative %d\n", alt)); 5341 5342 ptr = (const uint8_t *)(hdr + 1); 5343 end = (const uint8_t *)(fw->data + fw->size); 5344 5345 /* Parse type-length-value fields. 
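 * Each record carries a type, an "alternative" selector and a length.
 * Records tagged with a different alternative than the one chosen
 * above are skipped, the main, init and boot text and data payloads
 * are remembered by pointer and length, and the walk advances by the
 * record length rounded up to a 32-bit boundary.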
*/ 5346 while (ptr + sizeof (*tlv) <= end) { 5347 tlv = (const struct iwn_fw_tlv *)ptr; 5348 len = letoh32(tlv->len); 5349 5350 ptr += sizeof (*tlv); 5351 if (ptr + len > end) { 5352 printf("%s: firmware too short: %d bytes\n", 5353 sc->sc_dev.dv_xname, fw->size); 5354 return EINVAL; 5355 } 5356 /* Skip other alternatives. */ 5357 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 5358 goto next; 5359 5360 switch (letoh16(tlv->type)) { 5361 case IWN_FW_TLV_MAIN_TEXT: 5362 fw->main.text = ptr; 5363 fw->main.textsz = len; 5364 break; 5365 case IWN_FW_TLV_MAIN_DATA: 5366 fw->main.data = ptr; 5367 fw->main.datasz = len; 5368 break; 5369 case IWN_FW_TLV_INIT_TEXT: 5370 fw->init.text = ptr; 5371 fw->init.textsz = len; 5372 break; 5373 case IWN_FW_TLV_INIT_DATA: 5374 fw->init.data = ptr; 5375 fw->init.datasz = len; 5376 break; 5377 case IWN_FW_TLV_BOOT_TEXT: 5378 fw->boot.text = ptr; 5379 fw->boot.textsz = len; 5380 break; 5381 default: 5382 DPRINTF(("TLV type %d not handled\n", 5383 letoh16(tlv->type))); 5384 break; 5385 } 5386 next: /* TLV fields are 32-bit aligned. */ 5387 ptr += (len + 3) & ~3; 5388 } 5389 return 0; 5390 } 5391 5392 int 5393 iwn_read_firmware(struct iwn_softc *sc) 5394 { 5395 struct iwn_fw_info *fw = &sc->fw; 5396 int error; 5397 5398 memset(fw, 0, sizeof (*fw)); 5399 5400 /* Read firmware image from filesystem. */ 5401 if ((error = loadfirmware(sc->fwname, &fw->data, &fw->size)) != 0) { 5402 printf("%s: error, %d, could not read firmware %s\n", 5403 sc->sc_dev.dv_xname, error, sc->fwname); 5404 return error; 5405 } 5406 if (fw->size < sizeof (uint32_t)) { 5407 printf("%s: firmware too short: %d bytes\n", 5408 sc->sc_dev.dv_xname, fw->size); 5409 free(fw->data, M_DEVBUF); 5410 return EINVAL; 5411 } 5412 5413 /* Retrieve text and data sections. */ 5414 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ 5415 error = iwn_read_firmware_leg(sc, fw); 5416 else 5417 error = iwn_read_firmware_tlv(sc, fw, 1); 5418 if (error != 0) { 5419 printf("%s: could not read firmware sections\n", 5420 sc->sc_dev.dv_xname); 5421 free(fw->data, M_DEVBUF); 5422 return error; 5423 } 5424 5425 /* Make sure text and data sections fit in hardware memory. */ 5426 if (fw->main.textsz > sc->fw_text_maxsz || 5427 fw->main.datasz > sc->fw_data_maxsz || 5428 fw->init.textsz > sc->fw_text_maxsz || 5429 fw->init.datasz > sc->fw_data_maxsz || 5430 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 5431 (fw->boot.textsz & 3) != 0) { 5432 printf("%s: firmware sections too large\n", 5433 sc->sc_dev.dv_xname); 5434 free(fw->data, M_DEVBUF); 5435 return EINVAL; 5436 } 5437 5438 /* We can proceed with loading the firmware. */ 5439 return 0; 5440 } 5441 5442 int 5443 iwn_clock_wait(struct iwn_softc *sc) 5444 { 5445 int ntries; 5446 5447 /* Set "initialization complete" bit. */ 5448 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 5449 5450 /* Wait for clock stabilization. */ 5451 for (ntries = 0; ntries < 2500; ntries++) { 5452 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) 5453 return 0; 5454 DELAY(10); 5455 } 5456 printf("%s: timeout waiting for clock stabilization\n", 5457 sc->sc_dev.dv_xname); 5458 return ETIMEDOUT; 5459 } 5460 5461 int 5462 iwn_apm_init(struct iwn_softc *sc) 5463 { 5464 pcireg_t reg; 5465 int error; 5466 5467 /* Disable L0s exit timer (NMI bug workaround). */ 5468 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 5469 /* Don't wait for ICH L0s (ICH bug workaround). 
*/ 5470 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 5471 5472 /* Set FH wait threshold to max (HW bug under stress workaround). */ 5473 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); 5474 5475 /* Enable HAP INTA to move adapter from L1a to L0s. */ 5476 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); 5477 5478 /* Retrieve PCIe Active State Power Management (ASPM). */ 5479 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 5480 sc->sc_cap_off + PCI_PCIE_LCSR); 5481 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 5482 if (reg & PCI_PCIE_LCSR_ASPM_L1) /* L1 Entry enabled. */ 5483 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 5484 else 5485 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 5486 5487 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 5488 sc->hw_type <= IWN_HW_REV_TYPE_1000) 5489 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT); 5490 5491 /* Wait for clock stabilization before accessing prph. */ 5492 if ((error = iwn_clock_wait(sc)) != 0) 5493 return error; 5494 5495 if ((error = iwn_nic_lock(sc)) != 0) 5496 return error; 5497 if (sc->hw_type == IWN_HW_REV_TYPE_4965) { 5498 /* Enable DMA and BSM (Bootstrap State Machine). */ 5499 iwn_prph_write(sc, IWN_APMG_CLK_EN, 5500 IWN_APMG_CLK_CTRL_DMA_CLK_RQT | 5501 IWN_APMG_CLK_CTRL_BSM_CLK_RQT); 5502 } else { 5503 /* Enable DMA. */ 5504 iwn_prph_write(sc, IWN_APMG_CLK_EN, 5505 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 5506 } 5507 DELAY(20); 5508 /* Disable L1-Active. */ 5509 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 5510 iwn_nic_unlock(sc); 5511 5512 return 0; 5513 } 5514 5515 void 5516 iwn_apm_stop_master(struct iwn_softc *sc) 5517 { 5518 int ntries; 5519 5520 /* Stop busmaster DMA activity. */ 5521 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); 5522 for (ntries = 0; ntries < 100; ntries++) { 5523 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) 5524 return; 5525 DELAY(10); 5526 } 5527 printf("%s: timeout waiting for master\n", sc->sc_dev.dv_xname); 5528 } 5529 5530 void 5531 iwn_apm_stop(struct iwn_softc *sc) 5532 { 5533 iwn_apm_stop_master(sc); 5534 5535 /* Reset the entire device. */ 5536 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); 5537 DELAY(10); 5538 /* Clear "initialization complete" bit. */ 5539 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 5540 } 5541 5542 int 5543 iwn4965_nic_config(struct iwn_softc *sc) 5544 { 5545 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { 5546 /* 5547 * I don't believe this to be correct but this is what the 5548 * vendor driver is doing. Probably the bits should not be 5549 * shifted in IWN_RFCFG_*. 
int
iwn4965_nic_config(struct iwn_softc *sc)
{
	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
		/*
		 * I don't believe this to be correct but this is what the
		 * vendor driver is doing. Probably the bits should not be
		 * shifted in IWN_RFCFG_*.
		 */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_RFCFG_TYPE(sc->rfcfg) |
		    IWN_RFCFG_STEP(sc->rfcfg) |
		    IWN_RFCFG_DASH(sc->rfcfg));
	}
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
	return 0;
}

int
iwn5000_nic_config(struct iwn_softc *sc)
{
	uint32_t tmp;
	int error;

	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_RFCFG_TYPE(sc->rfcfg) |
		    IWN_RFCFG_STEP(sc->rfcfg) |
		    IWN_RFCFG_DASH(sc->rfcfg));
	}
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);

	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
		/*
		 * Select first Switching Voltage Regulator (1.32V) to
		 * solve a stability issue related to noisy DC2DC line
		 * in the silicon of 1000 Series.
		 */
		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
	}
	iwn_nic_unlock(sc);

	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
		/* Use internal power amplifier only. */
		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
	}
	if ((sc->hw_type == IWN_HW_REV_TYPE_6050 ||
	    sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) {
		/* Indicate that ROM calibration version is >=6. */
		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
	}
	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2);
	return 0;
}

/*
 * Take NIC ownership over Intel Active Management Technology (AMT).
 */
int
iwn_hw_prepare(struct iwn_softc *sc)
{
	int ntries;

	/* Check if hardware is ready. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
	for (ntries = 0; ntries < 5; ntries++) {
		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_NIC_READY)
			return 0;
		DELAY(10);
	}

	/* Hardware not ready, force into ready state. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
	for (ntries = 0; ntries < 15000; ntries++) {
		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_PREPARE_DONE))
			break;
		DELAY(10);
	}
	if (ntries == 15000)
		return ETIMEDOUT;

	/* Hardware should be ready now. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
	for (ntries = 0; ntries < 5; ntries++) {
		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_NIC_READY)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}
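
/*
 * Bring-up order implemented by iwn_hw_init(): power the adapter on
 * (iwn_apm_init), select the VMAIN power source, run the adapter-specific
 * ops->nic_config() hook, program the RX ring and status area, the TX
 * scheduler and the per-queue TX rings, enable the FH DMA channels, clear
 * the RF-kill and "commands blocked" bits, enable interrupts, then upload
 * the firmware and sleep (at most one second) until the "alive"
 * notification wakes us, at which point ops->post_alive() completes the
 * setup.
 */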
int
iwn_hw_init(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	int error, chnl, qid;

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);

	if ((error = iwn_apm_init(sc)) != 0) {
		printf("%s: could not power ON adapter\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/* Select VMAIN power source. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
	iwn_nic_unlock(sc);

	/* Perform adapter-specific initialization. */
	if ((error = ops->nic_config(sc)) != 0)
		return error;

	/* Initialize RX ring. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
	/* Set physical address of RX ring (256-byte aligned). */
	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
	/* Set physical address of RX status (16-byte aligned). */
	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
	/* Enable RX. */
	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
	    IWN_FH_RX_CONFIG_ENA |
	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
	    IWN_FH_RX_CONFIG_IRQ_DST_HOST |
	    IWN_FH_RX_CONFIG_SINGLE_FRAME |
	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
	iwn_nic_unlock(sc);
	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Initialize TX scheduler. */
	iwn_prph_write(sc, sc->sched_txfact_addr, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < sc->ntxqs; qid++) {
		struct iwn_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
	}
	iwn_nic_unlock(sc);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
		    IWN_FH_TX_CONFIG_DMA_ENA |
		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
	}

	/* Clear "radio off" and "commands blocked" bits. */
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	/* Enable interrupt coalescing. */
	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
	/* Enable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);

	/* _Really_ make sure "radio off" bit is cleared! */
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);

	/* Enable shadow registers. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);

	if ((error = ops->load_firmware(sc)) != 0) {
		printf("%s: could not load firmware\n", sc->sc_dev.dv_xname);
		return error;
	}
	/* Wait at most one second for firmware alive notification. */
	if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) {
		printf("%s: timeout waiting for adapter to initialize\n",
		    sc->sc_dev.dv_xname);
		return error;
	}
	/* Do post-firmware initialization. */
	return ops->post_alive(sc);
}
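
/*
 * Teardown mirrors iwn_hw_init() in reverse: mask and acknowledge
 * interrupts, stop the TX scheduler, quiesce the FH DMA channels (polling
 * each one until it reports idle), reset the RX and TX rings, gate the
 * DMA clock and finally reset and power the adapter down via
 * iwn_apm_stop().
 */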
void
iwn_hw_stop(struct iwn_softc *sc)
{
	int chnl, qid, ntries;

	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);

	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
	sc->sc_flags &= ~IWN_FLAG_USE_ICT;

	/* Make sure we no longer hold the NIC lock. */
	iwn_nic_unlock(sc);

	/* Stop TX scheduler. */
	iwn_prph_write(sc, sc->sched_txfact_addr, 0);

	/* Stop all DMA channels. */
	if (iwn_nic_lock(sc) == 0) {
		for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
			for (ntries = 0; ntries < 200; ntries++) {
				if (IWN_READ(sc, IWN_FH_TX_STATUS) &
				    IWN_FH_TX_STATUS_IDLE(chnl))
					break;
				DELAY(10);
			}
		}
		iwn_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwn_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_reset_tx_ring(sc, &sc->txq[qid]);

	if (iwn_nic_lock(sc) == 0) {
		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
		iwn_nic_unlock(sc);
	}
	DELAY(5);
	/* Power OFF adapter. */
	iwn_apm_stop(sc);
}

int
iwn_init(struct ifnet *ifp)
{
	struct iwn_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	if ((error = iwn_hw_prepare(sc)) != 0) {
		printf("%s: hardware not ready\n", sc->sc_dev.dv_xname);
		goto fail;
	}

	/* Check that the radio is not disabled by hardware switch. */
	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
		printf("%s: radio is disabled by hardware switch\n",
		    sc->sc_dev.dv_xname);
		error = EPERM;	/* :-) */
		goto fail;
	}

	/* Read firmware images from the filesystem. */
	if ((error = iwn_read_firmware(sc)) != 0) {
		printf("%s: could not read firmware\n", sc->sc_dev.dv_xname);
		goto fail;
	}

	/* Initialize interrupt mask to default value. */
	sc->int_mask = IWN_INT_MASK_DEF;
	sc->sc_flags &= ~IWN_FLAG_USE_ICT;

	/* Initialize hardware and upload firmware. */
	error = iwn_hw_init(sc);
	free(sc->fw.data, M_DEVBUF);
	if (error != 0) {
		printf("%s: could not initialize hardware\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* Configure adapter now that it is ready. */
	if ((error = iwn_config(sc)) != 0) {
		printf("%s: could not configure device\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_flags |= IFF_RUNNING;

	if (ic->ic_opmode != IEEE80211_M_MONITOR)
		ieee80211_begin_scan(ifp);
	else
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);

	return 0;

fail:	iwn_stop(ifp, 1);
	return error;
}

void
iwn_stop(struct ifnet *ifp, int disable)
{
	struct iwn_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	ifp->if_timer = sc->sc_tx_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* In case we were scanning, release the scan "lock". */
	ic->ic_scan_lock = IEEE80211_SCAN_UNLOCKED;

	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

	/* Power OFF hardware. */
	iwn_hw_stop(sc);
}