/*	$OpenBSD: if_iwn.c,v 1.169 2016/07/20 16:24:37 stsp Exp $	*/

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
 * adapters.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/task.h>
#include <sys/endian.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/pci/if_iwnreg.h>
#include <dev/pci/if_iwnvar.h>

static const struct pci_matchid iwn_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_4965_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_4965_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5100_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5100_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5150_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5150_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5300_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5300_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5350_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5350_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1000_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1000_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6300_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6300_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6200_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6200_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6050_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6050_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6005_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6005_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6030_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6030_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1030_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1030_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_100_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_100_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_130_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_130_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6235_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6235_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2230_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2230_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2200_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2200_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_135_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_135_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_105_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_105_2 },
};

int		iwn_match(struct device *, void *, void *);
void		iwn_attach(struct device *, struct device *, void *);
int		iwn4965_attach(struct iwn_softc *, pci_product_id_t);
int		iwn5000_attach(struct iwn_softc *, pci_product_id_t);
#if NBPFILTER > 0
void		iwn_radiotap_attach(struct iwn_softc *);
#endif
int		iwn_detach(struct device *, int);
int		iwn_activate(struct device *, int);
void		iwn_wakeup(struct iwn_softc *);
void		iwn_init_task(void *);
int		iwn_nic_lock(struct iwn_softc *);
int		iwn_eeprom_lock(struct iwn_softc *);
int		iwn_init_otprom(struct iwn_softc *);
int		iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
int		iwn_dma_contig_alloc(bus_dma_tag_t, struct iwn_dma_info *,
		    void **, bus_size_t, bus_size_t);
void		iwn_dma_contig_free(struct iwn_dma_info *);
int		iwn_alloc_sched(struct iwn_softc *);
void		iwn_free_sched(struct iwn_softc *);
int		iwn_alloc_kw(struct iwn_softc *);
void		iwn_free_kw(struct iwn_softc *);
int		iwn_alloc_ict(struct iwn_softc *);
void		iwn_free_ict(struct iwn_softc *);
int		iwn_alloc_fwmem(struct iwn_softc *);
void		iwn_free_fwmem(struct iwn_softc *);
int		iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
void		iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
void		iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
int		iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
		    int);
void		iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
void		iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
void		iwn5000_ict_reset(struct iwn_softc *);
int		iwn_read_eeprom(struct iwn_softc *);
void		iwn4965_read_eeprom(struct iwn_softc *);
void		iwn4965_print_power_group(struct iwn_softc *, int);
void		iwn5000_read_eeprom(struct iwn_softc *);
void		iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
void		iwn_read_eeprom_enhinfo(struct iwn_softc *);
struct ieee80211_node *iwn_node_alloc(struct ieee80211com *);
void		iwn_newassoc(struct ieee80211com *, struct ieee80211_node *,
		    int);
int		iwn_media_change(struct ifnet *);
int		iwn_newstate(struct ieee80211com *, enum ieee80211_state, int);
void		iwn_iter_func(void *, struct ieee80211_node *);
void		iwn_calib_timeout(void *);
int		iwn_ccmp_decap(struct iwn_softc *, struct mbuf *,
		    struct ieee80211_node *);
void		iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
void		iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
void		iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
void		iwn5000_rx_calib_results(struct iwn_softc *,
		    struct iwn_rx_desc *, struct iwn_rx_data *);
void		iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
void		iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
void		iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
void		iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
		    uint8_t);
void		iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
void		iwn_notif_intr(struct iwn_softc *);
void		iwn_wakeup_intr(struct iwn_softc *);
void		iwn_fatal_intr(struct iwn_softc *);
int		iwn_intr(void *);
void		iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
void		iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
void		iwn5000_reset_sched(struct iwn_softc *, int, int);
int		iwn_tx(struct iwn_softc *, struct mbuf *,
		    struct ieee80211_node *);
void		iwn_start(struct ifnet *);
void		iwn_watchdog(struct ifnet *);
int		iwn_ioctl(struct ifnet *, u_long, caddr_t);
int		iwn_cmd(struct iwn_softc *, int, const void *, int, int);
int		iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
int		iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
int		iwn_set_link_quality(struct iwn_softc *,
		    struct ieee80211_node *);
int		iwn_add_broadcast_node(struct iwn_softc *, int, int);
void		iwn_updateedca(struct ieee80211com *);
void		iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
int		iwn_set_critical_temp(struct iwn_softc *);
int		iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
void		iwn4965_power_calibration(struct iwn_softc *, int);
int		iwn4965_set_txpower(struct iwn_softc *, int);
int		iwn5000_set_txpower(struct iwn_softc *, int);
int		iwn4965_get_rssi(const struct iwn_rx_stat *);
int		iwn5000_get_rssi(const struct iwn_rx_stat *);
int		iwn_get_noise(const struct iwn_rx_general_stats *);
int		iwn4965_get_temperature(struct iwn_softc *);
int		iwn5000_get_temperature(struct iwn_softc *);
int		iwn_init_sensitivity(struct iwn_softc *);
void		iwn_collect_noise(struct iwn_softc *,
		    const struct iwn_rx_general_stats *);
int		iwn4965_init_gains(struct iwn_softc *);
int		iwn5000_init_gains(struct iwn_softc *);
int		iwn4965_set_gains(struct iwn_softc *);
int		iwn5000_set_gains(struct iwn_softc *);
void		iwn_tune_sensitivity(struct iwn_softc *,
		    const struct iwn_rx_stats *);
int		iwn_send_sensitivity(struct iwn_softc *);
int		iwn_set_pslevel(struct iwn_softc *, int, int, int);
int		iwn_send_temperature_offset(struct iwn_softc *);
int		iwn_send_btcoex(struct iwn_softc *);
int		iwn_send_advanced_btcoex(struct iwn_softc *);
int		iwn5000_runtime_calib(struct iwn_softc *);
int		iwn_config(struct iwn_softc *);
uint16_t	iwn_get_active_dwell_time(struct iwn_softc *, uint16_t, uint8_t);
uint16_t	iwn_limit_dwell(struct iwn_softc *, uint16_t);
uint16_t	iwn_get_passive_dwell_time(struct iwn_softc *, uint16_t);
int		iwn_scan(struct iwn_softc *, uint16_t);
int		iwn_auth(struct iwn_softc *);
int		iwn_run(struct iwn_softc *);
int		iwn_set_key(struct ieee80211com *, struct ieee80211_node *,
		    struct ieee80211_key *);
void		iwn_delete_key(struct ieee80211com *, struct ieee80211_node *,
		    struct ieee80211_key *);
void		iwn_update_htprot(struct ieee80211com *,
		    struct ieee80211_node *);
int		iwn_ampdu_rx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
void		iwn_ampdu_rx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
int		iwn_ampdu_tx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
void		iwn_ampdu_tx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
void		iwn4965_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, uint8_t, uint16_t);
void		iwn4965_ampdu_tx_stop(struct iwn_softc *,
		    uint8_t, uint16_t);
void		iwn5000_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, uint8_t, uint16_t);
void		iwn5000_ampdu_tx_stop(struct iwn_softc *,
		    uint8_t, uint16_t);
int		iwn5000_query_calibration(struct iwn_softc *);
int		iwn5000_send_calibration(struct iwn_softc *);
int		iwn5000_send_wimax_coex(struct iwn_softc *);
int		iwn5000_crystal_calib(struct iwn_softc *);
int		iwn6000_temp_offset_calib(struct iwn_softc *);
int		iwn2000_temp_offset_calib(struct iwn_softc *);
int		iwn4965_post_alive(struct iwn_softc *);
int		iwn5000_post_alive(struct iwn_softc *);
int		iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
		    int);
int		iwn4965_load_firmware(struct iwn_softc *);
int		iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
		    const uint8_t *, int);
int		iwn5000_load_firmware(struct iwn_softc *);
int		iwn_read_firmware_leg(struct iwn_softc *,
		    struct iwn_fw_info *);
int		iwn_read_firmware_tlv(struct iwn_softc *,
		    struct iwn_fw_info *, uint16_t);
int		iwn_read_firmware(struct iwn_softc *);
int		iwn_clock_wait(struct iwn_softc *);
int		iwn_apm_init(struct iwn_softc *);
void		iwn_apm_stop_master(struct iwn_softc *);
void		iwn_apm_stop(struct iwn_softc *);
int		iwn4965_nic_config(struct iwn_softc *);
int		iwn5000_nic_config(struct iwn_softc *);
int		iwn_hw_prepare(struct iwn_softc *);
int		iwn_hw_init(struct iwn_softc *);
void		iwn_hw_stop(struct iwn_softc *);
int		iwn_init(struct ifnet *);
void		iwn_stop(struct ifnet *, int);

#ifdef IWN_DEBUG
#define DPRINTF(x)	do { if (iwn_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwn_debug >= (n)) printf x; } while (0)
int iwn_debug = 1;
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#endif

struct cfdriver iwn_cd = {
	NULL, "iwn", DV_IFNET
};

struct cfattach iwn_ca = {
	sizeof (struct iwn_softc), iwn_match, iwn_attach, iwn_detach,
	iwn_activate
};

int
iwn_match(struct device *parent, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, iwn_devices,
	    nitems(iwn_devices));
}

void
iwn_attach(struct device *parent, struct device *self, void *aux)
{
	struct iwn_softc *sc = (struct iwn_softc *)self;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct pci_attach_args *pa = aux;
	const char *intrstr;
	pci_intr_handle_t ih;
	pcireg_t memtype, reg;
	int i, error;

	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
	if (error == 0) {
		printf(": PCIe capability structure not found!\n");
		return;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	if (reg & 0xff00)
		pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* Hardware bug workaround. */
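	/*
	 * The PCI INTx disable bit may be found set here; clear it so that
	 * legacy (non-MSI) interrupts can be delivered.
	 */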
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
		DPRINTF(("PCIe INTx Disable set\n"));
		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    PCI_COMMAND_STATUS_REG, reg);
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IWN_PCI_BAR0);
	error = pci_mapreg_map(pa, IWN_PCI_BAR0, memtype, 0, &sc->sc_st,
	    &sc->sc_sh, NULL, &sc->sc_sz, 0);
	if (error != 0) {
		printf(": can't map mem space\n");
		return;
	}

	/* Install interrupt handler. */
	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(sc->sc_pct, ih);
	sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwn_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	/* Read hardware revision and attach. */
	sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0x1f;
	if (sc->hw_type == IWN_HW_REV_TYPE_4965)
		error = iwn4965_attach(sc, PCI_PRODUCT(pa->pa_id));
	else
		error = iwn5000_attach(sc, PCI_PRODUCT(pa->pa_id));
	if (error != 0) {
		printf(": could not attach device\n");
		return;
	}

	if ((error = iwn_hw_prepare(sc)) != 0) {
		printf(": hardware not ready\n");
		return;
	}

	/* Read MAC address, channels, etc from EEPROM. */
	if ((error = iwn_read_eeprom(sc)) != 0) {
		printf(": could not read EEPROM\n");
		return;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwn_alloc_fwmem(sc)) != 0) {
		printf(": could not allocate memory for firmware\n");
		return;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwn_alloc_kw(sc)) != 0) {
		printf(": could not allocate keep warm page\n");
		goto fail1;
	}

	/* Allocate ICT table for 5000 Series. */
	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
	    (error = iwn_alloc_ict(sc)) != 0) {
		printf(": could not allocate ICT table\n");
		goto fail2;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwn_alloc_sched(sc)) != 0) {
		printf(": could not allocate TX scheduler rings\n");
		goto fail3;
	}

	/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
	for (i = 0; i < sc->ntxqs; i++) {
		if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
			printf(": could not allocate TX ring %d\n", i);
			goto fail4;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		printf(": could not allocate RX ring\n");
		goto fail4;
	}

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);

	/* Count the number of available chains. */
	sc->ntxchains =
	    ((sc->txchainmask >> 2) & 1) +
	    ((sc->txchainmask >> 1) & 1) +
	    ((sc->txchainmask >> 0) & 1);
	sc->nrxchains =
	    ((sc->rxchainmask >> 2) & 1) +
	    ((sc->rxchainmask >> 1) & 1) +
	    ((sc->rxchainmask >> 0) & 1);
	printf(", MIMO %dT%dR, %.4s, address %s\n", sc->ntxchains,
	    sc->nrxchains, sc->eeprom_domain, ether_sprintf(ic->ic_myaddr));

	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_RSN |		/* WPA/RSN */
	    IEEE80211_C_MONITOR |	/* monitor mode supported */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
	    IEEE80211_C_PMGT;		/* power saving supported */

	/* No optional HT features supported for now, */
	ic->ic_htcaps = 0;
	ic->ic_htxcaps = 0;
	ic->ic_txbfcaps = 0;
	ic->ic_aselcaps = 0;
	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
#ifdef notyet
	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
		/* Set HT capabilities. */
		ic->ic_htcaps =
#if IWN_RBUF_SIZE == 8192
		    IEEE80211_HTCAP_AMSDU7935 |
#endif
		    IEEE80211_HTCAP_CBW20_40 |
		    IEEE80211_HTCAP_SGI20 |
		    IEEE80211_HTCAP_SGI40;
		if (sc->hw_type != IWN_HW_REV_TYPE_4965)
			ic->ic_htcaps |= IEEE80211_HTCAP_GF;
		if (sc->hw_type == IWN_HW_REV_TYPE_6050)
			ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
		else
			ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
	}
#endif	/* notyet */

	/* Set supported legacy rates. */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
	if (sc->sc_flags & IWN_FLAG_HAS_5GHZ) {
		ic->ic_sup_rates[IEEE80211_MODE_11A] =
		    ieee80211_std_rateset_11a;
	}
	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
		/* Set supported HT rates. */
		ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
#ifdef notyet
		if (sc->nrxchains > 1)
			ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
		if (sc->nrxchains > 2)
			ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
#endif
	}

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[0];

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = iwn_ioctl;
	ifp->if_start = iwn_start;
	ifp->if_watchdog = iwn_watchdog;
	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	if_attach(ifp);
	ieee80211_ifattach(ifp);
	ic->ic_node_alloc = iwn_node_alloc;
	ic->ic_newassoc = iwn_newassoc;
	ic->ic_updateedca = iwn_updateedca;
	ic->ic_set_key = iwn_set_key;
	ic->ic_delete_key = iwn_delete_key;
	ic->ic_update_htprot = iwn_update_htprot;
	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
#ifdef notyet
	ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
	ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
#endif

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwn_newstate;
	ieee80211_media_init(ifp, iwn_media_change, ieee80211_media_status);

	sc->amrr.amrr_min_success_threshold = 1;
	sc->amrr.amrr_max_success_threshold = 15;

#if NBPFILTER > 0
	iwn_radiotap_attach(sc);
#endif
	timeout_set(&sc->calib_to, iwn_calib_timeout, sc);
	task_set(&sc->init_task, iwn_init_task, sc);
	return;

	/* Free allocated memory if something failed during attachment. */
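	/*
	 * The error labels fall through: each one releases the resources
	 * acquired before the step that failed, in reverse order of
	 * allocation.
	 */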
fail4:	while (--i >= 0)
		iwn_free_tx_ring(sc, &sc->txq[i]);
	iwn_free_sched(sc);
fail3:	if (sc->ict != NULL)
		iwn_free_ict(sc);
fail2:	iwn_free_kw(sc);
fail1:	iwn_free_fwmem(sc);
}

int
iwn4965_attach(struct iwn_softc *sc, pci_product_id_t pid)
{
	struct iwn_ops *ops = &sc->ops;

	ops->load_firmware = iwn4965_load_firmware;
	ops->read_eeprom = iwn4965_read_eeprom;
	ops->post_alive = iwn4965_post_alive;
	ops->nic_config = iwn4965_nic_config;
	ops->update_sched = iwn4965_update_sched;
	ops->get_temperature = iwn4965_get_temperature;
	ops->get_rssi = iwn4965_get_rssi;
	ops->set_txpower = iwn4965_set_txpower;
	ops->init_gains = iwn4965_init_gains;
	ops->set_gains = iwn4965_set_gains;
	ops->add_node = iwn4965_add_node;
	ops->tx_done = iwn4965_tx_done;
	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
	sc->ntxqs = IWN4965_NTXQUEUES;
	sc->ndmachnls = IWN4965_NDMACHNLS;
	sc->broadcast_id = IWN4965_ID_BROADCAST;
	sc->rxonsz = IWN4965_RXONSZ;
	sc->schedsz = IWN4965_SCHEDSZ;
	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
	sc->fwsz = IWN4965_FWSZ;
	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
	sc->limits = &iwn4965_sensitivity_limits;
	sc->fwname = "iwn-4965";
	/* Override chains masks, ROM is known to be broken. */
	sc->txchainmask = IWN_ANT_AB;
	sc->rxchainmask = IWN_ANT_ABC;

	return 0;
}

int
iwn5000_attach(struct iwn_softc *sc, pci_product_id_t pid)
{
	struct iwn_ops *ops = &sc->ops;

	ops->load_firmware = iwn5000_load_firmware;
	ops->read_eeprom = iwn5000_read_eeprom;
	ops->post_alive = iwn5000_post_alive;
	ops->nic_config = iwn5000_nic_config;
	ops->update_sched = iwn5000_update_sched;
	ops->get_temperature = iwn5000_get_temperature;
	ops->get_rssi = iwn5000_get_rssi;
	ops->set_txpower = iwn5000_set_txpower;
	ops->init_gains = iwn5000_init_gains;
	ops->set_gains = iwn5000_set_gains;
	ops->add_node = iwn5000_add_node;
	ops->tx_done = iwn5000_tx_done;
	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
	sc->ntxqs = IWN5000_NTXQUEUES;
	sc->ndmachnls = IWN5000_NDMACHNLS;
	sc->broadcast_id = IWN5000_ID_BROADCAST;
	sc->rxonsz = IWN5000_RXONSZ;
	sc->schedsz = IWN5000_SCHEDSZ;
	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
	sc->fwsz = IWN5000_FWSZ;
	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;

	switch (sc->hw_type) {
	case IWN_HW_REV_TYPE_5100:
		sc->limits = &iwn5000_sensitivity_limits;
		sc->fwname = "iwn-5000";
		/* Override chains masks, ROM is known to be broken. */
		sc->txchainmask = IWN_ANT_B;
		sc->rxchainmask = IWN_ANT_AB;
		break;
	case IWN_HW_REV_TYPE_5150:
		sc->limits = &iwn5150_sensitivity_limits;
		sc->fwname = "iwn-5150";
		break;
	case IWN_HW_REV_TYPE_5300:
	case IWN_HW_REV_TYPE_5350:
		sc->limits = &iwn5000_sensitivity_limits;
		sc->fwname = "iwn-5000";
		break;
	case IWN_HW_REV_TYPE_1000:
		sc->limits = &iwn1000_sensitivity_limits;
		sc->fwname = "iwn-1000";
		break;
	case IWN_HW_REV_TYPE_6000:
		sc->limits = &iwn6000_sensitivity_limits;
		sc->fwname = "iwn-6000";
		if (pid == PCI_PRODUCT_INTEL_WL_6200_1 ||
		    pid == PCI_PRODUCT_INTEL_WL_6200_2) {
			sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
			/* Override chains masks, ROM is known to be broken. */
			sc->txchainmask = IWN_ANT_BC;
			sc->rxchainmask = IWN_ANT_BC;
		}
		break;
	case IWN_HW_REV_TYPE_6050:
		sc->limits = &iwn6000_sensitivity_limits;
		sc->fwname = "iwn-6050";
		break;
	case IWN_HW_REV_TYPE_6005:
		sc->limits = &iwn6000_sensitivity_limits;
		if (pid != PCI_PRODUCT_INTEL_WL_6005_1 &&
		    pid != PCI_PRODUCT_INTEL_WL_6005_2) {
			sc->fwname = "iwn-6030";
			sc->sc_flags |= IWN_FLAG_ADV_BT_COEX;
		} else
			sc->fwname = "iwn-6005";
		break;
	case IWN_HW_REV_TYPE_2030:
		sc->limits = &iwn2000_sensitivity_limits;
		sc->fwname = "iwn-2030";
		sc->sc_flags |= IWN_FLAG_ADV_BT_COEX;
		break;
	case IWN_HW_REV_TYPE_2000:
		sc->limits = &iwn2000_sensitivity_limits;
		sc->fwname = "iwn-2000";
		break;
	case IWN_HW_REV_TYPE_135:
		sc->limits = &iwn2000_sensitivity_limits;
		sc->fwname = "iwn-135";
		sc->sc_flags |= IWN_FLAG_ADV_BT_COEX;
		break;
	case IWN_HW_REV_TYPE_105:
		sc->limits = &iwn2000_sensitivity_limits;
		sc->fwname = "iwn-105";
		break;
	default:
		printf(": adapter type %d not supported\n", sc->hw_type);
		return ENOTSUP;
	}
	return 0;
}

#if NBPFILTER > 0
/*
 * Attach the interface to 802.11 radiotap.
 */
void
iwn_radiotap_attach(struct iwn_softc *sc)
{
	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWN_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWN_TX_RADIOTAP_PRESENT);
}
#endif

int
iwn_detach(struct device *self, int flags)
{
	struct iwn_softc *sc = (struct iwn_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int qid;

	timeout_del(&sc->calib_to);
	task_del(systq, &sc->init_task);

	/* Uninstall interrupt handler. */
	if (sc->sc_ih != NULL)
		pci_intr_disestablish(sc->sc_pct, sc->sc_ih);

	/* Free DMA resources. */
	iwn_free_rx_ring(sc, &sc->rxq);
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_free_tx_ring(sc, &sc->txq[qid]);
	iwn_free_sched(sc);
	iwn_free_kw(sc);
	if (sc->ict != NULL)
		iwn_free_ict(sc);
	iwn_free_fwmem(sc);

	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);

	ieee80211_ifdetach(ifp);
	if_detach(ifp);

	return 0;
}

int
iwn_activate(struct device *self, int act)
{
	struct iwn_softc *sc = (struct iwn_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			iwn_stop(ifp, 0);
		break;
	case DVACT_WAKEUP:
		iwn_wakeup(sc);
		break;
	}

	return 0;
}

void
iwn_wakeup(struct iwn_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	if (reg & 0xff00)
		pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
	iwn_init_task(sc);
}

void
iwn_init_task(void *arg1)
{
	struct iwn_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s;

	s = splnet();
	while (sc->sc_flags & IWN_FLAG_BUSY)
		tsleep(&sc->sc_flags, 0, "iwnpwr", 0);
	sc->sc_flags |= IWN_FLAG_BUSY;

	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwn_init(ifp);

	sc->sc_flags &= ~IWN_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	splx(s);
}

int
iwn_nic_lock(struct iwn_softc *sc)
{
	int ntries;

	/* Request exclusive access to NIC. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);

	/* Spin until we actually get the lock. */
	for (ntries = 0; ntries < 1000; ntries++) {
		if ((IWN_READ(sc, IWN_GP_CNTRL) &
		    (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}

static __inline void
iwn_nic_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
}

static __inline uint32_t
iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_PRPH_RDATA);
}

static __inline void
iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
}

static __inline void
iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
}

static __inline void
iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
}

static __inline void
iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
    const uint32_t *data, int count)
{
	for (; count > 0; count--, data++, addr += 4)
		iwn_prph_write(sc, addr, *data);
}

static __inline uint32_t
iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_MEM_RDATA);
}

static __inline void
iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
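	/* Order the address write before the data write below. */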
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_MEM_WDATA, data);
}

static __inline void
iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
{
	uint32_t tmp;

	tmp = iwn_mem_read(sc, addr & ~3);
	if (addr & 3)
		tmp = (tmp & 0x0000ffff) | data << 16;
	else
		tmp = (tmp & 0xffff0000) | data;
	iwn_mem_write(sc, addr & ~3, tmp);
}

static __inline void
iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
    int count)
{
	for (; count > 0; count--, addr += 4)
		*data++ = iwn_mem_read(sc, addr);
}

static __inline void
iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
    int count)
{
	for (; count > 0; count--, addr += 4)
		iwn_mem_write(sc, addr, val);
}

int
iwn_eeprom_lock(struct iwn_softc *sc)
{
	int i, ntries;

	for (i = 0; i < 100; i++) {
		/* Request exclusive access to EEPROM. */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);

		/* Spin until we actually get the lock. */
		for (ntries = 0; ntries < 100; ntries++) {
			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
				return 0;
			DELAY(10);
		}
	}
	return ETIMEDOUT;
}

static __inline void
iwn_eeprom_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
}

/*
 * Initialize access by host to One Time Programmable ROM.
 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
 */
int
iwn_init_otprom(struct iwn_softc *sc)
{
	uint16_t prev, base, next;
	int count, error;

	/* Wait for clock stabilization before accessing prph. */
	if ((error = iwn_clock_wait(sc)) != 0)
		return error;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	DELAY(5);
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	iwn_nic_unlock(sc);

	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
	if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
		    IWN_RESET_LINK_PWR_MGMT_DIS);
	}
	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
	/* Clear ECC status. */
	IWN_SETBITS(sc, IWN_OTP_GP,
	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);

	/*
	 * Find the block before last block (contains the EEPROM image)
	 * for HW without OTP shadow RAM.
	 */
	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
		/* Switch to absolute addressing mode. */
		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
		base = 0;
		for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
			error = iwn_read_prom_data(sc, base, &next, 2);
			if (error != 0)
				return error;
			if (next == 0)	/* End of linked-list. */
				break;
			prev = base;
			base = letoh16(next);
		}
		if (count == 0 || count == IWN1000_OTP_NBLOCKS)
			return EIO;
		/* Skip "next" word. */
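		/*
		 * "prev" is the block before the last one found above; its
		 * payload, starting right after the link word, holds the
		 * EEPROM image.
		 */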
		sc->prom_base = prev + 1;
	}
	return 0;
}

int
iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
{
	uint8_t *out = data;
	uint32_t val, tmp;
	int ntries;

	addr += sc->prom_base;
	for (; count > 0; count -= 2, addr++) {
		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
		for (ntries = 0; ntries < 10; ntries++) {
			val = IWN_READ(sc, IWN_EEPROM);
			if (val & IWN_EEPROM_READ_VALID)
				break;
			DELAY(5);
		}
		if (ntries == 10) {
			printf("%s: timeout reading ROM at 0x%x\n",
			    sc->sc_dev.dv_xname, addr);
			return ETIMEDOUT;
		}
		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
			/* OTPROM, check for ECC errors. */
			tmp = IWN_READ(sc, IWN_OTP_GP);
			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
				printf("%s: OTPROM ECC error at 0x%x\n",
				    sc->sc_dev.dv_xname, addr);
				return EIO;
			}
			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
				/* Correctable ECC error, clear bit. */
				IWN_SETBITS(sc, IWN_OTP_GP,
				    IWN_OTP_GP_ECC_CORR_STTS);
			}
		}
		*out++ = val >> 16;
		if (count > 1)
			*out++ = val >> 24;
	}
	return 0;
}

int
iwn_dma_contig_alloc(bus_dma_tag_t tag, struct iwn_dma_info *dma, void **kvap,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, error;

	dma->tag = tag;
	dma->size = size;

	error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0)
		goto fail;

	error = bus_dmamem_map(tag, &dma->seg, 1, size, &dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0)
		goto fail;

	error = bus_dmamap_load_raw(tag, dma->map, &dma->seg, 1, size,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);

	dma->paddr = dma->map->dm_segs[0].ds_addr;
	if (kvap != NULL)
		*kvap = dma->vaddr;

	return 0;

fail:	iwn_dma_contig_free(dma);
	return error;
}

void
iwn_dma_contig_free(struct iwn_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}

int
iwn_alloc_sched(struct iwn_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    (void **)&sc->sched, sc->schedsz, 1024);
}

void
iwn_free_sched(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->sched_dma);
}

int
iwn_alloc_kw(struct iwn_softc *sc)
{
	/* "Keep Warm" page must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, NULL, 4096,
	    4096);
}

void
iwn_free_kw(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->kw_dma);
}

int
iwn_alloc_ict(struct iwn_softc *sc)
{
	/* ICT table must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    (void **)&sc->ict, IWN_ICT_SIZE, 4096);
}

void
iwn_free_ict(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->ict_dma);
}

int
iwn_alloc_fwmem(struct iwn_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, NULL,
	    sc->fwsz, 16);
}

void
iwn_free_fwmem(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->fw_dma);
}

int
iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
	    (void **)&ring->desc, size, 256);
	if (error != 0) {
		printf("%s: could not allocate RX ring DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* Allocate RX status area (16-byte aligned). */
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    (void **)&ring->stat, sizeof (struct iwn_rx_status), 16);
	if (error != 0) {
		printf("%s: could not allocate RX status DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];

		error = bus_dmamap_create(sc->sc_dmat, IWN_RBUF_SIZE, 1,
		    IWN_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (error != 0) {
			printf("%s: could not create RX buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		data->m = MCLGETI(NULL, M_DONTWAIT, NULL, IWN_RBUF_SIZE);
		if (data->m == NULL) {
			printf("%s: could not allocate RX mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOBUFS;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), IWN_RBUF_SIZE, NULL,
		    BUS_DMA_NOWAIT | BUS_DMA_READ);
		if (error != 0) {
			printf("%s: can't map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			goto fail;
		}

		/* Set physical address of RX buffer (256-byte aligned). */
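		/*
		 * The descriptor stores the buffer's DMA address shifted
		 * right by 8 bits; the low 8 bits are zero since the buffer
		 * is 256-byte aligned.
		 */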
		ring->desc[i] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	}

	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0, size,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	iwn_free_rx_ring(sc, ring);
	return error;
}

void
iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	int ntries;

	if (iwn_nic_lock(sc) == 0) {
		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
			    IWN_FH_RX_STATUS_IDLE)
				break;
			DELAY(10);
		}
		iwn_nic_unlock(sc);
	}
	ring->cur = 0;
	sc->last_rx_valid = 0;
}

void
iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	int i;

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
	    (void **)&ring->desc, size, 256);
	if (error != 0) {
		printf("%s: could not allocate TX ring DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	/*
	 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 * XXX Do we really need to allocate descriptors for other rings?
	 */
	if (qid > 4)
		return 0;

	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma,
	    (void **)&ring->cmd, size, 4);
	if (error != 0) {
		printf("%s: could not allocate TX cmd DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + 12;
		paddr += sizeof (struct iwn_tx_cmd);

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    IWN_MAX_SCATTER - 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (error != 0) {
			printf("%s: could not create TX buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}
	return 0;

fail:	iwn_free_tx_ring(sc, ring);
	return error;
}

void
iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

void
iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

void
iwn5000_ict_reset(struct iwn_softc *sc)
{
	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);

	/* Reset ICT table. */
	memset(sc->ict, 0, IWN_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	DPRINTF(("enabling ICT\n"));
	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);

	/* Enable periodic RX interrupt. */
	sc->int_mask |= IWN_INT_RX_PERIODIC;
	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWN_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
}

int
iwn_read_eeprom(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t val;
	int error;

	/* Check whether adapter has an EEPROM or an OTPROM. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
	DPRINTF(("%s found\n", (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ?
	    "OTPROM" : "EEPROM"));

	/* Adapter has to be powered on for EEPROM access to work. */
	if ((error = iwn_apm_init(sc)) != 0) {
		printf("%s: could not power ON adapter\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
		printf("%s: bad ROM signature\n", sc->sc_dev.dv_xname);
		return EIO;
	}
	if ((error = iwn_eeprom_lock(sc)) != 0) {
		printf("%s: could not lock ROM (error=%d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}
	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
		if ((error = iwn_init_otprom(sc)) != 0) {
			printf("%s: could not initialize OTPROM\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
	}

	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
	DPRINTF(("SKU capabilities=0x%04x\n", letoh16(val)));
	/* Check if HT support is bonded out. */
	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
		sc->sc_flags |= IWN_FLAG_HAS_11N;

	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
	sc->rfcfg = letoh16(val);
	DPRINTF(("radio config=0x%04x\n", sc->rfcfg));
	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
	if (sc->txchainmask == 0)
		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
	if (sc->rxchainmask == 0)
		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);

	/* Read MAC address. */
	iwn_read_prom_data(sc, IWN_EEPROM_MAC, ic->ic_myaddr, 6);

	/* Read adapter-specific information from EEPROM. */
	ops->read_eeprom(sc);

	iwn_apm_stop(sc);	/* Power OFF adapter. */

	iwn_eeprom_unlock(sc);
	return 0;
}

void
iwn4965_read_eeprom(struct iwn_softc *sc)
{
	uint32_t addr;
	uint16_t val;
	int i;

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < 5; i++) {
		addr = iwn4965_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
	sc->maxpwr2GHz = val & 0xff;
	sc->maxpwr5GHz = val >> 8;
	/* Check that EEPROM values are within valid range. */
	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
		sc->maxpwr5GHz = 38;
	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
		sc->maxpwr2GHz = 38;
	DPRINTF(("maxpwr 2GHz=%d 5GHz=%d\n", sc->maxpwr2GHz, sc->maxpwr5GHz));

	/* Read samples for each TX power group. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
	    sizeof sc->bands);

	/* Read voltage at which samples were taken. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
	sc->eeprom_voltage = (int16_t)letoh16(val);
	DPRINTF(("voltage=%d (in 0.3V)\n", sc->eeprom_voltage));

#ifdef IWN_DEBUG
	/* Print samples. */
	if (iwn_debug > 0) {
		for (i = 0; i < IWN_NBANDS; i++)
			iwn4965_print_power_group(sc, i);
	}
#endif
}

#ifdef IWN_DEBUG
void
iwn4965_print_power_group(struct iwn_softc *sc, int i)
{
	struct iwn4965_eeprom_band *band = &sc->bands[i];
	struct iwn4965_eeprom_chan_samples *chans = band->chans;
	int j, c;

	printf("===band %d===\n", i);
	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
	printf("chan1 num=%d\n", chans[0].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			printf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[0].samples[c][j].temp,
			    chans[0].samples[c][j].gain,
			    chans[0].samples[c][j].power,
			    chans[0].samples[c][j].pa_det);
		}
	}
	printf("chan2 num=%d\n", chans[1].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			printf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[1].samples[c][j].temp,
			    chans[1].samples[c][j].gain,
			    chans[1].samples[c][j].power,
			    chans[1].samples[c][j].pa_det);
		}
	}
}
#endif

void
iwn5000_read_eeprom(struct iwn_softc *sc)
{
	struct iwn5000_eeprom_calib_hdr hdr;
	int32_t volt;
	uint32_t base, addr;
	uint16_t val;
	int i;

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = letoh16(val);
	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
	    sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < 5; i++) {
		addr = base + iwn5000_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read enhanced TX power information for 6000 Series. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
		iwn_read_eeprom_enhinfo(sc);

	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
	base = letoh16(val);
	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
	DPRINTF(("calib version=%u pa type=%u voltage=%u\n",
	    hdr.version, hdr.pa_type, letoh16(hdr.volt)));
	sc->calib_ver = hdr.version;

	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
	    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
	    sc->hw_type == IWN_HW_REV_TYPE_135 ||
	    sc->hw_type == IWN_HW_REV_TYPE_105) {
		sc->eeprom_voltage = letoh16(hdr.volt);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp = letoh16(val);
		iwn_read_prom_data(sc, base + IWN2000_EEPROM_RAWTEMP, &val, 2);
		sc->eeprom_rawtemp = letoh16(val);
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* Compute temperature offset. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp = letoh16(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		volt = letoh16(val);
		sc->temp_off = sc->eeprom_temp - (volt / -5);
		DPRINTF(("temp=%d volt=%d offset=%dK\n",
		    sc->eeprom_temp, volt, sc->temp_off));
	} else {
		/* Read crystal calibration. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
		    &sc->eeprom_crystal, sizeof (uint32_t));
		DPRINTF(("crystal calibration 0x%08x\n",
		    letoh32(sc->eeprom_crystal)));
	}
}

void
iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
{
	struct ieee80211com *ic = &sc->sc_ic;
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND];
	uint8_t chan;
	int i;

	iwn_read_prom_data(sc, addr, channels,
	    band->nchan * sizeof (struct iwn_eeprom_chan));

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID))
			continue;

		chan = band->chan[i];

		if (n == 0) {	/* 2GHz band */
			ic->ic_channels[chan].ic_freq =
			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ);
			ic->ic_channels[chan].ic_flags =
			    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
			    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;

		} else {	/* 5GHz band */
			/*
			 * Some adapters support channels 7, 8, 11 and 12
			 * both in the 2GHz and 4.9GHz bands.
			 * Because of limitations in our net80211 layer,
			 * we don't support them in the 4.9GHz band.
			 */
			if (chan <= 14)
				continue;

			ic->ic_channels[chan].ic_freq =
			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ);
			ic->ic_channels[chan].ic_flags = IEEE80211_CHAN_A;
			/* We have at least one valid 5GHz channel. */
			sc->sc_flags |= IWN_FLAG_HAS_5GHZ;
		}

		/* Is active scan allowed on this channel? */
		if (!(channels[i].flags & IWN_EEPROM_CHAN_ACTIVE)) {
			ic->ic_channels[chan].ic_flags |=
			    IEEE80211_CHAN_PASSIVE;
		}

		/* Save maximum allowed TX power for this channel. */
		sc->maxpwr[chan] = channels[i].maxpwr;

		if (sc->sc_flags & IWN_FLAG_HAS_11N)
			ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_HT;

		DPRINTF(("adding chan %d flags=0x%x maxpwr=%d\n",
		    chan, channels[i].flags, sc->maxpwr[chan]));
	}
}

void
iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
{
	struct iwn_eeprom_enhinfo enhinfo[35];
	uint16_t val, base;
	int8_t maxpwr;
	int i;

	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = letoh16(val);
	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
	    enhinfo, sizeof enhinfo);

	memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
	for (i = 0; i < nitems(enhinfo); i++) {
		if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0)
			continue;	/* Skip invalid entries. */

		maxpwr = 0;
		if (sc->txchainmask & IWN_ANT_A)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
		if (sc->txchainmask & IWN_ANT_B)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
		if (sc->txchainmask & IWN_ANT_C)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
		if (sc->ntxchains == 2)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
		else if (sc->ntxchains == 3)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
		maxpwr /= 2;	/* Convert half-dBm to dBm. */

		DPRINTF(("enhinfo %d, maxpwr=%d\n", i, maxpwr));
		sc->enh_maxpwr[i] = maxpwr;
	}
}

struct ieee80211_node *
iwn_node_alloc(struct ieee80211com *ic)
{
	return malloc(sizeof (struct iwn_node), M_DEVBUF, M_NOWAIT | M_ZERO);
}

void
iwn_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
{
	struct iwn_softc *sc = ic->ic_if.if_softc;
	struct iwn_node *wn = (void *)ni;
	uint8_t rate;
	int ridx, i;

	ieee80211_amrr_node_init(&sc->amrr, &wn->amn);
	/* Start at lowest available bit-rate, AMRR will raise. */
	ni->ni_txrate = 0;
	ni->ni_txmcs = 0;

	for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
		rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
			if (iwn_rates[ridx].plcp != IWN_PLCP_INVALID &&
			    iwn_rates[ridx].rate == rate)
				break;
		}
		wn->ridx[i] = ridx;
	}
}

int
iwn_media_change(struct ifnet *ifp)
{
	struct iwn_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int error;

	error = ieee80211_media_change(ifp);
	if (error != ENETRESET)
		return error;

	if (ic->ic_fixed_mcs != -1)
		sc->fixed_ridx = iwn_mcs2ridx[ic->ic_fixed_mcs];
	if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++)
			if (iwn_rates[ridx].plcp != IWN_PLCP_INVALID &&
			    iwn_rates[ridx].rate == rate)
				break;
		sc->fixed_ridx = ridx;
	}

	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwn_stop(ifp, 0);
		error = iwn_init(ifp);
	}
	return error;
}

int
iwn_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ifnet *ifp = &ic->ic_if;
	struct iwn_softc *sc = ifp->if_softc;
	int error;

	timeout_del(&sc->calib_to);

	switch (nstate) {
	case IEEE80211_S_SCAN:
		/* Make the link LED blink while we're scanning. */
		iwn_set_led(sc, IWN_LED_LINK, 10, 10);

		if ((error = iwn_scan(sc, IEEE80211_CHAN_2GHZ)) != 0) {
			printf("%s: could not initiate scan\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
		ic->ic_state = nstate;
		return 0;

	case IEEE80211_S_ASSOC:
		if (ic->ic_state != IEEE80211_S_RUN)
			break;
		/* FALLTHROUGH */
	case IEEE80211_S_AUTH:
		/* Reset state to handle reassociations correctly. */
		sc->rxon.associd = 0;
		sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
		sc->calib.state = IWN_CALIB_STATE_INIT;

		if ((error = iwn_auth(sc)) != 0) {
			printf("%s: could not move to auth state\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
		break;

	case IEEE80211_S_RUN:
		if ((error = iwn_run(sc)) != 0) {
			printf("%s: could not move to run state\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
		break;

	case IEEE80211_S_INIT:
		sc->calib.state = IWN_CALIB_STATE_INIT;
		break;
	}

	return sc->sc_newstate(ic, nstate, arg);
}

void
iwn_iter_func(void *arg, struct ieee80211_node *ni)
{
	struct iwn_softc *sc = arg;
	struct iwn_node *wn = (struct iwn_node *)ni;

	ieee80211_amrr_choose(&sc->amrr, ni, &wn->amn);
}

void
iwn_calib_timeout(void *arg)
{
	struct iwn_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	int s;

	s = splnet();
	if (ic->ic_fixed_rate == -1) {
		if (ic->ic_opmode == IEEE80211_M_STA)
			iwn_iter_func(sc, ic->ic_bss);
		else
			ieee80211_iterate_nodes(ic, iwn_iter_func, sc);
	}
	/* Force automatic TX power calibration every 60 secs. */
	if (++sc->calib_cnt >= 120) {
		uint32_t flags = 0;

		DPRINTFN(2, ("sending request for statistics\n"));
		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
		    sizeof flags, 1);
		sc->calib_cnt = 0;
	}
	splx(s);

	/* Automatic rate control triggered every 500ms. */
	timeout_add_msec(&sc->calib_to, 500);
}

int
iwn_ccmp_decap(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_key *k = &ni->ni_pairwise_key;
	struct ieee80211_frame *wh;
	struct ieee80211_rx_ba *ba;
	uint64_t pn, *prsc;
	uint8_t *ivp;
	uint8_t tid;
	int hdrlen, hasqos;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_get_hdrlen(wh);
	ivp = (uint8_t *)wh + hdrlen;

	/* Check that ExtIV bit is set. */
	if (!(ivp[3] & IEEE80211_WEP_EXTIV)) {
		DPRINTF(("CCMP decap ExtIV not set\n"));
		return 1;
	}
	hasqos = ieee80211_has_qos(wh);
	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
	ba = hasqos ? &ni->ni_rx_ba[tid] : NULL;
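	/* Per-TID replay counter (last accepted PN) for this key. */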
&ni->ni_rx_ba[tid] : NULL; 1863 prsc = &k->k_rsc[tid]; 1864 1865 /* Extract the 48-bit PN from the CCMP header. */ 1866 pn = (uint64_t)ivp[0] | 1867 (uint64_t)ivp[1] << 8 | 1868 (uint64_t)ivp[4] << 16 | 1869 (uint64_t)ivp[5] << 24 | 1870 (uint64_t)ivp[6] << 32 | 1871 (uint64_t)ivp[7] << 40; 1872 if (pn <= *prsc) { 1873 if (hasqos && ba->ba_state == IEEE80211_BA_AGREED) { 1874 /* 1875 * This is an A-MPDU subframe. 1876 * Such frames may be received out of order due to 1877 * legitimate retransmissions of failed subframes 1878 * in previous A-MPDUs. Duplicates will be handled 1879 * in ieee80211_input() as part of A-MPDU reordering. 1880 */ 1881 } else if (ieee80211_has_seq(wh)) { 1882 /* 1883 * Not necessarily a replayed frame since we did not 1884 * check the sequence number of the 802.11 header yet. 1885 */ 1886 int nrxseq, orxseq; 1887 1888 nrxseq = letoh16(*(u_int16_t *)wh->i_seq) >> 1889 IEEE80211_SEQ_SEQ_SHIFT; 1890 if (hasqos) 1891 orxseq = ni->ni_qos_rxseqs[tid]; 1892 else 1893 orxseq = ni->ni_rxseq; 1894 if (nrxseq < orxseq) { 1895 DPRINTF(("CCMP replayed (n=%d < o=%d)\n", 1896 nrxseq, orxseq)); 1897 ic->ic_stats.is_ccmp_replays++; 1898 return 1; 1899 } 1900 } else { 1901 DPRINTF(("CCMP replayed\n")); 1902 ic->ic_stats.is_ccmp_replays++; 1903 return 1; 1904 } 1905 } 1906 /* Update last seen packet number. */ 1907 *prsc = pn; 1908 1909 /* Clear Protected bit and strip IV. */ 1910 wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; 1911 memmove(mtod(m, caddr_t) + IEEE80211_CCMP_HDRLEN, wh, hdrlen); 1912 m_adj(m, IEEE80211_CCMP_HDRLEN); 1913 /* Strip MIC. */ 1914 m_adj(m, -IEEE80211_CCMP_MICLEN); 1915 return 0; 1916 } 1917 1918 /* 1919 * Process an RX_PHY firmware notification. This is usually immediately 1920 * followed by an MPDU_RX_DONE notification. 1921 */ 1922 void 1923 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, 1924 struct iwn_rx_data *data) 1925 { 1926 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); 1927 1928 DPRINTFN(2, ("received PHY stats\n")); 1929 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 1930 sizeof (*stat), BUS_DMASYNC_POSTREAD); 1931 1932 /* Save RX statistics, they will be used on MPDU_RX_DONE. */ 1933 memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); 1934 sc->last_rx_valid = IWN_LAST_RX_VALID; 1935 /* 1936 * The firmware does not send separate RX_PHY 1937 * notifications for A-MPDU subframes. 1938 */ 1939 if (stat->flags & htole16(IWN_STAT_FLAG_AGG)) 1940 sc->last_rx_valid |= IWN_LAST_RX_AMPDU; 1941 } 1942 1943 /* 1944 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 1945 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 1946 */ 1947 void 1948 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 1949 struct iwn_rx_data *data) 1950 { 1951 struct iwn_ops *ops = &sc->ops; 1952 struct ieee80211com *ic = &sc->sc_ic; 1953 struct ifnet *ifp = &ic->ic_if; 1954 struct iwn_rx_ring *ring = &sc->rxq; 1955 struct ieee80211_frame *wh; 1956 struct ieee80211_rxinfo rxi; 1957 struct ieee80211_node *ni; 1958 struct mbuf *m, *m1; 1959 struct iwn_rx_stat *stat; 1960 caddr_t head; 1961 uint32_t flags; 1962 int error, len, rssi; 1963 1964 if (desc->type == IWN_MPDU_RX_DONE) { 1965 /* Check for prior RX_PHY notification. 
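 * The PHY statistics saved by iwn_rx_phy() are needed below to compute the
 * RSSI and to fill the radiotap header, so an MPDU without a matching
 * RX_PHY notification is dropped.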
*/ 1966 if (!sc->last_rx_valid) { 1967 DPRINTF(("missing RX_PHY\n")); 1968 return; 1969 } 1970 sc->last_rx_valid &= ~IWN_LAST_RX_VALID; 1971 stat = &sc->last_rx_stat; 1972 if ((sc->last_rx_valid & IWN_LAST_RX_AMPDU) && 1973 (stat->flags & htole16(IWN_STAT_FLAG_AGG)) == 0) { 1974 DPRINTF(("missing RX_PHY (expecting A-MPDU)\n")); 1975 return; 1976 } 1977 if ((sc->last_rx_valid & IWN_LAST_RX_AMPDU) == 0 && 1978 (stat->flags & htole16(IWN_STAT_FLAG_AGG))) { 1979 DPRINTF(("missing RX_PHY (unexpected A-MPDU)\n")); 1980 return; 1981 } 1982 } else 1983 stat = (struct iwn_rx_stat *)(desc + 1); 1984 1985 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWN_RBUF_SIZE, 1986 BUS_DMASYNC_POSTREAD); 1987 1988 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 1989 printf("%s: invalid RX statistic header\n", 1990 sc->sc_dev.dv_xname); 1991 return; 1992 } 1993 if (desc->type == IWN_MPDU_RX_DONE) { 1994 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 1995 head = (caddr_t)(mpdu + 1); 1996 len = letoh16(mpdu->len); 1997 } else { 1998 head = (caddr_t)(stat + 1) + stat->cfg_phy_len; 1999 len = letoh16(stat->len); 2000 } 2001 2002 flags = letoh32(*(uint32_t *)(head + len)); 2003 2004 /* Discard frames with a bad FCS early. */ 2005 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 2006 DPRINTFN(2, ("RX flags error %x\n", flags)); 2007 ifp->if_ierrors++; 2008 return; 2009 } 2010 /* Discard frames that are too short. */ 2011 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 2012 /* Allow control frames in monitor mode. */ 2013 if (len < sizeof (struct ieee80211_frame_cts)) { 2014 DPRINTF(("frame too short: %d\n", len)); 2015 ic->ic_stats.is_rx_tooshort++; 2016 ifp->if_ierrors++; 2017 return; 2018 } 2019 } else if (len < sizeof (*wh)) { 2020 DPRINTF(("frame too short: %d\n", len)); 2021 ic->ic_stats.is_rx_tooshort++; 2022 ifp->if_ierrors++; 2023 return; 2024 } 2025 2026 m1 = MCLGETI(NULL, M_DONTWAIT, NULL, IWN_RBUF_SIZE); 2027 if (m1 == NULL) { 2028 ic->ic_stats.is_rx_nombuf++; 2029 ifp->if_ierrors++; 2030 return; 2031 } 2032 bus_dmamap_unload(sc->sc_dmat, data->map); 2033 2034 error = bus_dmamap_load(sc->sc_dmat, data->map, mtod(m1, void *), 2035 IWN_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_READ); 2036 if (error != 0) { 2037 m_freem(m1); 2038 2039 /* Try to reload the old mbuf. */ 2040 error = bus_dmamap_load(sc->sc_dmat, data->map, 2041 mtod(data->m, void *), IWN_RBUF_SIZE, NULL, 2042 BUS_DMA_NOWAIT | BUS_DMA_READ); 2043 if (error != 0) { 2044 panic("%s: could not load old RX mbuf", 2045 sc->sc_dev.dv_xname); 2046 } 2047 /* Physical address may have changed. */ 2048 ring->desc[ring->cur] = 2049 htole32(data->map->dm_segs[0].ds_addr >> 8); 2050 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 2051 ring->cur * sizeof (uint32_t), sizeof (uint32_t), 2052 BUS_DMASYNC_PREWRITE); 2053 ifp->if_ierrors++; 2054 return; 2055 } 2056 2057 m = data->m; 2058 data->m = m1; 2059 /* Update RX descriptor. */ 2060 ring->desc[ring->cur] = htole32(data->map->dm_segs[0].ds_addr >> 8); 2061 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 2062 ring->cur * sizeof (uint32_t), sizeof (uint32_t), 2063 BUS_DMASYNC_PREWRITE); 2064 2065 /* Finalize mbuf. */ 2066 m->m_data = head; 2067 m->m_pkthdr.len = m->m_len = len; 2068 2069 /* 2070 * Grab a reference to the source node. Note that control frames are 2071 * shorter than struct ieee80211_frame but ieee80211_find_rxnode() 2072 * is being careful about control frames. 
2073 */ 2074 wh = mtod(m, struct ieee80211_frame *); 2075 if (len < sizeof (*wh) && 2076 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 2077 ic->ic_stats.is_rx_tooshort++; 2078 ifp->if_ierrors++; 2079 m_freem(m); 2080 return; 2081 } 2082 ni = ieee80211_find_rxnode(ic, wh); 2083 2084 rxi.rxi_flags = 0; 2085 if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) 2086 && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 2087 !IEEE80211_IS_MULTICAST(wh->i_addr1) && 2088 (ni->ni_flags & IEEE80211_NODE_RXPROT) && 2089 ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) { 2090 if ((flags & IWN_RX_CIPHER_MASK) != IWN_RX_CIPHER_CCMP) { 2091 ic->ic_stats.is_ccmp_dec_errs++; 2092 ifp->if_ierrors++; 2093 m_freem(m); 2094 return; 2095 } 2096 /* Check whether decryption was successful or not. */ 2097 if ((desc->type == IWN_MPDU_RX_DONE && 2098 (flags & (IWN_RX_MPDU_DEC | IWN_RX_MPDU_MIC_OK)) != 2099 (IWN_RX_MPDU_DEC | IWN_RX_MPDU_MIC_OK)) || 2100 (desc->type != IWN_MPDU_RX_DONE && 2101 (flags & IWN_RX_DECRYPT_MASK) != IWN_RX_DECRYPT_OK)) { 2102 DPRINTF(("CCMP decryption failed 0x%x\n", flags)); 2103 ic->ic_stats.is_ccmp_dec_errs++; 2104 ifp->if_ierrors++; 2105 m_freem(m); 2106 return; 2107 } 2108 if (iwn_ccmp_decap(sc, m, ni) != 0) { 2109 ifp->if_ierrors++; 2110 m_freem(m); 2111 return; 2112 } 2113 rxi.rxi_flags |= IEEE80211_RXI_HWDEC; 2114 } 2115 2116 rssi = ops->get_rssi(stat); 2117 2118 #if NBPFILTER > 0 2119 if (sc->sc_drvbpf != NULL) { 2120 struct mbuf mb; 2121 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 2122 2123 tap->wr_flags = 0; 2124 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 2125 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2126 tap->wr_chan_freq = 2127 htole16(ic->ic_channels[stat->chan].ic_freq); 2128 tap->wr_chan_flags = 2129 htole16(ic->ic_channels[stat->chan].ic_flags); 2130 tap->wr_dbm_antsignal = (int8_t)rssi; 2131 tap->wr_dbm_antnoise = (int8_t)sc->noise; 2132 tap->wr_tsft = stat->tstamp; 2133 switch (stat->rate) { 2134 /* CCK rates. */ 2135 case 10: tap->wr_rate = 2; break; 2136 case 20: tap->wr_rate = 4; break; 2137 case 55: tap->wr_rate = 11; break; 2138 case 110: tap->wr_rate = 22; break; 2139 /* OFDM rates. */ 2140 case 0xd: tap->wr_rate = 12; break; 2141 case 0xf: tap->wr_rate = 18; break; 2142 case 0x5: tap->wr_rate = 24; break; 2143 case 0x7: tap->wr_rate = 36; break; 2144 case 0x9: tap->wr_rate = 48; break; 2145 case 0xb: tap->wr_rate = 72; break; 2146 case 0x1: tap->wr_rate = 96; break; 2147 case 0x3: tap->wr_rate = 108; break; 2148 /* Unknown rate: should not happen. */ 2149 default: tap->wr_rate = 0; 2150 } 2151 2152 mb.m_data = (caddr_t)tap; 2153 mb.m_len = sc->sc_rxtap_len; 2154 mb.m_next = m; 2155 mb.m_nextpkt = NULL; 2156 mb.m_type = 0; 2157 mb.m_flags = 0; 2158 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN); 2159 } 2160 #endif 2161 2162 /* Send the frame to the 802.11 layer. */ 2163 rxi.rxi_rssi = rssi; 2164 rxi.rxi_tstamp = 0; /* unused */ 2165 ieee80211_input(ifp, m, ni, &rxi); 2166 2167 /* Node is no longer needed. */ 2168 ieee80211_release_node(ic, ni); 2169 } 2170 2171 /* Process an incoming Compressed BlockAck. 
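 * A compressed BlockAck acknowledges the subframes of a transmitted A-MPDU.
 * TX aggregation is not implemented yet, so beyond the DMA sync only the
 * destination TX queue is looked up here (see the XXX below).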
*/ 2172 void 2173 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2174 struct iwn_rx_data *data) 2175 { 2176 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 2177 struct iwn_tx_ring *txq; 2178 2179 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), sizeof (*ba), 2180 BUS_DMASYNC_POSTREAD); 2181 2182 txq = &sc->txq[letoh16(ba->qid)]; 2183 /* XXX TBD */ 2184 } 2185 2186 /* 2187 * Process a CALIBRATION_RESULT notification sent by the initialization 2188 * firmware in response to a CMD_CALIB_CONFIG command (5000 only). 2189 */ 2190 void 2191 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2192 struct iwn_rx_data *data) 2193 { 2194 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 2195 int len, idx = -1; 2196 2197 /* Runtime firmware should not send such a notification. */ 2198 if (sc->sc_flags & IWN_FLAG_CALIB_DONE) 2199 return; 2200 2201 len = (letoh32(desc->len) & 0x3fff) - 4; 2202 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), len, 2203 BUS_DMASYNC_POSTREAD); 2204 2205 switch (calib->code) { 2206 case IWN5000_PHY_CALIB_DC: 2207 if (sc->hw_type == IWN_HW_REV_TYPE_5150 || 2208 sc->hw_type == IWN_HW_REV_TYPE_2030 || 2209 sc->hw_type == IWN_HW_REV_TYPE_2000 || 2210 sc->hw_type == IWN_HW_REV_TYPE_135 || 2211 sc->hw_type == IWN_HW_REV_TYPE_105) 2212 idx = 0; 2213 break; 2214 case IWN5000_PHY_CALIB_LO: 2215 idx = 1; 2216 break; 2217 case IWN5000_PHY_CALIB_TX_IQ: 2218 idx = 2; 2219 break; 2220 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 2221 if (sc->hw_type < IWN_HW_REV_TYPE_6000 && 2222 sc->hw_type != IWN_HW_REV_TYPE_5150) 2223 idx = 3; 2224 break; 2225 case IWN5000_PHY_CALIB_BASE_BAND: 2226 idx = 4; 2227 break; 2228 } 2229 if (idx == -1) /* Ignore other results. */ 2230 return; 2231 2232 /* Save calibration result. */ 2233 if (sc->calibcmd[idx].buf != NULL) 2234 free(sc->calibcmd[idx].buf, M_DEVBUF, 0); 2235 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT); 2236 if (sc->calibcmd[idx].buf == NULL) { 2237 DPRINTF(("not enough memory for calibration result %d\n", 2238 calib->code)); 2239 return; 2240 } 2241 DPRINTF(("saving calibration result code=%d len=%d\n", 2242 calib->code, len)); 2243 sc->calibcmd[idx].len = len; 2244 memcpy(sc->calibcmd[idx].buf, calib, len); 2245 } 2246 2247 /* 2248 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 2249 * The latter is sent by the firmware after each received beacon. 2250 */ 2251 void 2252 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2253 struct iwn_rx_data *data) 2254 { 2255 struct iwn_ops *ops = &sc->ops; 2256 struct ieee80211com *ic = &sc->sc_ic; 2257 struct iwn_calib_state *calib = &sc->calib; 2258 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 2259 int temp; 2260 2261 /* Ignore statistics received during a scan. */ 2262 if (ic->ic_state != IEEE80211_S_RUN) 2263 return; 2264 2265 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2266 sizeof (*stats), BUS_DMASYNC_POSTREAD); 2267 2268 DPRINTFN(3, ("received statistics (cmd=%d)\n", desc->type)); 2269 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */ 2270 2271 /* Test if temperature has changed. */ 2272 if (stats->general.temp != sc->rawtemp) { 2273 /* Convert "raw" temperature to degC. */ 2274 sc->rawtemp = stats->general.temp; 2275 temp = ops->get_temperature(sc); 2276 DPRINTFN(2, ("temperature=%dC\n", temp)); 2277 2278 /* Update TX power if need be (4965AGN only). 
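 * iwn4965_power_calibration() resends the TX power command whenever the
 * temperature has drifted by 3 degC or more since the last calibration.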
*/ 2279 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 2280 iwn4965_power_calibration(sc, temp); 2281 } 2282 2283 if (desc->type != IWN_BEACON_STATISTICS) 2284 return; /* Reply to a statistics request. */ 2285 2286 sc->noise = iwn_get_noise(&stats->rx.general); 2287 2288 /* Test that RSSI and noise are present in stats report. */ 2289 if (letoh32(stats->rx.general.flags) != 1) { 2290 DPRINTF(("received statistics without RSSI\n")); 2291 return; 2292 } 2293 2294 /* 2295 * XXX Differential gain calibration makes the 6005 firmware 2296 * crap out, so skip it for now. This effectively disables 2297 * sensitivity tuning as well. 2298 */ 2299 if (sc->hw_type == IWN_HW_REV_TYPE_6005) 2300 return; 2301 2302 if (calib->state == IWN_CALIB_STATE_ASSOC) 2303 iwn_collect_noise(sc, &stats->rx.general); 2304 else if (calib->state == IWN_CALIB_STATE_RUN) 2305 iwn_tune_sensitivity(sc, &stats->rx); 2306 } 2307 2308 /* 2309 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 2310 * and 5000 adapters have different incompatible TX status formats. 2311 */ 2312 void 2313 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2314 struct iwn_rx_data *data) 2315 { 2316 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 2317 2318 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2319 sizeof (*stat), BUS_DMASYNC_POSTREAD); 2320 iwn_tx_done(sc, desc, stat->ackfailcnt, letoh32(stat->status) & 0xff); 2321 } 2322 2323 void 2324 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2325 struct iwn_rx_data *data) 2326 { 2327 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 2328 2329 #ifdef notyet 2330 /* Reset TX scheduler slot. */ 2331 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); 2332 #endif 2333 2334 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2335 sizeof (*stat), BUS_DMASYNC_POSTREAD); 2336 iwn_tx_done(sc, desc, stat->ackfailcnt, letoh16(stat->status) & 0xff); 2337 } 2338 2339 /* 2340 * Adapter-independent backend for TX_DONE firmware notifications. 2341 */ 2342 void 2343 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt, 2344 uint8_t status) 2345 { 2346 struct ieee80211com *ic = &sc->sc_ic; 2347 struct ifnet *ifp = &ic->ic_if; 2348 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2349 struct iwn_tx_data *data = &ring->data[desc->idx]; 2350 struct iwn_node *wn = (struct iwn_node *)data->ni; 2351 2352 /* Update rate control statistics. */ 2353 wn->amn.amn_txcnt++; 2354 if (ackfailcnt > 0) 2355 wn->amn.amn_retrycnt++; 2356 2357 if (status != 1 && status != 2) { 2358 DPRINTF(("%s: status=0x%x\n", __func__, status)); 2359 ifp->if_oerrors++; 2360 } else 2361 ifp->if_opackets++; 2362 2363 /* Unmap and free mbuf. */ 2364 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize, 2365 BUS_DMASYNC_POSTWRITE); 2366 bus_dmamap_unload(sc->sc_dmat, data->map); 2367 m_freem(data->m); 2368 data->m = NULL; 2369 ieee80211_release_node(ic, data->ni); 2370 data->ni = NULL; 2371 2372 sc->sc_tx_timer = 0; 2373 if (--ring->queued < IWN_TX_RING_LOMARK) { 2374 sc->qfullmsk &= ~(1 << ring->qid); 2375 if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) { 2376 ifq_clr_oactive(&ifp->if_snd); 2377 (*ifp->if_start)(ifp); 2378 } 2379 } 2380 } 2381 2382 /* 2383 * Process a "command done" firmware notification. This is where we wakeup 2384 * processes waiting for a synchronous command completion. 
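 * Synchronous callers of iwn_cmd() tsleep() on the address of the command's
 * TX descriptor in ring 4; the wakeup() below releases them.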
2385 */ 2386 void 2387 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 2388 { 2389 struct iwn_tx_ring *ring = &sc->txq[4]; 2390 struct iwn_tx_data *data; 2391 2392 if ((desc->qid & 0xf) != 4) 2393 return; /* Not a command ack. */ 2394 2395 data = &ring->data[desc->idx]; 2396 2397 /* If the command was mapped in an mbuf, free it. */ 2398 if (data->m != NULL) { 2399 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 2400 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2401 bus_dmamap_unload(sc->sc_dmat, data->map); 2402 m_freem(data->m); 2403 data->m = NULL; 2404 } 2405 wakeup(&ring->desc[desc->idx]); 2406 } 2407 2408 /* 2409 * Process an INT_FH_RX or INT_SW_RX interrupt. 2410 */ 2411 void 2412 iwn_notif_intr(struct iwn_softc *sc) 2413 { 2414 struct iwn_ops *ops = &sc->ops; 2415 struct ieee80211com *ic = &sc->sc_ic; 2416 struct ifnet *ifp = &ic->ic_if; 2417 uint16_t hw; 2418 2419 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map, 2420 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD); 2421 2422 hw = letoh16(sc->rxq.stat->closed_count) & 0xfff; 2423 while (sc->rxq.cur != hw) { 2424 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2425 struct iwn_rx_desc *desc; 2426 2427 bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof (*desc), 2428 BUS_DMASYNC_POSTREAD); 2429 desc = mtod(data->m, struct iwn_rx_desc *); 2430 2431 DPRINTFN(4, ("notification qid=%d idx=%d flags=%x type=%d\n", 2432 desc->qid & 0xf, desc->idx, desc->flags, desc->type)); 2433 2434 if (!(desc->qid & 0x80)) /* Reply to a command. */ 2435 iwn_cmd_done(sc, desc); 2436 2437 switch (desc->type) { 2438 case IWN_RX_PHY: 2439 iwn_rx_phy(sc, desc, data); 2440 break; 2441 2442 case IWN_RX_DONE: /* 4965AGN only. */ 2443 case IWN_MPDU_RX_DONE: 2444 /* An 802.11 frame has been received. */ 2445 iwn_rx_done(sc, desc, data); 2446 break; 2447 case IWN_RX_COMPRESSED_BA: 2448 /* A Compressed BlockAck has been received. */ 2449 iwn_rx_compressed_ba(sc, desc, data); 2450 break; 2451 case IWN_TX_DONE: 2452 /* An 802.11 frame has been transmitted. */ 2453 ops->tx_done(sc, desc, data); 2454 break; 2455 2456 case IWN_RX_STATISTICS: 2457 case IWN_BEACON_STATISTICS: 2458 iwn_rx_statistics(sc, desc, data); 2459 break; 2460 2461 case IWN_BEACON_MISSED: 2462 { 2463 struct iwn_beacon_missed *miss = 2464 (struct iwn_beacon_missed *)(desc + 1); 2465 2466 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2467 sizeof (*miss), BUS_DMASYNC_POSTREAD); 2468 /* 2469 * If more than 5 consecutive beacons are missed, 2470 * reinitialize the sensitivity state machine. 2471 */ 2472 DPRINTFN(2, ("beacons missed %d/%d\n", 2473 letoh32(miss->consecutive), letoh32(miss->total))); 2474 if (ic->ic_state == IEEE80211_S_RUN && 2475 letoh32(miss->consecutive) > 5) 2476 (void)iwn_init_sensitivity(sc); 2477 break; 2478 } 2479 case IWN_UC_READY: 2480 { 2481 struct iwn_ucode_info *uc = 2482 (struct iwn_ucode_info *)(desc + 1); 2483 2484 /* The microcontroller is ready. */ 2485 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2486 sizeof (*uc), BUS_DMASYNC_POSTREAD); 2487 DPRINTF(("microcode alive notification version=%d.%d " 2488 "subtype=%x alive=%x\n", uc->major, uc->minor, 2489 uc->subtype, letoh32(uc->valid))); 2490 2491 if (letoh32(uc->valid) != 1) { 2492 printf("%s: microcontroller initialization " 2493 "failed\n", sc->sc_dev.dv_xname); 2494 break; 2495 } 2496 if (uc->subtype == IWN_UCODE_INIT) { 2497 /* Save microcontroller report. */ 2498 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 2499 } 2500 /* Save the address of the error log in SRAM. 
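 * This pointer is read back by iwn_fatal_intr() to locate and dump the log
 * when the firmware raises a SW or HW error interrupt.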
*/ 2501 sc->errptr = letoh32(uc->errptr); 2502 break; 2503 } 2504 case IWN_STATE_CHANGED: 2505 { 2506 uint32_t *status = (uint32_t *)(desc + 1); 2507 2508 /* Enabled/disabled notification. */ 2509 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2510 sizeof (*status), BUS_DMASYNC_POSTREAD); 2511 DPRINTF(("state changed to %x\n", letoh32(*status))); 2512 2513 if (letoh32(*status) & 1) { 2514 /* The radio button has to be pushed. */ 2515 printf("%s: Radio transmitter is off\n", 2516 sc->sc_dev.dv_xname); 2517 /* Turn the interface down. */ 2518 ifp->if_flags &= ~IFF_UP; 2519 iwn_stop(ifp, 1); 2520 return; /* No further processing. */ 2521 } 2522 break; 2523 } 2524 case IWN_START_SCAN: 2525 { 2526 struct iwn_start_scan *scan = 2527 (struct iwn_start_scan *)(desc + 1); 2528 2529 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2530 sizeof (*scan), BUS_DMASYNC_POSTREAD); 2531 DPRINTFN(2, ("scanning channel %d status %x\n", 2532 scan->chan, letoh32(scan->status))); 2533 2534 /* Fix current channel. */ 2535 ic->ic_bss->ni_chan = &ic->ic_channels[scan->chan]; 2536 break; 2537 } 2538 case IWN_STOP_SCAN: 2539 { 2540 struct iwn_stop_scan *scan = 2541 (struct iwn_stop_scan *)(desc + 1); 2542 2543 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2544 sizeof (*scan), BUS_DMASYNC_POSTREAD); 2545 DPRINTF(("scan finished nchan=%d status=%d chan=%d\n", 2546 scan->nchan, scan->status, scan->chan)); 2547 2548 if (scan->status == 1 && scan->chan <= 14 && 2549 (sc->sc_flags & IWN_FLAG_HAS_5GHZ)) { 2550 /* 2551 * We just finished scanning 2GHz channels, 2552 * start scanning 5GHz ones. 2553 */ 2554 if (iwn_scan(sc, IEEE80211_CHAN_5GHZ) == 0) 2555 break; 2556 } 2557 ieee80211_end_scan(ifp); 2558 break; 2559 } 2560 case IWN5000_CALIBRATION_RESULT: 2561 iwn5000_rx_calib_results(sc, desc, data); 2562 break; 2563 2564 case IWN5000_CALIBRATION_DONE: 2565 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 2566 wakeup(sc); 2567 break; 2568 } 2569 2570 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 2571 } 2572 2573 /* Tell the firmware what we have processed. */ 2574 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; 2575 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 2576 } 2577 2578 /* 2579 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2580 * from power-down sleep mode. 2581 */ 2582 void 2583 iwn_wakeup_intr(struct iwn_softc *sc) 2584 { 2585 int qid; 2586 2587 DPRINTF(("ucode wakeup from power-down sleep\n")); 2588 2589 /* Wakeup RX and TX rings. */ 2590 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 2591 for (qid = 0; qid < sc->ntxqs; qid++) { 2592 struct iwn_tx_ring *ring = &sc->txq[qid]; 2593 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 2594 } 2595 } 2596 2597 /* 2598 * Dump the error log of the firmware when a firmware panic occurs. Although 2599 * we can't debug the firmware because it is neither open source nor free, it 2600 * can help us to identify certain classes of problems. 2601 */ 2602 void 2603 iwn_fatal_intr(struct iwn_softc *sc) 2604 { 2605 struct iwn_fw_dump dump; 2606 int i; 2607 2608 /* Force a complete recalibration on next init. */ 2609 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 2610 2611 /* Check that the error log address is valid. 
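 * The whole dump structure must lie inside the firmware data segment,
 * i.e. within [IWN_FW_DATA_BASE, IWN_FW_DATA_BASE + fw_data_maxsz).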
*/ 2612 if (sc->errptr < IWN_FW_DATA_BASE || 2613 sc->errptr + sizeof (dump) > 2614 IWN_FW_DATA_BASE + sc->fw_data_maxsz) { 2615 printf("%s: bad firmware error log address 0x%08x\n", 2616 sc->sc_dev.dv_xname, sc->errptr); 2617 return; 2618 } 2619 if (iwn_nic_lock(sc) != 0) { 2620 printf("%s: could not read firmware error log\n", 2621 sc->sc_dev.dv_xname); 2622 return; 2623 } 2624 /* Read firmware error log from SRAM. */ 2625 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 2626 sizeof (dump) / sizeof (uint32_t)); 2627 iwn_nic_unlock(sc); 2628 2629 if (dump.valid == 0) { 2630 printf("%s: firmware error log is empty\n", 2631 sc->sc_dev.dv_xname); 2632 return; 2633 } 2634 printf("firmware error log:\n"); 2635 printf(" error type = \"%s\" (0x%08X)\n", 2636 (dump.id < nitems(iwn_fw_errmsg)) ? 2637 iwn_fw_errmsg[dump.id] : "UNKNOWN", 2638 dump.id); 2639 printf(" program counter = 0x%08X\n", dump.pc); 2640 printf(" source line = 0x%08X\n", dump.src_line); 2641 printf(" error data = 0x%08X%08X\n", 2642 dump.error_data[0], dump.error_data[1]); 2643 printf(" branch link = 0x%08X%08X\n", 2644 dump.branch_link[0], dump.branch_link[1]); 2645 printf(" interrupt link = 0x%08X%08X\n", 2646 dump.interrupt_link[0], dump.interrupt_link[1]); 2647 printf(" time = %u\n", dump.time[0]); 2648 2649 /* Dump driver status (TX and RX rings) while we're here. */ 2650 printf("driver status:\n"); 2651 for (i = 0; i < sc->ntxqs; i++) { 2652 struct iwn_tx_ring *ring = &sc->txq[i]; 2653 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2654 i, ring->qid, ring->cur, ring->queued); 2655 } 2656 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2657 printf(" 802.11 state %d\n", sc->sc_ic.ic_state); 2658 } 2659 2660 int 2661 iwn_intr(void *arg) 2662 { 2663 struct iwn_softc *sc = arg; 2664 struct ifnet *ifp = &sc->sc_ic.ic_if; 2665 uint32_t r1, r2, tmp; 2666 2667 /* Disable interrupts. */ 2668 IWN_WRITE(sc, IWN_INT_MASK, 0); 2669 2670 /* Read interrupts from ICT (fast) or from registers (slow). */ 2671 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2672 tmp = 0; 2673 while (sc->ict[sc->ict_cur] != 0) { 2674 tmp |= sc->ict[sc->ict_cur]; 2675 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 2676 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 2677 } 2678 tmp = letoh32(tmp); 2679 if (tmp == 0xffffffff) /* Shouldn't happen. */ 2680 tmp = 0; 2681 else if (tmp & 0xc0000) /* Workaround a HW bug. */ 2682 tmp |= 0x8000; 2683 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 2684 r2 = 0; /* Unused. */ 2685 } else { 2686 r1 = IWN_READ(sc, IWN_INT); 2687 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 2688 return 0; /* Hardware gone! */ 2689 r2 = IWN_READ(sc, IWN_FH_INT); 2690 } 2691 if (r1 == 0 && r2 == 0) { 2692 if (ifp->if_flags & IFF_UP) 2693 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 2694 return 0; /* Interrupt not for us. */ 2695 } 2696 2697 /* Acknowledge interrupts. */ 2698 IWN_WRITE(sc, IWN_INT, r1); 2699 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 2700 IWN_WRITE(sc, IWN_FH_INT, r2); 2701 2702 if (r1 & IWN_INT_RF_TOGGLED) { 2703 tmp = IWN_READ(sc, IWN_GP_CNTRL); 2704 printf("%s: RF switch: radio %s\n", sc->sc_dev.dv_xname, 2705 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); 2706 } 2707 if (r1 & IWN_INT_CT_REACHED) { 2708 printf("%s: critical temperature reached!\n", 2709 sc->sc_dev.dv_xname); 2710 } 2711 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 2712 printf("%s: fatal firmware error\n", sc->sc_dev.dv_xname); 2713 /* Dump firmware error log and stop. 
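 * A firmware panic is not recoverable in place: the error log is dumped
 * for diagnostics, the interface is stopped and a re-init is scheduled on
 * the system task queue.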
*/ 2714 iwn_fatal_intr(sc); 2715 iwn_stop(ifp, 1); 2716 task_add(systq, &sc->init_task); 2717 return 1; 2718 } 2719 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 2720 (r2 & IWN_FH_INT_RX)) { 2721 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2722 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 2723 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 2724 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 2725 IWN_INT_PERIODIC_DIS); 2726 iwn_notif_intr(sc); 2727 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 2728 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 2729 IWN_INT_PERIODIC_ENA); 2730 } 2731 } else 2732 iwn_notif_intr(sc); 2733 } 2734 2735 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 2736 if (sc->sc_flags & IWN_FLAG_USE_ICT) 2737 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 2738 wakeup(sc); /* FH DMA transfer completed. */ 2739 } 2740 2741 if (r1 & IWN_INT_ALIVE) 2742 wakeup(sc); /* Firmware is alive. */ 2743 2744 if (r1 & IWN_INT_WAKEUP) 2745 iwn_wakeup_intr(sc); 2746 2747 /* Re-enable interrupts. */ 2748 if (ifp->if_flags & IFF_UP) 2749 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 2750 2751 return 1; 2752 } 2753 2754 /* 2755 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 2756 * 5000 adapters use a slightly different format). 2757 */ 2758 void 2759 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2760 uint16_t len) 2761 { 2762 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 2763 2764 *w = htole16(len + 8); 2765 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2766 (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t), 2767 BUS_DMASYNC_PREWRITE); 2768 if (idx < IWN_SCHED_WINSZ) { 2769 *(w + IWN_TX_RING_COUNT) = *w; 2770 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2771 (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr, 2772 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2773 } 2774 } 2775 2776 void 2777 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2778 uint16_t len) 2779 { 2780 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2781 2782 *w = htole16(id << 12 | (len + 8)); 2783 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2784 (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t), 2785 BUS_DMASYNC_PREWRITE); 2786 if (idx < IWN_SCHED_WINSZ) { 2787 *(w + IWN_TX_RING_COUNT) = *w; 2788 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2789 (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr, 2790 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2791 } 2792 } 2793 2794 void 2795 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 2796 { 2797 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2798 2799 *w = (*w & htole16(0xf000)) | htole16(1); 2800 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2801 (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t), 2802 BUS_DMASYNC_PREWRITE); 2803 if (idx < IWN_SCHED_WINSZ) { 2804 *(w + IWN_TX_RING_COUNT) = *w; 2805 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2806 (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr, 2807 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2808 } 2809 } 2810 2811 int 2812 iwn_tx(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2813 { 2814 struct ieee80211com *ic = &sc->sc_ic; 2815 struct iwn_node *wn = (void *)ni; 2816 struct iwn_tx_ring *ring; 2817 struct iwn_tx_desc *desc; 2818 struct iwn_tx_data *data; 2819 struct iwn_tx_cmd *cmd; 2820 struct iwn_cmd_data *tx; 2821 const struct iwn_rate *rinfo; 2822 struct ieee80211_frame *wh; 2823 struct ieee80211_key *k = NULL; 2824 enum ieee80211_edca_ac ac; 2825 uint32_t flags; 2826 uint16_t 
qos; 2827 u_int hdrlen; 2828 bus_dma_segment_t *seg; 2829 uint8_t *ivp, tid, ridx, txant, type; 2830 int i, totlen, hasqos, error, pad; 2831 2832 wh = mtod(m, struct ieee80211_frame *); 2833 hdrlen = ieee80211_get_hdrlen(wh); 2834 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2835 2836 /* Select EDCA Access Category and TX ring for this frame. */ 2837 if ((hasqos = ieee80211_has_qos(wh))) { 2838 qos = ieee80211_get_qos(wh); 2839 tid = qos & IEEE80211_QOS_TID; 2840 ac = ieee80211_up_to_ac(ic, tid); 2841 } else { 2842 qos = 0; 2843 tid = 0; 2844 ac = EDCA_AC_BE; 2845 } 2846 2847 ring = &sc->txq[ac]; 2848 desc = &ring->desc[ring->cur]; 2849 data = &ring->data[ring->cur]; 2850 2851 /* Choose a TX rate index. */ 2852 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 2853 type != IEEE80211_FC0_TYPE_DATA) 2854 ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ? 2855 IWN_RIDX_OFDM6 : IWN_RIDX_CCK1; 2856 else if (ic->ic_fixed_mcs != -1) 2857 ridx = sc->fixed_ridx; 2858 else if (ic->ic_fixed_rate != -1) 2859 ridx = sc->fixed_ridx; 2860 else { 2861 if (ni->ni_flags & IEEE80211_NODE_HT) 2862 ridx = iwn_mcs2ridx[ni->ni_txmcs]; 2863 else 2864 ridx = wn->ridx[ni->ni_txrate]; 2865 } 2866 rinfo = &iwn_rates[ridx]; 2867 #if NBPFILTER > 0 2868 if (sc->sc_drvbpf != NULL) { 2869 struct mbuf mb; 2870 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 2871 2872 tap->wt_flags = 0; 2873 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq); 2874 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags); 2875 if ((ni->ni_flags & IEEE80211_NODE_HT) && 2876 !IEEE80211_IS_MULTICAST(wh->i_addr1) && 2877 type == IEEE80211_FC0_TYPE_DATA) { 2878 /* XXX need a way to pass current MCS in 11n mode */ 2879 tap->wt_rate = 0; 2880 } else 2881 tap->wt_rate = rinfo->rate; 2882 tap->wt_hwqueue = ac; 2883 if ((ic->ic_flags & IEEE80211_F_WEPON) && 2884 (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) 2885 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2886 2887 mb.m_data = (caddr_t)tap; 2888 mb.m_len = sc->sc_txtap_len; 2889 mb.m_next = m; 2890 mb.m_nextpkt = NULL; 2891 mb.m_type = 0; 2892 mb.m_flags = 0; 2893 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT); 2894 } 2895 #endif 2896 2897 totlen = m->m_pkthdr.len; 2898 2899 /* Encrypt the frame if need be. */ 2900 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2901 /* Retrieve key for TX. */ 2902 k = ieee80211_get_txkey(ic, wh, ni); 2903 if (k->k_cipher != IEEE80211_CIPHER_CCMP) { 2904 /* Do software encryption. */ 2905 if ((m = ieee80211_encrypt(ic, m, k)) == NULL) 2906 return ENOBUFS; 2907 /* 802.11 header may have moved. */ 2908 wh = mtod(m, struct ieee80211_frame *); 2909 totlen = m->m_pkthdr.len; 2910 2911 } else /* HW appends CCMP MIC. */ 2912 totlen += IEEE80211_CCMP_HDRLEN; 2913 } 2914 2915 /* Prepare TX firmware command. */ 2916 cmd = &ring->cmd[ring->cur]; 2917 cmd->code = IWN_CMD_TX_DATA; 2918 cmd->flags = 0; 2919 cmd->qid = ring->qid; 2920 cmd->idx = ring->cur; 2921 2922 tx = (struct iwn_cmd_data *)cmd->data; 2923 /* NB: No need to clear tx, all fields are reinitialized here. */ 2924 tx->scratch = 0; /* clear "scratch" area */ 2925 2926 flags = 0; 2927 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2928 /* Unicast frame, check if an ACK is expected. */ 2929 if (!hasqos || (qos & IEEE80211_QOS_ACK_POLICY_MASK) != 2930 IEEE80211_QOS_ACK_POLICY_NOACK) 2931 flags |= IWN_TX_NEED_ACK; 2932 } 2933 if ((wh->i_fc[0] & 2934 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 2935 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) 2936 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. 
*/ 2937 2938 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2939 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */ 2940 2941 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2942 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2943 /* NB: Group frames are sent using CCK in 802.11b/g/n (2GHz). */ 2944 if (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) { 2945 flags |= IWN_TX_NEED_RTS; 2946 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2947 ridx >= IWN_RIDX_OFDM6) { 2948 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2949 flags |= IWN_TX_NEED_CTS; 2950 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2951 flags |= IWN_TX_NEED_RTS; 2952 } 2953 else if (ni->ni_flags & IEEE80211_NODE_HT) 2954 flags |= IWN_TX_NEED_RTS; 2955 2956 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { 2957 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 2958 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 2959 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); 2960 flags |= IWN_TX_NEED_PROTECTION; 2961 } else 2962 flags |= IWN_TX_FULL_TXOP; 2963 } 2964 } 2965 2966 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 2967 type != IEEE80211_FC0_TYPE_DATA) 2968 tx->id = sc->broadcast_id; 2969 else 2970 tx->id = wn->id; 2971 2972 if (type == IEEE80211_FC0_TYPE_MGT) { 2973 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2974 2975 #ifndef IEEE80211_STA_ONLY 2976 /* Tell HW to set timestamp in probe responses. */ 2977 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2978 flags |= IWN_TX_INSERT_TSTAMP; 2979 #endif 2980 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2981 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2982 tx->timeout = htole16(3); 2983 else 2984 tx->timeout = htole16(2); 2985 } else 2986 tx->timeout = htole16(0); 2987 2988 if (hdrlen & 3) { 2989 /* First segment length must be a multiple of 4. */ 2990 flags |= IWN_TX_NEED_PADDING; 2991 pad = 4 - (hdrlen & 3); 2992 } else 2993 pad = 0; 2994 2995 tx->len = htole16(totlen); 2996 tx->tid = tid; 2997 tx->rts_ntries = 60; 2998 tx->data_ntries = 15; 2999 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 3000 3001 if ((ni->ni_flags & IEEE80211_NODE_HT) && 3002 tx->id != sc->broadcast_id) 3003 tx->plcp = rinfo->ht_plcp; 3004 else 3005 tx->plcp = rinfo->plcp; 3006 3007 if ((ni->ni_flags & IEEE80211_NODE_HT) && 3008 tx->id != sc->broadcast_id) 3009 tx->rflags = rinfo->ht_flags; 3010 else 3011 tx->rflags = rinfo->flags; 3012 if (tx->id == sc->broadcast_id) { 3013 /* Group or management frame. */ 3014 tx->linkq = 0; 3015 /* XXX Alternate between antenna A and B? */ 3016 txant = IWN_LSB(sc->txchainmask); 3017 tx->rflags |= IWN_RFLAG_ANT(txant); 3018 } else { 3019 if (ni->ni_flags & IEEE80211_NODE_HT) 3020 tx->linkq = 7 - ni->ni_txmcs; /* XXX revisit for MIMO */ 3021 else 3022 tx->linkq = ni->ni_rates.rs_nrates - ni->ni_txrate - 1; 3023 flags |= IWN_TX_LINKQ; /* enable MRR */ 3024 } 3025 /* Set physical address of "scratch area". */ 3026 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 3027 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 3028 3029 /* Copy 802.11 header in TX command. */ 3030 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 3031 3032 if (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) { 3033 /* Trim 802.11 header and prepend CCMP IV. 
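 * The 8-byte CCMP header built below is PN0, PN1, reserved, key-ID with the
 * ExtIV bit, then PN2..PN5; the hardware computes and appends the 8-byte
 * MIC itself.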
*/ 3034 m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN); 3035 ivp = mtod(m, uint8_t *); 3036 k->k_tsc++; 3037 ivp[0] = k->k_tsc; 3038 ivp[1] = k->k_tsc >> 8; 3039 ivp[2] = 0; 3040 ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV; 3041 ivp[4] = k->k_tsc >> 16; 3042 ivp[5] = k->k_tsc >> 24; 3043 ivp[6] = k->k_tsc >> 32; 3044 ivp[7] = k->k_tsc >> 40; 3045 3046 tx->security = IWN_CIPHER_CCMP; 3047 /* XXX flags |= IWN_TX_AMPDU_CCMP; */ 3048 memcpy(tx->key, k->k_key, k->k_len); 3049 3050 /* TX scheduler includes CCMP MIC len w/5000 Series. */ 3051 if (sc->hw_type != IWN_HW_REV_TYPE_4965) 3052 totlen += IEEE80211_CCMP_MICLEN; 3053 } else { 3054 /* Trim 802.11 header. */ 3055 m_adj(m, hdrlen); 3056 tx->security = 0; 3057 } 3058 tx->flags = htole32(flags); 3059 3060 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 3061 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3062 if (error != 0 && error != EFBIG) { 3063 printf("%s: can't map mbuf (error %d)\n", 3064 sc->sc_dev.dv_xname, error); 3065 m_freem(m); 3066 return error; 3067 } 3068 if (error != 0) { 3069 /* Too many DMA segments, linearize mbuf. */ 3070 if (m_defrag(m, M_DONTWAIT)) { 3071 m_freem(m); 3072 return ENOBUFS; 3073 } 3074 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 3075 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3076 if (error != 0) { 3077 printf("%s: can't map mbuf (error %d)\n", 3078 sc->sc_dev.dv_xname, error); 3079 m_freem(m); 3080 return error; 3081 } 3082 } 3083 3084 data->m = m; 3085 data->ni = ni; 3086 3087 DPRINTFN(4, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n", 3088 ring->qid, ring->cur, m->m_pkthdr.len, data->map->dm_nsegs)); 3089 3090 /* Fill TX descriptor. */ 3091 desc->nsegs = 1 + data->map->dm_nsegs; 3092 /* First DMA segment is used by the TX command. */ 3093 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 3094 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 3095 (4 + sizeof (*tx) + hdrlen + pad) << 4); 3096 /* Other DMA segments are for data payload. */ 3097 seg = data->map->dm_segs; 3098 for (i = 1; i <= data->map->dm_nsegs; i++) { 3099 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 3100 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 3101 seg->ds_len << 4); 3102 seg++; 3103 } 3104 3105 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize, 3106 BUS_DMASYNC_PREWRITE); 3107 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map, 3108 (caddr_t)cmd - ring->cmd_dma.vaddr, sizeof (*cmd), 3109 BUS_DMASYNC_PREWRITE); 3110 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 3111 (caddr_t)desc - ring->desc_dma.vaddr, sizeof (*desc), 3112 BUS_DMASYNC_PREWRITE); 3113 3114 #ifdef notyet 3115 /* Update TX scheduler. */ 3116 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 3117 #endif 3118 3119 /* Kick TX ring. */ 3120 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3121 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3122 3123 /* Mark TX ring as full if we reach a certain threshold. */ 3124 if (++ring->queued > IWN_TX_RING_HIMARK) 3125 sc->qfullmsk |= 1 << ring->qid; 3126 3127 return 0; 3128 } 3129 3130 void 3131 iwn_start(struct ifnet *ifp) 3132 { 3133 struct iwn_softc *sc = ifp->if_softc; 3134 struct ieee80211com *ic = &sc->sc_ic; 3135 struct ieee80211_node *ni; 3136 struct mbuf *m; 3137 3138 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd)) 3139 return; 3140 3141 for (;;) { 3142 if (sc->qfullmsk != 0) { 3143 ifq_set_oactive(&ifp->if_snd); 3144 break; 3145 } 3146 /* Send pending management frames first. 
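 * net80211 stores the destination node of a queued management frame in the
 * mbuf packet header cookie; data frames are dequeued from the interface
 * send queue and encapsulated below instead.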
*/ 3147 m = mq_dequeue(&ic->ic_mgtq); 3148 if (m != NULL) { 3149 ni = m->m_pkthdr.ph_cookie; 3150 goto sendit; 3151 } 3152 if (ic->ic_state != IEEE80211_S_RUN) 3153 break; 3154 3155 /* Encapsulate and send data frames. */ 3156 IFQ_DEQUEUE(&ifp->if_snd, m); 3157 if (m == NULL) 3158 break; 3159 #if NBPFILTER > 0 3160 if (ifp->if_bpf != NULL) 3161 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 3162 #endif 3163 if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) 3164 continue; 3165 sendit: 3166 #if NBPFILTER > 0 3167 if (ic->ic_rawbpf != NULL) 3168 bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT); 3169 #endif 3170 if (iwn_tx(sc, m, ni) != 0) { 3171 ieee80211_release_node(ic, ni); 3172 ifp->if_oerrors++; 3173 continue; 3174 } 3175 3176 sc->sc_tx_timer = 5; 3177 ifp->if_timer = 1; 3178 } 3179 } 3180 3181 void 3182 iwn_watchdog(struct ifnet *ifp) 3183 { 3184 struct iwn_softc *sc = ifp->if_softc; 3185 3186 ifp->if_timer = 0; 3187 3188 if (sc->sc_tx_timer > 0) { 3189 if (--sc->sc_tx_timer == 0) { 3190 printf("%s: device timeout\n", sc->sc_dev.dv_xname); 3191 ifp->if_flags &= ~IFF_UP; 3192 iwn_stop(ifp, 1); 3193 ifp->if_oerrors++; 3194 return; 3195 } 3196 ifp->if_timer = 1; 3197 } 3198 3199 ieee80211_watchdog(ifp); 3200 } 3201 3202 int 3203 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 3204 { 3205 struct iwn_softc *sc = ifp->if_softc; 3206 struct ieee80211com *ic = &sc->sc_ic; 3207 struct ifreq *ifr; 3208 int s, error = 0; 3209 3210 s = splnet(); 3211 /* 3212 * Prevent processes from entering this function while another 3213 * process is tsleep'ing in it. 3214 */ 3215 while ((sc->sc_flags & IWN_FLAG_BUSY) && error == 0) 3216 error = tsleep(&sc->sc_flags, PCATCH, "iwnioc", 0); 3217 if (error != 0) { 3218 splx(s); 3219 return error; 3220 } 3221 sc->sc_flags |= IWN_FLAG_BUSY; 3222 3223 switch (cmd) { 3224 case SIOCSIFADDR: 3225 ifp->if_flags |= IFF_UP; 3226 /* FALLTHROUGH */ 3227 case SIOCSIFFLAGS: 3228 if (ifp->if_flags & IFF_UP) { 3229 if (!(ifp->if_flags & IFF_RUNNING)) 3230 error = iwn_init(ifp); 3231 } else { 3232 if (ifp->if_flags & IFF_RUNNING) 3233 iwn_stop(ifp, 1); 3234 } 3235 break; 3236 3237 case SIOCADDMULTI: 3238 case SIOCDELMULTI: 3239 ifr = (struct ifreq *)data; 3240 error = (cmd == SIOCADDMULTI) ? 3241 ether_addmulti(ifr, &ic->ic_ac) : 3242 ether_delmulti(ifr, &ic->ic_ac); 3243 3244 if (error == ENETRESET) 3245 error = 0; 3246 break; 3247 3248 case SIOCS80211POWER: 3249 error = ieee80211_ioctl(ifp, cmd, data); 3250 if (error != ENETRESET) 3251 break; 3252 if (ic->ic_state == IEEE80211_S_RUN && 3253 sc->calib.state == IWN_CALIB_STATE_RUN) { 3254 if (ic->ic_flags & IEEE80211_F_PMGTON) 3255 error = iwn_set_pslevel(sc, 0, 3, 0); 3256 else /* back to CAM */ 3257 error = iwn_set_pslevel(sc, 0, 0, 0); 3258 } else { 3259 /* Defer until transition to IWN_CALIB_STATE_RUN. */ 3260 error = 0; 3261 } 3262 break; 3263 3264 default: 3265 error = ieee80211_ioctl(ifp, cmd, data); 3266 } 3267 3268 if (error == ENETRESET) { 3269 error = 0; 3270 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 3271 (IFF_UP | IFF_RUNNING)) { 3272 iwn_stop(ifp, 0); 3273 error = iwn_init(ifp); 3274 } 3275 } 3276 3277 sc->sc_flags &= ~IWN_FLAG_BUSY; 3278 wakeup(&sc->sc_flags); 3279 splx(s); 3280 return error; 3281 } 3282 3283 /* 3284 * Send a command to the firmware. 
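 * Commands larger than the preallocated per-entry command buffer are staged
 * in a freshly allocated mbuf (with a cluster if needed) and DMA-mapped on
 * the fly.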
3285 */ 3286 int 3287 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) 3288 { 3289 struct iwn_tx_ring *ring = &sc->txq[4]; 3290 struct iwn_tx_desc *desc; 3291 struct iwn_tx_data *data; 3292 struct iwn_tx_cmd *cmd; 3293 struct mbuf *m; 3294 bus_addr_t paddr; 3295 int totlen, error; 3296 3297 desc = &ring->desc[ring->cur]; 3298 data = &ring->data[ring->cur]; 3299 totlen = 4 + size; 3300 3301 if (size > sizeof cmd->data) { 3302 /* Command is too large to fit in a descriptor. */ 3303 if (totlen > MCLBYTES) 3304 return EINVAL; 3305 MGETHDR(m, M_DONTWAIT, MT_DATA); 3306 if (m == NULL) 3307 return ENOMEM; 3308 if (totlen > MHLEN) { 3309 MCLGET(m, M_DONTWAIT); 3310 if (!(m->m_flags & M_EXT)) { 3311 m_freem(m); 3312 return ENOMEM; 3313 } 3314 } 3315 cmd = mtod(m, struct iwn_tx_cmd *); 3316 error = bus_dmamap_load(sc->sc_dmat, data->map, cmd, totlen, 3317 NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3318 if (error != 0) { 3319 m_freem(m); 3320 return error; 3321 } 3322 data->m = m; 3323 paddr = data->map->dm_segs[0].ds_addr; 3324 } else { 3325 cmd = &ring->cmd[ring->cur]; 3326 paddr = data->cmd_paddr; 3327 } 3328 3329 cmd->code = code; 3330 cmd->flags = 0; 3331 cmd->qid = ring->qid; 3332 cmd->idx = ring->cur; 3333 memcpy(cmd->data, buf, size); 3334 3335 desc->nsegs = 1; 3336 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 3337 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); 3338 3339 if (size > sizeof cmd->data) { 3340 bus_dmamap_sync(sc->sc_dmat, data->map, 0, totlen, 3341 BUS_DMASYNC_PREWRITE); 3342 } else { 3343 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map, 3344 (caddr_t)cmd - ring->cmd_dma.vaddr, totlen, 3345 BUS_DMASYNC_PREWRITE); 3346 } 3347 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 3348 (caddr_t)desc - ring->desc_dma.vaddr, sizeof (*desc), 3349 BUS_DMASYNC_PREWRITE); 3350 3351 #ifdef notyet 3352 /* Update TX scheduler. */ 3353 ops->update_sched(sc, ring->qid, ring->cur, 0, 0); 3354 #endif 3355 3356 /* Kick command ring. */ 3357 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3358 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3359 3360 return async ? 0 : tsleep(desc, PCATCH, "iwncmd", hz); 3361 } 3362 3363 int 3364 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3365 { 3366 struct iwn4965_node_info hnode; 3367 caddr_t src, dst; 3368 3369 /* 3370 * We use the node structure for 5000 Series internally (it is 3371 * a superset of the one for 4965AGN). We thus copy the common 3372 * fields before sending the command. 3373 */ 3374 src = (caddr_t)node; 3375 dst = (caddr_t)&hnode; 3376 memcpy(dst, src, 48); 3377 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ 3378 memcpy(dst + 48, src + 72, 20); 3379 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); 3380 } 3381 3382 int 3383 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3384 { 3385 /* Direct mapping. */ 3386 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); 3387 } 3388 3389 int 3390 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni) 3391 { 3392 struct iwn_node *wn = (void *)ni; 3393 struct ieee80211_rateset *rs = &ni->ni_rates; 3394 struct iwn_cmd_link_quality linkq; 3395 const struct iwn_rate *rinfo; 3396 uint8_t txant; 3397 int i, txrate; 3398 3399 /* Use the first valid TX antenna. 
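 * The lowest antenna present in the TX chain mask is used for every
 * single-stream entry of the link quality table.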
*/ 3400 txant = IWN_LSB(sc->txchainmask); 3401 3402 memset(&linkq, 0, sizeof linkq); 3403 linkq.id = wn->id; 3404 linkq.antmsk_1stream = txant; 3405 linkq.antmsk_2stream = IWN_ANT_AB; 3406 linkq.ampdu_max = IWN_AMPDU_MAX; 3407 linkq.ampdu_threshold = 3; 3408 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3409 3410 if (ni->ni_flags & IEEE80211_NODE_HT) { 3411 /* Fill LQ table with MCS 7 - 0 (XXX revisit for MIMO) */ 3412 i = 0; 3413 for (txrate = 7; txrate >= 0; txrate--) { 3414 rinfo = &iwn_rates[iwn_mcs2ridx[txrate]]; 3415 linkq.retry[i].plcp = rinfo->ht_plcp; 3416 linkq.retry[i].rflags = rinfo->ht_flags; 3417 3418 /* XXX set correct ant mask for MIMO rates here */ 3419 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant); 3420 3421 if (++i >= IWN_MAX_TX_RETRIES) 3422 break; 3423 } 3424 3425 /* Fill the rest with the lowest legacy rate. */ 3426 if (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) 3427 rinfo = &iwn_rates[IWN_RIDX_OFDM6]; 3428 else 3429 rinfo = &iwn_rates[IWN_RIDX_CCK1]; 3430 while (i < IWN_MAX_TX_RETRIES) { 3431 linkq.retry[i].plcp = rinfo->plcp; 3432 linkq.retry[i].rflags = rinfo->flags; 3433 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant); 3434 i++; 3435 } 3436 } else { 3437 /* Start at highest available bit-rate. */ 3438 txrate = rs->rs_nrates - 1; 3439 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) { 3440 rinfo = &iwn_rates[wn->ridx[txrate]]; 3441 linkq.retry[i].plcp = rinfo->plcp; 3442 linkq.retry[i].rflags = rinfo->flags; 3443 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant); 3444 /* Next retry at immediate lower bit-rate. */ 3445 if (txrate > 0) 3446 txrate--; 3447 } 3448 } 3449 3450 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1); 3451 } 3452 3453 /* 3454 * Broadcast node is used to send group-addressed and management frames. 3455 */ 3456 int 3457 iwn_add_broadcast_node(struct iwn_softc *sc, int async, int ridx) 3458 { 3459 struct iwn_ops *ops = &sc->ops; 3460 struct iwn_node_info node; 3461 struct iwn_cmd_link_quality linkq; 3462 const struct iwn_rate *rinfo; 3463 uint8_t txant; 3464 int i, error; 3465 3466 memset(&node, 0, sizeof node); 3467 IEEE80211_ADDR_COPY(node.macaddr, etherbroadcastaddr); 3468 node.id = sc->broadcast_id; 3469 DPRINTF(("adding broadcast node\n")); 3470 if ((error = ops->add_node(sc, &node, async)) != 0) 3471 return error; 3472 3473 /* Use the first valid TX antenna. */ 3474 txant = IWN_LSB(sc->txchainmask); 3475 3476 memset(&linkq, 0, sizeof linkq); 3477 linkq.id = sc->broadcast_id; 3478 linkq.antmsk_1stream = txant; 3479 linkq.antmsk_2stream = IWN_ANT_AB; 3480 linkq.ampdu_max = IWN_AMPDU_MAX_NO_AGG; 3481 linkq.ampdu_threshold = 3; 3482 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3483 3484 /* Use lowest mandatory bit-rate. */ 3485 rinfo = &iwn_rates[ridx]; 3486 linkq.retry[0].plcp = rinfo->plcp; 3487 linkq.retry[0].rflags = rinfo->flags; 3488 linkq.retry[0].rflags |= IWN_RFLAG_ANT(txant); 3489 /* Use same bit-rate for all TX retries. 
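 * Group-addressed and management frames are not rate-controlled, so every
 * entry of the retry table points at the same mandatory rate.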
*/ 3490 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) { 3491 linkq.retry[i].plcp = linkq.retry[0].plcp; 3492 linkq.retry[i].rflags = linkq.retry[0].rflags; 3493 } 3494 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); 3495 } 3496 3497 void 3498 iwn_updateedca(struct ieee80211com *ic) 3499 { 3500 #define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3501 struct iwn_softc *sc = ic->ic_softc; 3502 struct iwn_edca_params cmd; 3503 int aci; 3504 3505 memset(&cmd, 0, sizeof cmd); 3506 cmd.flags = htole32(IWN_EDCA_UPDATE); 3507 for (aci = 0; aci < EDCA_NUM_AC; aci++) { 3508 const struct ieee80211_edca_ac_params *ac = 3509 &ic->ic_edca_ac[aci]; 3510 cmd.ac[aci].aifsn = ac->ac_aifsn; 3511 cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->ac_ecwmin)); 3512 cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->ac_ecwmax)); 3513 cmd.ac[aci].txoplimit = 3514 htole16(IEEE80211_TXOP_TO_US(ac->ac_txoplimit)); 3515 } 3516 (void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3517 #undef IWN_EXP2 3518 } 3519 3520 void 3521 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3522 { 3523 struct iwn_cmd_led led; 3524 3525 /* Clear microcode LED ownership. */ 3526 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL); 3527 3528 led.which = which; 3529 led.unit = htole32(10000); /* on/off in unit of 100ms */ 3530 led.off = off; 3531 led.on = on; 3532 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); 3533 } 3534 3535 /* 3536 * Set the critical temperature at which the firmware will stop the radio 3537 * and notify us. 3538 */ 3539 int 3540 iwn_set_critical_temp(struct iwn_softc *sc) 3541 { 3542 struct iwn_critical_temp crit; 3543 int32_t temp; 3544 3545 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); 3546 3547 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 3548 temp = (IWN_CTOK(110) - sc->temp_off) * -5; 3549 else if (sc->hw_type == IWN_HW_REV_TYPE_4965) 3550 temp = IWN_CTOK(110); 3551 else 3552 temp = 110; 3553 memset(&crit, 0, sizeof crit); 3554 crit.tempR = htole32(temp); 3555 DPRINTF(("setting critical temperature to %d\n", temp)); 3556 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); 3557 } 3558 3559 int 3560 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) 3561 { 3562 struct iwn_cmd_timing cmd; 3563 uint64_t val, mod; 3564 3565 memset(&cmd, 0, sizeof cmd); 3566 memcpy(&cmd.tstamp, ni->ni_tstamp, sizeof (uint64_t)); 3567 cmd.bintval = htole16(ni->ni_intval); 3568 cmd.lintval = htole16(10); 3569 3570 /* Compute remaining time until next beacon. */ 3571 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 3572 mod = letoh64(cmd.tstamp) % val; 3573 cmd.binitval = htole32((uint32_t)(val - mod)); 3574 3575 DPRINTF(("timing bintval=%u, tstamp=%llu, init=%u\n", 3576 ni->ni_intval, letoh64(cmd.tstamp), (uint32_t)(val - mod))); 3577 3578 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); 3579 } 3580 3581 void 3582 iwn4965_power_calibration(struct iwn_softc *sc, int temp) 3583 { 3584 /* Adjust TX power if need be (delta >= 3 degC). */ 3585 DPRINTF(("temperature %d->%d\n", sc->temp, temp)); 3586 if (abs(temp - sc->temp) >= 3) { 3587 /* Record temperature of last calibration. */ 3588 sc->temp = temp; 3589 (void)iwn4965_set_txpower(sc, 1); 3590 } 3591 } 3592 3593 /* 3594 * Set TX power for current channel (each rate has its own power settings). 3595 * This function takes into account the regulatory information from EEPROM, 3596 * the current temperature and the current voltage. 
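 * Per-chain, per-rate power indices are interpolated from the factory
 * samples stored in the EEPROM, compensated for temperature and voltage,
 * and clamped to [0, IWN4965_MAX_PWR_INDEX].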
3597 */ 3598 int 3599 iwn4965_set_txpower(struct iwn_softc *sc, int async) 3600 { 3601 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3602 #define fdivround(a, b, n) \ 3603 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3604 /* Linear interpolation. */ 3605 #define interpolate(x, x1, y1, x2, y2, n) \ 3606 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3607 3608 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; 3609 struct ieee80211com *ic = &sc->sc_ic; 3610 struct iwn_ucode_info *uc = &sc->ucode_info; 3611 struct ieee80211_channel *ch; 3612 struct iwn4965_cmd_txpower cmd; 3613 struct iwn4965_eeprom_chan_samples *chans; 3614 const uint8_t *rf_gain, *dsp_gain; 3615 int32_t vdiff, tdiff; 3616 int i, c, grp, maxpwr; 3617 uint8_t chan; 3618 3619 /* Retrieve current channel from last RXON. */ 3620 chan = sc->rxon.chan; 3621 DPRINTF(("setting TX power for channel %d\n", chan)); 3622 ch = &ic->ic_channels[chan]; 3623 3624 memset(&cmd, 0, sizeof cmd); 3625 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1; 3626 cmd.chan = chan; 3627 3628 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 3629 maxpwr = sc->maxpwr5GHz; 3630 rf_gain = iwn4965_rf_gain_5ghz; 3631 dsp_gain = iwn4965_dsp_gain_5ghz; 3632 } else { 3633 maxpwr = sc->maxpwr2GHz; 3634 rf_gain = iwn4965_rf_gain_2ghz; 3635 dsp_gain = iwn4965_dsp_gain_2ghz; 3636 } 3637 3638 /* Compute voltage compensation. */ 3639 vdiff = ((int32_t)letoh32(uc->volt) - sc->eeprom_voltage) / 7; 3640 if (vdiff > 0) 3641 vdiff *= 2; 3642 if (abs(vdiff) > 2) 3643 vdiff = 0; 3644 DPRINTF(("voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", 3645 vdiff, letoh32(uc->volt), sc->eeprom_voltage)); 3646 3647 /* Get channel attenuation group. */ 3648 if (chan <= 20) /* 1-20 */ 3649 grp = 4; 3650 else if (chan <= 43) /* 34-43 */ 3651 grp = 0; 3652 else if (chan <= 70) /* 44-70 */ 3653 grp = 1; 3654 else if (chan <= 124) /* 71-124 */ 3655 grp = 2; 3656 else /* 125-200 */ 3657 grp = 3; 3658 DPRINTF(("chan %d, attenuation group=%d\n", chan, grp)); 3659 3660 /* Get channel sub-band. */ 3661 for (i = 0; i < IWN_NBANDS; i++) 3662 if (sc->bands[i].lo != 0 && 3663 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) 3664 break; 3665 if (i == IWN_NBANDS) /* Can't happen in real-life. */ 3666 return EINVAL; 3667 chans = sc->bands[i].chans; 3668 DPRINTF(("chan %d sub-band=%d\n", chan, i)); 3669 3670 for (c = 0; c < 2; c++) { 3671 uint8_t power, gain, temp; 3672 int maxchpwr, pwr, ridx, idx; 3673 3674 power = interpolate(chan, 3675 chans[0].num, chans[0].samples[c][1].power, 3676 chans[1].num, chans[1].samples[c][1].power, 1); 3677 gain = interpolate(chan, 3678 chans[0].num, chans[0].samples[c][1].gain, 3679 chans[1].num, chans[1].samples[c][1].gain, 1); 3680 temp = interpolate(chan, 3681 chans[0].num, chans[0].samples[c][1].temp, 3682 chans[1].num, chans[1].samples[c][1].temp, 1); 3683 DPRINTF(("TX chain %d: power=%d gain=%d temp=%d\n", 3684 c, power, gain, temp)); 3685 3686 /* Compute temperature compensation. */ 3687 tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; 3688 DPRINTF(("temperature compensation=%d (current=%d, " 3689 "EEPROM=%d)\n", tdiff, sc->temp, temp)); 3690 3691 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { 3692 /* Convert dBm to half-dBm. */ 3693 maxchpwr = sc->maxpwr[chan] * 2; 3694 #ifdef notyet 3695 if (ridx > iwn_mcs2ridx[7] && ridx < iwn_mcs2ridx[16]) 3696 maxchpwr -= 6; /* MIMO 2T: -3dB */ 3697 #endif 3698 3699 pwr = maxpwr; 3700 3701 /* Adjust TX power based on rate. 
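 * The backoff below is in half-dB units: 15 = 7.5dB for OFDM48, 17 = 8.5dB
 * for OFDM54, 20 = 10dB for OFDM60 and 10 = 5dB for all other rates.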
*/ 3702 if ((ridx % 8) == 5) 3703 pwr -= 15; /* OFDM48: -7.5dB */ 3704 else if ((ridx % 8) == 6) 3705 pwr -= 17; /* OFDM54: -8.5dB */ 3706 else if ((ridx % 8) == 7) 3707 pwr -= 20; /* OFDM60: -10dB */ 3708 else 3709 pwr -= 10; /* Others: -5dB */ 3710 3711 /* Do not exceed channel max TX power. */ 3712 if (pwr > maxchpwr) 3713 pwr = maxchpwr; 3714 3715 idx = gain - (pwr - power) - tdiff - vdiff; 3716 if (ridx > iwn_mcs2ridx[7]) /* MIMO */ 3717 idx += (int32_t)letoh32(uc->atten[grp][c]); 3718 3719 if (cmd.band == 0) 3720 idx += 9; /* 5GHz */ 3721 if (ridx == IWN_RIDX_MAX) 3722 idx += 5; /* CCK */ 3723 3724 /* Make sure idx stays in a valid range. */ 3725 if (idx < 0) 3726 idx = 0; 3727 else if (idx > IWN4965_MAX_PWR_INDEX) 3728 idx = IWN4965_MAX_PWR_INDEX; 3729 3730 DPRINTF(("TX chain %d, rate idx %d: power=%d\n", 3731 c, ridx, idx)); 3732 cmd.power[ridx].rf_gain[c] = rf_gain[idx]; 3733 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; 3734 } 3735 } 3736 3737 DPRINTF(("setting TX power for chan %d\n", chan)); 3738 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); 3739 3740 #undef interpolate 3741 #undef fdivround 3742 } 3743 3744 int 3745 iwn5000_set_txpower(struct iwn_softc *sc, int async) 3746 { 3747 struct iwn5000_cmd_txpower cmd; 3748 3749 /* 3750 * TX power calibration is handled automatically by the firmware 3751 * for 5000 Series. 3752 */ 3753 memset(&cmd, 0, sizeof cmd); 3754 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */ 3755 cmd.flags = IWN5000_TXPOWER_NO_CLOSED; 3756 cmd.srv_limit = IWN5000_TXPOWER_AUTO; 3757 DPRINTF(("setting TX power\n")); 3758 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async); 3759 } 3760 3761 /* 3762 * Retrieve the maximum RSSI (in dBm) among receivers. 3763 */ 3764 int 3765 iwn4965_get_rssi(const struct iwn_rx_stat *stat) 3766 { 3767 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf; 3768 uint8_t mask, agc; 3769 int rssi; 3770 3771 mask = (letoh16(phy->antenna) >> 4) & IWN_ANT_ABC; 3772 agc = (letoh16(phy->agc) >> 7) & 0x7f; 3773 3774 rssi = 0; 3775 if (mask & IWN_ANT_A) 3776 rssi = MAX(rssi, phy->rssi[0]); 3777 if (mask & IWN_ANT_B) 3778 rssi = MAX(rssi, phy->rssi[2]); 3779 if (mask & IWN_ANT_C) 3780 rssi = MAX(rssi, phy->rssi[4]); 3781 3782 return rssi - agc - IWN_RSSI_TO_DBM; 3783 } 3784 3785 int 3786 iwn5000_get_rssi(const struct iwn_rx_stat *stat) 3787 { 3788 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf; 3789 uint8_t agc; 3790 int rssi; 3791 3792 agc = (letoh32(phy->agc) >> 9) & 0x7f; 3793 3794 rssi = MAX(letoh16(phy->rssi[0]) & 0xff, 3795 letoh16(phy->rssi[1]) & 0xff); 3796 rssi = MAX(letoh16(phy->rssi[2]) & 0xff, rssi); 3797 3798 return rssi - agc - IWN_RSSI_TO_DBM; 3799 } 3800 3801 /* 3802 * Retrieve the average noise (in dBm) among receivers. 3803 */ 3804 int 3805 iwn_get_noise(const struct iwn_rx_general_stats *stats) 3806 { 3807 int i, total, nbant, noise; 3808 3809 total = nbant = 0; 3810 for (i = 0; i < 3; i++) { 3811 if ((noise = letoh32(stats->noise[i]) & 0xff) == 0) 3812 continue; 3813 total += noise; 3814 nbant++; 3815 } 3816 /* There should be at least one antenna but check anyway. */ 3817 return (nbant == 0) ? -127 : (total / nbant) - 107; 3818 } 3819 3820 /* 3821 * Compute temperature (in degC) from last received statistics. 
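 * The uCode provides three factory reference points (R1-R3) and the last
 * raw temperature sample (R4); the code below evaluates
 *     temp(K) = 259 * (R4 - R2) / (R3 - R1)
 * then applies a 97/100 scale factor plus an offset of 8 before converting
 * to Celsius.  Purely illustrative arithmetic (made-up register values,
 * not real hardware readings): with R3 - R1 = 1000 and R4 - R2 = 1160,
 * 259 * 1160 / 1000 = 300 K, and 300 * 97 / 100 + 8 = 299 K, i.e. about
 * 26 degC.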
3822 */ 3823 int 3824 iwn4965_get_temperature(struct iwn_softc *sc) 3825 { 3826 struct iwn_ucode_info *uc = &sc->ucode_info; 3827 int32_t r1, r2, r3, r4, temp; 3828 3829 r1 = letoh32(uc->temp[0].chan20MHz); 3830 r2 = letoh32(uc->temp[1].chan20MHz); 3831 r3 = letoh32(uc->temp[2].chan20MHz); 3832 r4 = letoh32(sc->rawtemp); 3833 3834 if (r1 == r3) /* Prevents division by 0 (should not happen). */ 3835 return 0; 3836 3837 /* Sign-extend 23-bit R4 value to 32-bit. */ 3838 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000; 3839 /* Compute temperature in Kelvin. */ 3840 temp = (259 * (r4 - r2)) / (r3 - r1); 3841 temp = (temp * 97) / 100 + 8; 3842 3843 DPRINTF(("temperature %dK/%dC\n", temp, IWN_KTOC(temp))); 3844 return IWN_KTOC(temp); 3845 } 3846 3847 int 3848 iwn5000_get_temperature(struct iwn_softc *sc) 3849 { 3850 int32_t temp; 3851 3852 /* 3853 * Temperature is not used by the driver for 5000 Series because 3854 * TX power calibration is handled by firmware. 3855 */ 3856 temp = letoh32(sc->rawtemp); 3857 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 3858 temp = (temp / -5) + sc->temp_off; 3859 temp = IWN_KTOC(temp); 3860 } 3861 return temp; 3862 } 3863 3864 /* 3865 * Initialize sensitivity calibration state machine. 3866 */ 3867 int 3868 iwn_init_sensitivity(struct iwn_softc *sc) 3869 { 3870 struct iwn_ops *ops = &sc->ops; 3871 struct iwn_calib_state *calib = &sc->calib; 3872 uint32_t flags; 3873 int error; 3874 3875 /* Reset calibration state machine. */ 3876 memset(calib, 0, sizeof (*calib)); 3877 calib->state = IWN_CALIB_STATE_INIT; 3878 calib->cck_state = IWN_CCK_STATE_HIFA; 3879 /* Set initial correlation values. */ 3880 calib->ofdm_x1 = sc->limits->min_ofdm_x1; 3881 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; 3882 calib->ofdm_x4 = sc->limits->min_ofdm_x4; 3883 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; 3884 calib->cck_x4 = 125; 3885 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; 3886 calib->energy_cck = sc->limits->energy_cck; 3887 3888 /* Write initial sensitivity. */ 3889 if ((error = iwn_send_sensitivity(sc)) != 0) 3890 return error; 3891 3892 /* Write initial gains. */ 3893 if ((error = ops->init_gains(sc)) != 0) 3894 return error; 3895 3896 /* Request statistics at each beacon interval. */ 3897 flags = 0; 3898 DPRINTFN(2, ("sending request for statistics\n")); 3899 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 3900 } 3901 3902 /* 3903 * Collect noise and RSSI statistics for the first 20 beacons received 3904 * after association and use them to determine connected antennas and 3905 * to set differential gains. 3906 */ 3907 void 3908 iwn_collect_noise(struct iwn_softc *sc, 3909 const struct iwn_rx_general_stats *stats) 3910 { 3911 struct iwn_ops *ops = &sc->ops; 3912 struct iwn_calib_state *calib = &sc->calib; 3913 uint32_t val; 3914 int i; 3915 3916 /* Accumulate RSSI and noise for all 3 antennas. */ 3917 for (i = 0; i < 3; i++) { 3918 calib->rssi[i] += letoh32(stats->rssi[i]) & 0xff; 3919 calib->noise[i] += letoh32(stats->noise[i]) & 0xff; 3920 } 3921 /* NB: We update differential gains only once after 20 beacons. */ 3922 if (++calib->nbeacons < 20) 3923 return; 3924 3925 /* Determine highest average RSSI. */ 3926 val = MAX(calib->rssi[0], calib->rssi[1]); 3927 val = MAX(calib->rssi[2], val); 3928 3929 /* Determine which antennas are connected. 
*/ 3930 sc->chainmask = sc->rxchainmask; 3931 for (i = 0; i < 3; i++) 3932 if (val - calib->rssi[i] > 15 * 20) 3933 sc->chainmask &= ~(1 << i); 3934 DPRINTF(("RX chains mask: theoretical=0x%x, actual=0x%x\n", 3935 sc->rxchainmask, sc->chainmask)); 3936 3937 /* If none of the TX antennas are connected, keep at least one. */ 3938 if ((sc->chainmask & sc->txchainmask) == 0) 3939 sc->chainmask |= IWN_LSB(sc->txchainmask); 3940 3941 (void)ops->set_gains(sc); 3942 calib->state = IWN_CALIB_STATE_RUN; 3943 3944 #ifdef notyet 3945 /* XXX Disable RX chains with no antennas connected. */ 3946 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask)); 3947 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 3948 #endif 3949 3950 /* Enable power-saving mode if requested by user. */ 3951 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) 3952 (void)iwn_set_pslevel(sc, 0, 3, 1); 3953 } 3954 3955 int 3956 iwn4965_init_gains(struct iwn_softc *sc) 3957 { 3958 struct iwn_phy_calib_gain cmd; 3959 3960 memset(&cmd, 0, sizeof cmd); 3961 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 3962 /* Differential gains initially set to 0 for all 3 antennas. */ 3963 DPRINTF(("setting initial differential gains\n")); 3964 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 3965 } 3966 3967 int 3968 iwn5000_init_gains(struct iwn_softc *sc) 3969 { 3970 struct iwn_phy_calib cmd; 3971 3972 memset(&cmd, 0, sizeof cmd); 3973 cmd.code = sc->reset_noise_gain; 3974 cmd.ngroups = 1; 3975 cmd.isvalid = 1; 3976 DPRINTF(("setting initial differential gains\n")); 3977 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 3978 } 3979 3980 int 3981 iwn4965_set_gains(struct iwn_softc *sc) 3982 { 3983 struct iwn_calib_state *calib = &sc->calib; 3984 struct iwn_phy_calib_gain cmd; 3985 int i, delta, noise; 3986 3987 /* Get minimal noise among connected antennas. */ 3988 noise = INT_MAX; /* NB: There's at least one antenna. */ 3989 for (i = 0; i < 3; i++) 3990 if (sc->chainmask & (1 << i)) 3991 noise = MIN(calib->noise[i], noise); 3992 3993 memset(&cmd, 0, sizeof cmd); 3994 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 3995 /* Set differential gains for connected antennas. */ 3996 for (i = 0; i < 3; i++) { 3997 if (sc->chainmask & (1 << i)) { 3998 /* Compute attenuation (in unit of 1.5dB). */ 3999 delta = (noise - (int32_t)calib->noise[i]) / 30; 4000 /* NB: delta <= 0 */ 4001 /* Limit to [-4.5dB,0]. */ 4002 cmd.gain[i] = MIN(abs(delta), 3); 4003 if (delta < 0) 4004 cmd.gain[i] |= 1 << 2; /* sign bit */ 4005 } 4006 } 4007 DPRINTF(("setting differential gains Ant A/B/C: %x/%x/%x (%x)\n", 4008 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask)); 4009 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4010 } 4011 4012 int 4013 iwn5000_set_gains(struct iwn_softc *sc) 4014 { 4015 struct iwn_calib_state *calib = &sc->calib; 4016 struct iwn_phy_calib_gain cmd; 4017 int i, ant, div, delta; 4018 4019 /* We collected 20 beacons and !=6050 need a 1.5 factor. */ 4020 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30; 4021 4022 memset(&cmd, 0, sizeof cmd); 4023 cmd.code = sc->noise_gain; 4024 cmd.ngroups = 1; 4025 cmd.isvalid = 1; 4026 /* Get first available RX antenna as referential. */ 4027 ant = IWN_LSB(sc->rxchainmask); 4028 /* Set differential gains for other antennas. */ 4029 for (i = ant + 1; i < 3; i++) { 4030 if (sc->chainmask & (1 << i)) { 4031 /* The delta is relative to antenna "ant". */ 4032 delta = ((int32_t)calib->noise[ant] - 4033 (int32_t)calib->noise[i]) / div; 4034 /* Limit to [-4.5dB,+4.5dB]. 
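 * The gain fields use a sign-magnitude encoding: bits 0-1 hold |delta|
 * in 1.5dB steps (capped at 3, i.e. 4.5dB) and bit 2 is set for
 * negative deltas.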
*/ 4035 cmd.gain[i - 1] = MIN(abs(delta), 3); 4036 if (delta < 0) 4037 cmd.gain[i - 1] |= 1 << 2; /* sign bit */ 4038 } 4039 } 4040 DPRINTF(("setting differential gains: %x/%x (%x)\n", 4041 cmd.gain[0], cmd.gain[1], sc->chainmask)); 4042 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4043 } 4044 4045 /* 4046 * Tune RF RX sensitivity based on the number of false alarms detected 4047 * during the last beacon period. 4048 */ 4049 void 4050 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) 4051 { 4052 #define inc(val, inc, max) \ 4053 if ((val) < (max)) { \ 4054 if ((val) < (max) - (inc)) \ 4055 (val) += (inc); \ 4056 else \ 4057 (val) = (max); \ 4058 needs_update = 1; \ 4059 } 4060 #define dec(val, dec, min) \ 4061 if ((val) > (min)) { \ 4062 if ((val) > (min) + (dec)) \ 4063 (val) -= (dec); \ 4064 else \ 4065 (val) = (min); \ 4066 needs_update = 1; \ 4067 } 4068 4069 const struct iwn_sensitivity_limits *limits = sc->limits; 4070 struct iwn_calib_state *calib = &sc->calib; 4071 uint32_t val, rxena, fa; 4072 uint32_t energy[3], energy_min; 4073 uint8_t noise[3], noise_ref; 4074 int i, needs_update = 0; 4075 4076 /* Check that we've been enabled long enough. */ 4077 if ((rxena = letoh32(stats->general.load)) == 0) 4078 return; 4079 4080 /* Compute number of false alarms since last call for OFDM. */ 4081 fa = letoh32(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; 4082 fa += letoh32(stats->ofdm.fa) - calib->fa_ofdm; 4083 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ 4084 4085 /* Save counters values for next call. */ 4086 calib->bad_plcp_ofdm = letoh32(stats->ofdm.bad_plcp); 4087 calib->fa_ofdm = letoh32(stats->ofdm.fa); 4088 4089 if (fa > 50 * rxena) { 4090 /* High false alarm count, decrease sensitivity. */ 4091 DPRINTFN(2, ("OFDM high false alarm count: %u\n", fa)); 4092 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); 4093 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); 4094 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); 4095 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); 4096 4097 } else if (fa < 5 * rxena) { 4098 /* Low false alarm count, increase sensitivity. */ 4099 DPRINTFN(2, ("OFDM low false alarm count: %u\n", fa)); 4100 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); 4101 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); 4102 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); 4103 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); 4104 } 4105 4106 /* Compute maximum noise among 3 receivers. */ 4107 for (i = 0; i < 3; i++) 4108 noise[i] = (letoh32(stats->general.noise[i]) >> 8) & 0xff; 4109 val = MAX(noise[0], noise[1]); 4110 val = MAX(noise[2], val); 4111 /* Insert it into our samples table. */ 4112 calib->noise_samples[calib->cur_noise_sample] = val; 4113 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; 4114 4115 /* Compute maximum noise among last 20 samples. */ 4116 noise_ref = calib->noise_samples[0]; 4117 for (i = 1; i < 20; i++) 4118 noise_ref = MAX(noise_ref, calib->noise_samples[i]); 4119 4120 /* Compute maximum energy among 3 receivers. */ 4121 for (i = 0; i < 3; i++) 4122 energy[i] = letoh32(stats->general.energy[i]); 4123 val = MIN(energy[0], energy[1]); 4124 val = MIN(energy[2], val); 4125 /* Insert it into our samples table. */ 4126 calib->energy_samples[calib->cur_energy_sample] = val; 4127 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; 4128 4129 /* Compute minimum energy among last 10 samples. 
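 * (The energy values reported by the firmware appear to be on an inverted
 * scale, which would explain why MIN is used above to keep the strongest
 * of the three receivers and MAX is used below to keep the weakest of the
 * last 10 samples.)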
*/ 4130 energy_min = calib->energy_samples[0]; 4131 for (i = 1; i < 10; i++) 4132 energy_min = MAX(energy_min, calib->energy_samples[i]); 4133 energy_min += 6; 4134 4135 /* Compute number of false alarms since last call for CCK. */ 4136 fa = letoh32(stats->cck.bad_plcp) - calib->bad_plcp_cck; 4137 fa += letoh32(stats->cck.fa) - calib->fa_cck; 4138 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ 4139 4140 /* Save counters values for next call. */ 4141 calib->bad_plcp_cck = letoh32(stats->cck.bad_plcp); 4142 calib->fa_cck = letoh32(stats->cck.fa); 4143 4144 if (fa > 50 * rxena) { 4145 /* High false alarm count, decrease sensitivity. */ 4146 DPRINTFN(2, ("CCK high false alarm count: %u\n", fa)); 4147 calib->cck_state = IWN_CCK_STATE_HIFA; 4148 calib->low_fa = 0; 4149 4150 if (calib->cck_x4 > 160) { 4151 calib->noise_ref = noise_ref; 4152 if (calib->energy_cck > 2) 4153 dec(calib->energy_cck, 2, energy_min); 4154 } 4155 if (calib->cck_x4 < 160) { 4156 calib->cck_x4 = 161; 4157 needs_update = 1; 4158 } else 4159 inc(calib->cck_x4, 3, limits->max_cck_x4); 4160 4161 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); 4162 4163 } else if (fa < 5 * rxena) { 4164 /* Low false alarm count, increase sensitivity. */ 4165 DPRINTFN(2, ("CCK low false alarm count: %u\n", fa)); 4166 calib->cck_state = IWN_CCK_STATE_LOFA; 4167 calib->low_fa++; 4168 4169 if (calib->cck_state != IWN_CCK_STATE_INIT && 4170 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || 4171 calib->low_fa > 100)) { 4172 inc(calib->energy_cck, 2, limits->min_energy_cck); 4173 dec(calib->cck_x4, 3, limits->min_cck_x4); 4174 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); 4175 } 4176 } else { 4177 /* Not worth to increase or decrease sensitivity. */ 4178 DPRINTFN(2, ("CCK normal false alarm count: %u\n", fa)); 4179 calib->low_fa = 0; 4180 calib->noise_ref = noise_ref; 4181 4182 if (calib->cck_state == IWN_CCK_STATE_HIFA) { 4183 /* Previous interval had many false alarms. */ 4184 dec(calib->energy_cck, 8, energy_min); 4185 } 4186 calib->cck_state = IWN_CCK_STATE_INIT; 4187 } 4188 4189 if (needs_update) 4190 (void)iwn_send_sensitivity(sc); 4191 #undef dec 4192 #undef inc 4193 } 4194 4195 int 4196 iwn_send_sensitivity(struct iwn_softc *sc) 4197 { 4198 struct iwn_calib_state *calib = &sc->calib; 4199 struct iwn_enhanced_sensitivity_cmd cmd; 4200 int len; 4201 4202 memset(&cmd, 0, sizeof cmd); 4203 len = sizeof (struct iwn_sensitivity_cmd); 4204 cmd.which = IWN_SENSITIVITY_WORKTBL; 4205 /* OFDM modulation. */ 4206 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); 4207 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); 4208 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); 4209 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); 4210 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm); 4211 cmd.energy_ofdm_th = htole16(62); 4212 /* CCK modulation. */ 4213 cmd.corr_cck_x4 = htole16(calib->cck_x4); 4214 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); 4215 cmd.energy_cck = htole16(calib->energy_cck); 4216 /* Barker modulation: use default values. */ 4217 cmd.corr_barker = htole16(190); 4218 cmd.corr_barker_mrc = htole16(390); 4219 if (!(sc->sc_flags & IWN_FLAG_ENH_SENS)) 4220 goto send; 4221 /* Enhanced sensitivity settings. 
*/ 4222 len = sizeof (struct iwn_enhanced_sensitivity_cmd); 4223 cmd.ofdm_det_slope_mrc = htole16(668); 4224 cmd.ofdm_det_icept_mrc = htole16(4); 4225 cmd.ofdm_det_slope = htole16(486); 4226 cmd.ofdm_det_icept = htole16(37); 4227 cmd.cck_det_slope_mrc = htole16(853); 4228 cmd.cck_det_icept_mrc = htole16(4); 4229 cmd.cck_det_slope = htole16(476); 4230 cmd.cck_det_icept = htole16(99); 4231 send: 4232 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1); 4233 } 4234 4235 /* 4236 * Set STA mode power saving level (between 0 and 5). 4237 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 4238 */ 4239 int 4240 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) 4241 { 4242 struct iwn_pmgt_cmd cmd; 4243 const struct iwn_pmgt *pmgt; 4244 uint32_t max, skip_dtim; 4245 pcireg_t reg; 4246 int i; 4247 4248 /* Select which PS parameters to use. */ 4249 if (dtim <= 2) 4250 pmgt = &iwn_pmgt[0][level]; 4251 else if (dtim <= 10) 4252 pmgt = &iwn_pmgt[1][level]; 4253 else 4254 pmgt = &iwn_pmgt[2][level]; 4255 4256 memset(&cmd, 0, sizeof cmd); 4257 if (level != 0) /* not CAM */ 4258 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); 4259 if (level == 5) 4260 cmd.flags |= htole16(IWN_PS_FAST_PD); 4261 /* Retrieve PCIe Active State Power Management (ASPM). */ 4262 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 4263 sc->sc_cap_off + PCI_PCIE_LCSR); 4264 if (!(reg & PCI_PCIE_LCSR_ASPM_L0S)) /* L0s Entry disabled. */ 4265 cmd.flags |= htole16(IWN_PS_PCI_PMGT); 4266 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); 4267 cmd.txtimeout = htole32(pmgt->txtimeout * 1024); 4268 4269 if (dtim == 0) { 4270 dtim = 1; 4271 skip_dtim = 0; 4272 } else 4273 skip_dtim = pmgt->skip_dtim; 4274 if (skip_dtim != 0) { 4275 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); 4276 max = pmgt->intval[4]; 4277 if (max == (uint32_t)-1) 4278 max = dtim * (skip_dtim + 1); 4279 else if (max > dtim) 4280 max = (max / dtim) * dtim; 4281 } else 4282 max = dtim; 4283 for (i = 0; i < 5; i++) 4284 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 4285 4286 DPRINTF(("setting power saving level to %d\n", level)); 4287 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 4288 } 4289 4290 int 4291 iwn_send_btcoex(struct iwn_softc *sc) 4292 { 4293 struct iwn_bluetooth cmd; 4294 4295 memset(&cmd, 0, sizeof cmd); 4296 cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO; 4297 cmd.lead_time = IWN_BT_LEAD_TIME_DEF; 4298 cmd.max_kill = IWN_BT_MAX_KILL_DEF; 4299 DPRINTF(("configuring bluetooth coexistence\n")); 4300 return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 4301 } 4302 4303 int 4304 iwn_send_advanced_btcoex(struct iwn_softc *sc) 4305 { 4306 static const uint32_t btcoex_3wire[12] = { 4307 0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa, 4308 0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa, 4309 0xc0004000, 0x00004000, 0xf0005000, 0xf0005000, 4310 }; 4311 struct iwn_btcoex_priotable btprio; 4312 struct iwn_btcoex_prot btprot; 4313 int error, i; 4314 4315 if (sc->hw_type == IWN_HW_REV_TYPE_2030 || 4316 sc->hw_type == IWN_HW_REV_TYPE_135) { 4317 struct iwn2000_btcoex_config btconfig; 4318 4319 memset(&btconfig, 0, sizeof btconfig); 4320 btconfig.flags = IWN_BT_COEX6000_CHAN_INHIBITION | 4321 (IWN_BT_COEX6000_MODE_3W << IWN_BT_COEX6000_MODE_SHIFT) | 4322 IWN_BT_SYNC_2_BT_DISABLE; 4323 btconfig.max_kill = 5; 4324 btconfig.bt3_t7_timer = 1; 4325 btconfig.kill_ack = htole32(0xffff0000); 4326 btconfig.kill_cts = htole32(0xffff0000); 4327 btconfig.sample_time = 2; 4328 btconfig.bt3_t2_timer = 0xc; 
4329 for (i = 0; i < 12; i++) 4330 btconfig.lookup_table[i] = htole32(btcoex_3wire[i]); 4331 btconfig.valid = htole16(0xff); 4332 btconfig.prio_boost = htole32(0xf0); 4333 DPRINTF(("configuring advanced bluetooth coexistence\n")); 4334 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, 4335 sizeof(btconfig), 1); 4336 if (error != 0) 4337 return (error); 4338 } else { 4339 struct iwn6000_btcoex_config btconfig; 4340 4341 memset(&btconfig, 0, sizeof btconfig); 4342 btconfig.flags = IWN_BT_COEX6000_CHAN_INHIBITION | 4343 (IWN_BT_COEX6000_MODE_3W << IWN_BT_COEX6000_MODE_SHIFT) | 4344 IWN_BT_SYNC_2_BT_DISABLE; 4345 btconfig.max_kill = 5; 4346 btconfig.bt3_t7_timer = 1; 4347 btconfig.kill_ack = htole32(0xffff0000); 4348 btconfig.kill_cts = htole32(0xffff0000); 4349 btconfig.sample_time = 2; 4350 btconfig.bt3_t2_timer = 0xc; 4351 for (i = 0; i < 12; i++) 4352 btconfig.lookup_table[i] = htole32(btcoex_3wire[i]); 4353 btconfig.valid = htole16(0xff); 4354 btconfig.prio_boost = 0xf0; 4355 DPRINTF(("configuring advanced bluetooth coexistence\n")); 4356 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, 4357 sizeof(btconfig), 1); 4358 if (error != 0) 4359 return (error); 4360 } 4361 4362 memset(&btprio, 0, sizeof btprio); 4363 btprio.calib_init1 = 0x6; 4364 btprio.calib_init2 = 0x7; 4365 btprio.calib_periodic_low1 = 0x2; 4366 btprio.calib_periodic_low2 = 0x3; 4367 btprio.calib_periodic_high1 = 0x4; 4368 btprio.calib_periodic_high2 = 0x5; 4369 btprio.dtim = 0x6; 4370 btprio.scan52 = 0x8; 4371 btprio.scan24 = 0xa; 4372 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio), 4373 1); 4374 if (error != 0) 4375 return (error); 4376 4377 /* Force BT state machine change */ 4378 memset(&btprot, 0, sizeof btprot); 4379 btprot.open = 1; 4380 btprot.type = 1; 4381 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); 4382 if (error != 0) 4383 return (error); 4384 4385 btprot.open = 0; 4386 return (iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1)); 4387 } 4388 4389 int 4390 iwn5000_runtime_calib(struct iwn_softc *sc) 4391 { 4392 struct iwn5000_calib_config cmd; 4393 4394 memset(&cmd, 0, sizeof cmd); 4395 cmd.ucode.once.enable = 0xffffffff; 4396 cmd.ucode.once.start = IWN5000_CALIB_DC; 4397 DPRINTF(("configuring runtime calibration\n")); 4398 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0); 4399 } 4400 4401 int 4402 iwn_config(struct iwn_softc *sc) 4403 { 4404 struct iwn_ops *ops = &sc->ops; 4405 struct ieee80211com *ic = &sc->sc_ic; 4406 struct ifnet *ifp = &ic->ic_if; 4407 uint32_t txmask; 4408 uint16_t rxchain; 4409 int error, ridx; 4410 4411 /* Set radio temperature sensor offset. */ 4412 if (sc->hw_type == IWN_HW_REV_TYPE_6005) { 4413 error = iwn6000_temp_offset_calib(sc); 4414 if (error != 0) { 4415 printf("%s: could not set temperature offset\n", 4416 sc->sc_dev.dv_xname); 4417 return error; 4418 } 4419 } 4420 4421 if (sc->hw_type == IWN_HW_REV_TYPE_2030 || 4422 sc->hw_type == IWN_HW_REV_TYPE_2000 || 4423 sc->hw_type == IWN_HW_REV_TYPE_135 || 4424 sc->hw_type == IWN_HW_REV_TYPE_105) { 4425 error = iwn2000_temp_offset_calib(sc); 4426 if (error != 0) { 4427 printf("%s: could not set temperature offset\n", 4428 sc->sc_dev.dv_xname); 4429 return error; 4430 } 4431 } 4432 4433 if (sc->hw_type == IWN_HW_REV_TYPE_6050 || 4434 sc->hw_type == IWN_HW_REV_TYPE_6005) { 4435 /* Configure runtime DC calibration. 
*/ 4436 error = iwn5000_runtime_calib(sc); 4437 if (error != 0) { 4438 printf("%s: could not configure runtime calibration\n", 4439 sc->sc_dev.dv_xname); 4440 return error; 4441 } 4442 } 4443 4444 /* Configure valid TX chains for >=5000 Series. */ 4445 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4446 txmask = htole32(sc->txchainmask); 4447 DPRINTF(("configuring valid TX chains 0x%x\n", txmask)); 4448 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask, 4449 sizeof txmask, 0); 4450 if (error != 0) { 4451 printf("%s: could not configure valid TX chains\n", 4452 sc->sc_dev.dv_xname); 4453 return error; 4454 } 4455 } 4456 4457 /* Configure bluetooth coexistence. */ 4458 if (sc->sc_flags & IWN_FLAG_ADV_BT_COEX) 4459 error = iwn_send_advanced_btcoex(sc); 4460 else 4461 error = iwn_send_btcoex(sc); 4462 if (error != 0) { 4463 printf("%s: could not configure bluetooth coexistence\n", 4464 sc->sc_dev.dv_xname); 4465 return error; 4466 } 4467 4468 /* Set mode, channel, RX filter and enable RX. */ 4469 memset(&sc->rxon, 0, sizeof (struct iwn_rxon)); 4470 IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl)); 4471 IEEE80211_ADDR_COPY(sc->rxon.myaddr, ic->ic_myaddr); 4472 IEEE80211_ADDR_COPY(sc->rxon.wlap, ic->ic_myaddr); 4473 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_ibss_chan); 4474 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4475 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_ibss_chan)) { 4476 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4477 if (ic->ic_flags & IEEE80211_F_USEPROT) 4478 sc->rxon.flags |= htole32(IWN_RXON_TGG_PROT); 4479 DPRINTF(("%s: 2ghz prot 0x%x\n", __func__, 4480 le32toh(sc->rxon.flags))); 4481 } 4482 switch (ic->ic_opmode) { 4483 case IEEE80211_M_STA: 4484 sc->rxon.mode = IWN_MODE_STA; 4485 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST); 4486 break; 4487 case IEEE80211_M_MONITOR: 4488 sc->rxon.mode = IWN_MODE_MONITOR; 4489 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST | 4490 IWN_FILTER_CTL | IWN_FILTER_PROMISC); 4491 break; 4492 default: 4493 /* Should not get there. */ 4494 break; 4495 } 4496 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 4497 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 4498 sc->rxon.ht_single_mask = 0xff; 4499 sc->rxon.ht_dual_mask = 0xff; 4500 sc->rxon.ht_triple_mask = 0xff; 4501 rxchain = 4502 IWN_RXCHAIN_VALID(sc->rxchainmask) | 4503 IWN_RXCHAIN_MIMO_COUNT(2) | 4504 IWN_RXCHAIN_IDLE_COUNT(2); 4505 sc->rxon.rxchain = htole16(rxchain); 4506 DPRINTF(("setting configuration\n")); 4507 DPRINTF(("%s: rxon chan %d flags %x cck %x ofdm %x\n", __func__, 4508 sc->rxon.chan, le32toh(sc->rxon.flags), sc->rxon.cck_mask, 4509 sc->rxon.ofdm_mask)); 4510 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0); 4511 if (error != 0) { 4512 printf("%s: RXON command failed\n", sc->sc_dev.dv_xname); 4513 return error; 4514 } 4515 4516 ridx = (sc->sc_ic.ic_curmode == IEEE80211_MODE_11A) ? 4517 IWN_RIDX_OFDM6 : IWN_RIDX_CCK1; 4518 if ((error = iwn_add_broadcast_node(sc, 0, ridx)) != 0) { 4519 printf("%s: could not add broadcast node\n", 4520 sc->sc_dev.dv_xname); 4521 return error; 4522 } 4523 4524 /* Configuration has changed, set TX power accordingly. */ 4525 if ((error = ops->set_txpower(sc, 0)) != 0) { 4526 printf("%s: could not set TX power\n", sc->sc_dev.dv_xname); 4527 return error; 4528 } 4529 4530 if ((error = iwn_set_critical_temp(sc)) != 0) { 4531 printf("%s: could not set critical temperature\n", 4532 sc->sc_dev.dv_xname); 4533 return error; 4534 } 4535 4536 /* Set power saving level to CAM during initialization. 
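 * CAM (level 0) keeps the radio continuously awake; a user-requested
 * power saving level is only applied later, once associated and
 * calibrated (see iwn_collect_noise() and iwn_update_htprot()).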
*/ 4537 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) { 4538 printf("%s: could not set power saving level\n", 4539 sc->sc_dev.dv_xname); 4540 return error; 4541 } 4542 return 0; 4543 } 4544 4545 uint16_t 4546 iwn_get_active_dwell_time(struct iwn_softc *sc, 4547 uint16_t flags, uint8_t n_probes) 4548 { 4549 /* No channel? Default to 2GHz settings */ 4550 if (flags & IEEE80211_CHAN_2GHZ) { 4551 return (IWN_ACTIVE_DWELL_TIME_2GHZ + 4552 IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 4553 } 4554 4555 /* 5GHz dwell time */ 4556 return (IWN_ACTIVE_DWELL_TIME_5GHZ + 4557 IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 4558 } 4559 4560 /* 4561 * Limit the total dwell time to 85% of the beacon interval. 4562 * 4563 * Returns the dwell time in milliseconds. 4564 */ 4565 uint16_t 4566 iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time) 4567 { 4568 struct ieee80211com *ic = &sc->sc_ic; 4569 struct ieee80211_node *ni = ic->ic_bss; 4570 int bintval = 0; 4571 4572 /* bintval is in TU (1.024mS) */ 4573 if (ni != NULL) 4574 bintval = ni->ni_intval; 4575 4576 /* 4577 * If it's non-zero, we should calculate the minimum of 4578 * it and the DWELL_BASE. 4579 * 4580 * XXX Yes, the math should take into account that bintval 4581 * is 1.024mS, not 1mS.. 4582 */ 4583 if (bintval > 0) { 4584 return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100))); 4585 } 4586 4587 /* No association context? Default */ 4588 return (IWN_PASSIVE_DWELL_BASE); 4589 } 4590 4591 uint16_t 4592 iwn_get_passive_dwell_time(struct iwn_softc *sc, uint16_t flags) 4593 { 4594 uint16_t passive; 4595 if (flags & IEEE80211_CHAN_2GHZ) { 4596 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ; 4597 } else { 4598 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ; 4599 } 4600 4601 /* Clamp to the beacon interval if we're associated */ 4602 return (iwn_limit_dwell(sc, passive)); 4603 } 4604 4605 int 4606 iwn_scan(struct iwn_softc *sc, uint16_t flags) 4607 { 4608 struct ieee80211com *ic = &sc->sc_ic; 4609 struct iwn_scan_hdr *hdr; 4610 struct iwn_cmd_data *tx; 4611 struct iwn_scan_essid *essid; 4612 struct iwn_scan_chan *chan; 4613 struct ieee80211_frame *wh; 4614 struct ieee80211_rateset *rs; 4615 struct ieee80211_channel *c; 4616 struct ifnet *ifp = &ic->ic_if; 4617 uint8_t *buf, *frm; 4618 uint16_t rxchain, dwell_active, dwell_passive; 4619 uint8_t txant; 4620 int buflen, error, is_active; 4621 4622 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4623 if (buf == NULL) { 4624 printf("%s: could not allocate buffer for scan command\n", 4625 sc->sc_dev.dv_xname); 4626 return ENOMEM; 4627 } 4628 hdr = (struct iwn_scan_hdr *)buf; 4629 /* 4630 * Move to the next channel if no frames are received within 10ms 4631 * after sending the probe request. 4632 */ 4633 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 4634 hdr->quiet_threshold = htole16(1); /* min # of packets */ 4635 4636 /* Select antennas for scanning. */ 4637 rxchain = 4638 IWN_RXCHAIN_VALID(sc->rxchainmask) | 4639 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) | 4640 IWN_RXCHAIN_DRIVER_FORCE; 4641 if ((flags & IEEE80211_CHAN_5GHZ) && 4642 sc->hw_type == IWN_HW_REV_TYPE_4965) { 4643 /* Ant A must be avoided in 5GHz because of an HW bug. */ 4644 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC); 4645 } else /* Use all available RX antennas. 
*/ 4646 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); 4647 hdr->rxchain = htole16(rxchain); 4648 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); 4649 4650 tx = (struct iwn_cmd_data *)(hdr + 1); 4651 tx->flags = htole32(IWN_TX_AUTO_SEQ); 4652 tx->id = sc->broadcast_id; 4653 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4654 4655 if (flags & IEEE80211_CHAN_5GHZ) { 4656 /* Send probe requests at 6Mbps. */ 4657 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp; 4658 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4659 } else { 4660 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); 4661 /* Send probe requests at 1Mbps. */ 4662 tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp; 4663 tx->rflags = IWN_RFLAG_CCK; 4664 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4665 } 4666 /* Use the first valid TX antenna. */ 4667 txant = IWN_LSB(sc->txchainmask); 4668 tx->rflags |= IWN_RFLAG_ANT(txant); 4669 4670 /* 4671 * Only do active scanning if we're announcing a probe request 4672 * for a given SSID (or more, if we ever add it to the driver.) 4673 */ 4674 is_active = 0; 4675 4676 /* 4677 * If we're scanning for a specific SSID, add it to the command. 4678 */ 4679 essid = (struct iwn_scan_essid *)(tx + 1); 4680 if (ic->ic_des_esslen != 0) { 4681 essid[0].id = IEEE80211_ELEMID_SSID; 4682 essid[0].len = ic->ic_des_esslen; 4683 memcpy(essid[0].data, ic->ic_des_essid, ic->ic_des_esslen); 4684 4685 is_active = 1; 4686 } 4687 /* 4688 * Build a probe request frame. Most of the following code is a 4689 * copy & paste of what is done in net80211. 4690 */ 4691 wh = (struct ieee80211_frame *)(essid + 20); 4692 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4693 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4694 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4695 IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl)); 4696 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr); 4697 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr); 4698 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr); 4699 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 4700 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 4701 4702 frm = (uint8_t *)(wh + 1); 4703 frm = ieee80211_add_ssid(frm, NULL, 0); 4704 frm = ieee80211_add_rates(frm, rs); 4705 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4706 frm = ieee80211_add_xrates(frm, rs); 4707 if (ic->ic_flags & IEEE80211_F_HTON) 4708 frm = ieee80211_add_htcaps(frm, ic); 4709 4710 /* Set length of probe request. */ 4711 tx->len = htole16(frm - (uint8_t *)wh); 4712 4713 /* 4714 * If active scanning is requested but a certain channel is 4715 * marked passive, we can do active scanning if we detect 4716 * transmissions. 4717 * 4718 * There is an issue with some firmware versions that triggers 4719 * a sysassert on a "good CRC threshold" of zero (== disabled), 4720 * on a radar channel even though this means that we should NOT 4721 * send probes. 4722 * 4723 * The "good CRC threshold" is the number of frames that we 4724 * need to receive during our dwell time on a channel before 4725 * sending out probes -- setting this to a huge value will 4726 * mean we never reach it, but at the same time work around 4727 * the aforementioned issue. Thus use IWN_GOOD_CRC_TH_NEVER 4728 * here instead of IWN_GOOD_CRC_TH_DISABLED. 4729 * 4730 * This was fixed in later versions along with some other 4731 * scan changes, and the threshold behaves as a flag in those 4732 * versions. 4733 */ 4734 4735 /* 4736 * If we're doing active scanning, set the crc_threshold 4737 * to a suitable value. 
This is different for active versus 4738 * passive scanning depending upon the channel flags; the 4739 * firmware will obey that particular check for us. 4740 */ 4741 if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN) 4742 hdr->crc_threshold = is_active ? 4743 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED; 4744 else 4745 hdr->crc_threshold = is_active ? 4746 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER; 4747 4748 chan = (struct iwn_scan_chan *)frm; 4749 for (c = &ic->ic_channels[1]; 4750 c <= &ic->ic_channels[IEEE80211_CHAN_MAX]; c++) { 4751 if ((c->ic_flags & flags) != flags) 4752 continue; 4753 4754 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 4755 DPRINTFN(2, ("adding channel %d\n", chan->chan)); 4756 chan->flags = 0; 4757 if (ic->ic_des_esslen != 0) 4758 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 4759 4760 if (c->ic_flags & IEEE80211_CHAN_PASSIVE) 4761 chan->flags |= htole32(IWN_CHAN_PASSIVE); 4762 else 4763 chan->flags |= htole32(IWN_CHAN_ACTIVE); 4764 4765 /* 4766 * Calculate the active/passive dwell times. 4767 */ 4768 4769 dwell_active = iwn_get_active_dwell_time(sc, flags, is_active); 4770 dwell_passive = iwn_get_passive_dwell_time(sc, flags); 4771 4772 /* Make sure they're valid */ 4773 if (dwell_passive <= dwell_active) 4774 dwell_passive = dwell_active + 1; 4775 4776 chan->active = htole16(dwell_active); 4777 chan->passive = htole16(dwell_passive); 4778 4779 chan->dsp_gain = 0x6e; 4780 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4781 chan->rf_gain = 0x3b; 4782 } else { 4783 chan->rf_gain = 0x28; 4784 } 4785 hdr->nchan++; 4786 chan++; 4787 } 4788 4789 buflen = (uint8_t *)chan - buf; 4790 hdr->len = htole16(buflen); 4791 4792 DPRINTF(("sending scan command nchan=%d\n", hdr->nchan)); 4793 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 4794 free(buf, M_DEVBUF, IWN_SCAN_MAXSZ); 4795 return error; 4796 } 4797 4798 int 4799 iwn_auth(struct iwn_softc *sc) 4800 { 4801 struct iwn_ops *ops = &sc->ops; 4802 struct ieee80211com *ic = &sc->sc_ic; 4803 struct ieee80211_node *ni = ic->ic_bss; 4804 int error, ridx; 4805 4806 /* Update adapter configuration. */ 4807 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4808 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan); 4809 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4810 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) { 4811 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4812 if (ic->ic_flags & IEEE80211_F_USEPROT) 4813 sc->rxon.flags |= htole32(IWN_RXON_TGG_PROT); 4814 DPRINTF(("%s: 2ghz prot 0x%x\n", __func__, 4815 le32toh(sc->rxon.flags))); 4816 } 4817 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4818 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4819 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4820 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4821 switch (ic->ic_curmode) { 4822 case IEEE80211_MODE_11A: 4823 sc->rxon.cck_mask = 0; 4824 sc->rxon.ofdm_mask = 0x15; 4825 break; 4826 case IEEE80211_MODE_11B: 4827 sc->rxon.cck_mask = 0x03; 4828 sc->rxon.ofdm_mask = 0; 4829 break; 4830 default: /* Assume 802.11b/g/n. */ 4831 sc->rxon.cck_mask = 0x0f; 4832 sc->rxon.ofdm_mask = 0x15; 4833 } 4834 DPRINTF(("%s: rxon chan %d flags %x cck %x ofdm %x\n", __func__, 4835 sc->rxon.chan, le32toh(sc->rxon.flags), sc->rxon.cck_mask, 4836 sc->rxon.ofdm_mask)); 4837 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 4838 if (error != 0) { 4839 printf("%s: RXON command failed\n", sc->sc_dev.dv_xname); 4840 return error; 4841 } 4842 4843 /* Configuration has changed, set TX power accordingly.
*/ 4844 if ((error = ops->set_txpower(sc, 1)) != 0) { 4845 printf("%s: could not set TX power\n", sc->sc_dev.dv_xname); 4846 return error; 4847 } 4848 /* 4849 * Reconfiguring RXON clears the firmware nodes table so we must 4850 * add the broadcast node again. 4851 */ 4852 ridx = IEEE80211_IS_CHAN_5GHZ(ni->ni_chan) ? 4853 IWN_RIDX_OFDM6 : IWN_RIDX_CCK1; 4854 if ((error = iwn_add_broadcast_node(sc, 1, ridx)) != 0) { 4855 printf("%s: could not add broadcast node\n", 4856 sc->sc_dev.dv_xname); 4857 return error; 4858 } 4859 return 0; 4860 } 4861 4862 int 4863 iwn_run(struct iwn_softc *sc) 4864 { 4865 struct iwn_ops *ops = &sc->ops; 4866 struct ieee80211com *ic = &sc->sc_ic; 4867 struct ieee80211_node *ni = ic->ic_bss; 4868 struct iwn_node_info node; 4869 int error; 4870 4871 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 4872 /* Link LED blinks while monitoring. */ 4873 iwn_set_led(sc, IWN_LED_LINK, 50, 50); 4874 return 0; 4875 } 4876 if ((error = iwn_set_timing(sc, ni)) != 0) { 4877 printf("%s: could not set timing\n", sc->sc_dev.dv_xname); 4878 return error; 4879 } 4880 4881 /* Update adapter configuration. */ 4882 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd)); 4883 /* Short preamble and slot time are negotiated when associating. */ 4884 sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT); 4885 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4886 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4887 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4888 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4889 sc->rxon.filter |= htole32(IWN_FILTER_BSS); 4890 4891 /* HT is negotiated when associating. */ 4892 if (ni->ni_flags & IEEE80211_NODE_HT) { 4893 enum ieee80211_htprot htprot = 4894 (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK); 4895 DPRINTF(("%s: htprot = %d\n", __func__, htprot)); 4896 sc->rxon.flags |= htole32(IWN_RXON_HT_PROTMODE(htprot)); 4897 } else 4898 sc->rxon.flags &= ~htole32(IWN_RXON_HT_PROTMODE(3)); 4899 4900 if (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) { 4901 /* 11a or 11n 5GHz */ 4902 sc->rxon.cck_mask = 0; 4903 sc->rxon.ofdm_mask = 0x15; 4904 } else if (ni->ni_flags & IEEE80211_NODE_HT) { 4905 /* 11n 2GHz */ 4906 sc->rxon.cck_mask = 0x0f; 4907 sc->rxon.ofdm_mask = 0x15; 4908 } else { 4909 if (ni->ni_rates.rs_nrates == 4) { 4910 /* 11b */ 4911 sc->rxon.cck_mask = 0x03; 4912 sc->rxon.ofdm_mask = 0; 4913 } else { 4914 /* assume 11g */ 4915 sc->rxon.cck_mask = 0x0f; 4916 sc->rxon.ofdm_mask = 0x15; 4917 } 4918 } 4919 DPRINTF(("%s: rxon chan %d flags %x cck %x ofdm %x\n", __func__, 4920 sc->rxon.chan, le32toh(sc->rxon.flags), sc->rxon.cck_mask, 4921 sc->rxon.ofdm_mask)); 4922 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 4923 if (error != 0) { 4924 printf("%s: could not update configuration\n", 4925 sc->sc_dev.dv_xname); 4926 return error; 4927 } 4928 4929 /* Configuration has changed, set TX power accordingly. */ 4930 if ((error = ops->set_txpower(sc, 1)) != 0) { 4931 printf("%s: could not set TX power\n", sc->sc_dev.dv_xname); 4932 return error; 4933 } 4934 4935 /* Fake a join to initialize the TX rate. */ 4936 ((struct iwn_node *)ni)->id = IWN_ID_BSS; 4937 iwn_newassoc(ic, ni, 1); 4938 4939 /* Add BSS node. 
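 * For HT peers this also programs the A-MPDU parameters into the firmware
 * node entry: the maximum RX A-MPDU length exponent (size factor) and the
 * minimum MPDU start spacing (density), both taken from ic_ampdu_params.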
*/ 4940 memset(&node, 0, sizeof node); 4941 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 4942 node.id = IWN_ID_BSS; 4943 if (ni->ni_flags & IEEE80211_NODE_HT) { 4944 node.htmask = (IWN_AMDPU_SIZE_FACTOR_MASK | 4945 IWN_AMDPU_DENSITY_MASK); 4946 node.htflags = htole32( 4947 IWN_AMDPU_SIZE_FACTOR( 4948 (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_LE)) | 4949 IWN_AMDPU_DENSITY( 4950 (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) >> 2)); 4951 } 4952 DPRINTF(("adding BSS node\n")); 4953 error = ops->add_node(sc, &node, 1); 4954 if (error != 0) { 4955 printf("%s: could not add BSS node\n", sc->sc_dev.dv_xname); 4956 return error; 4957 } 4958 DPRINTF(("setting link quality for node %d\n", node.id)); 4959 if ((error = iwn_set_link_quality(sc, ni)) != 0) { 4960 printf("%s: could not setup link quality for node %d\n", 4961 sc->sc_dev.dv_xname, node.id); 4962 return error; 4963 } 4964 4965 if ((error = iwn_init_sensitivity(sc)) != 0) { 4966 printf("%s: could not set sensitivity\n", 4967 sc->sc_dev.dv_xname); 4968 return error; 4969 } 4970 /* Start periodic calibration timer. */ 4971 sc->calib.state = IWN_CALIB_STATE_ASSOC; 4972 sc->calib_cnt = 0; 4973 timeout_add_msec(&sc->calib_to, 500); 4974 4975 /* Link LED always on while associated. */ 4976 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 4977 return 0; 4978 } 4979 4980 /* 4981 * We support CCMP hardware encryption/decryption of unicast frames only. 4982 * HW support for TKIP really sucks. We should let TKIP die anyway. 4983 */ 4984 int 4985 iwn_set_key(struct ieee80211com *ic, struct ieee80211_node *ni, 4986 struct ieee80211_key *k) 4987 { 4988 struct iwn_softc *sc = ic->ic_softc; 4989 struct iwn_ops *ops = &sc->ops; 4990 struct iwn_node *wn = (void *)ni; 4991 struct iwn_node_info node; 4992 uint16_t kflags; 4993 4994 if ((k->k_flags & IEEE80211_KEY_GROUP) || 4995 k->k_cipher != IEEE80211_CIPHER_CCMP) 4996 return ieee80211_set_key(ic, ni, k); 4997 4998 kflags = IWN_KFLAG_CCMP | IWN_KFLAG_MAP | IWN_KFLAG_KID(k->k_id); 4999 if (k->k_flags & IEEE80211_KEY_GROUP) 5000 kflags |= IWN_KFLAG_GROUP; 5001 5002 memset(&node, 0, sizeof node); 5003 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ? 5004 sc->broadcast_id : wn->id; 5005 node.control = IWN_NODE_UPDATE; 5006 node.flags = IWN_FLAG_SET_KEY; 5007 node.kflags = htole16(kflags); 5008 node.kid = k->k_id; 5009 memcpy(node.key, k->k_key, k->k_len); 5010 DPRINTF(("set key id=%d for node %d\n", k->k_id, node.id)); 5011 return ops->add_node(sc, &node, 1); 5012 } 5013 5014 void 5015 iwn_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni, 5016 struct ieee80211_key *k) 5017 { 5018 struct iwn_softc *sc = ic->ic_softc; 5019 struct iwn_ops *ops = &sc->ops; 5020 struct iwn_node *wn = (void *)ni; 5021 struct iwn_node_info node; 5022 5023 if ((k->k_flags & IEEE80211_KEY_GROUP) || 5024 k->k_cipher != IEEE80211_CIPHER_CCMP) { 5025 /* See comment about other ciphers above. */ 5026 ieee80211_delete_key(ic, ni, k); 5027 return; 5028 } 5029 if (ic->ic_state != IEEE80211_S_RUN) 5030 return; /* Nothing to do. */ 5031 memset(&node, 0, sizeof node); 5032 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ? 5033 sc->broadcast_id : wn->id; 5034 node.control = IWN_NODE_UPDATE; 5035 node.flags = IWN_FLAG_SET_KEY; 5036 node.kflags = htole16(IWN_KFLAG_INVALID); 5037 node.kid = 0xff; 5038 DPRINTF(("delete keys for node %d\n", node.id)); 5039 (void)ops->add_node(sc, &node, 1); 5040 } 5041 5042 /* 5043 * This function is called by upper layer when HT protection settings in 5044 * beacons have changed. 
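 * Only the association-related subset of the RXON configuration is pushed
 * here, via an RXON_ASSOC command; TX power is then restored because any
 * RXON-type command wipes the firmware's TX power table, and the power
 * saving level is re-applied as well.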
5045 */ 5046 void 5047 iwn_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni) 5048 { 5049 struct iwn_softc *sc = ic->ic_softc; 5050 struct iwn_ops *ops = &sc->ops; 5051 enum ieee80211_htprot htprot; 5052 struct iwn_rxon_assoc rxon_assoc; 5053 int s, error; 5054 5055 /* Update HT protection mode setting. */ 5056 htprot = (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK) >> 5057 IEEE80211_HTOP1_PROT_SHIFT; 5058 sc->rxon.flags &= ~htole32(IWN_RXON_HT_PROTMODE(3)); 5059 sc->rxon.flags |= htole32(IWN_RXON_HT_PROTMODE(htprot)); 5060 5061 /* Update RXON config. */ 5062 memset(&rxon_assoc, 0, sizeof(rxon_assoc)); 5063 rxon_assoc.flags = sc->rxon.flags; 5064 rxon_assoc.filter = sc->rxon.filter; 5065 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 5066 rxon_assoc.cck_mask = sc->rxon.cck_mask; 5067 rxon_assoc.ht_single_mask = sc->rxon.ht_single_mask; 5068 rxon_assoc.ht_dual_mask = sc->rxon.ht_dual_mask; 5069 rxon_assoc.ht_triple_mask = sc->rxon.ht_triple_mask; 5070 rxon_assoc.rxchain = sc->rxon.rxchain; 5071 rxon_assoc.acquisition = sc->rxon.acquisition; 5072 5073 s = splnet(); 5074 5075 error = iwn_cmd(sc, IWN_CMD_RXON_ASSOC, &rxon_assoc, 5076 sizeof(rxon_assoc), 1); 5077 if (error != 0) 5078 printf("%s: RXON_ASSOC command failed\n", sc->sc_dev.dv_xname); 5079 5080 DELAY(100); 5081 5082 /* All RXONs wipe the firmware's txpower table. Restore it. */ 5083 error = ops->set_txpower(sc, 1); 5084 if (error != 0) 5085 printf("%s: could not set TX power\n", sc->sc_dev.dv_xname); 5086 5087 DELAY(100); 5088 5089 /* Restore power saving level */ 5090 if (ic->ic_flags & IEEE80211_F_PMGTON) 5091 error = iwn_set_pslevel(sc, 0, 3, 1); 5092 else 5093 error = iwn_set_pslevel(sc, 0, 0, 1); 5094 if (error != 0) 5095 printf("%s: could not set PS level\n", sc->sc_dev.dv_xname); 5096 5097 splx(s); 5098 } 5099 5100 /* 5101 * This function is called by upper layer when an ADDBA request is received 5102 * from another STA and before the ADDBA response is sent. 5103 */ 5104 int 5105 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 5106 uint8_t tid) 5107 { 5108 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid]; 5109 struct iwn_softc *sc = ic->ic_softc; 5110 struct iwn_ops *ops = &sc->ops; 5111 struct iwn_node *wn = (void *)ni; 5112 struct iwn_node_info node; 5113 5114 memset(&node, 0, sizeof node); 5115 node.id = wn->id; 5116 node.control = IWN_NODE_UPDATE; 5117 node.flags = IWN_FLAG_SET_ADDBA; 5118 node.addba_tid = tid; 5119 node.addba_ssn = htole16(ba->ba_winstart); 5120 DPRINTF(("ADDBA RA=%d TID=%d SSN=%d\n", wn->id, tid, 5121 ba->ba_winstart)); 5122 /* XXX async command, so firmware may still fail to add BA agreement */ 5123 return ops->add_node(sc, &node, 1); 5124 } 5125 5126 /* 5127 * This function is called by upper layer on teardown of an HT-immediate 5128 * Block Ack agreement (e.g. upon receipt of a DELBA frame). 5129 */ 5130 void 5131 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 5132 uint8_t tid) 5133 { 5134 struct iwn_softc *sc = ic->ic_softc; 5135 struct iwn_ops *ops = &sc->ops; 5136 struct iwn_node *wn = (void *)ni; 5137 struct iwn_node_info node; 5138 5139 memset(&node, 0, sizeof node); 5140 node.id = wn->id; 5141 node.control = IWN_NODE_UPDATE; 5142 node.flags = IWN_FLAG_SET_DELBA; 5143 node.delba_tid = tid; 5144 DPRINTF(("DELBA RA=%d TID=%d\n", wn->id, tid)); 5145 (void)ops->add_node(sc, &node, 1); 5146 } 5147 5148 /* 5149 * This function is called by upper layer when an ADDBA response is received 5150 * from another STA.
5151 */ 5152 int 5153 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 5154 uint8_t tid) 5155 { 5156 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 5157 struct iwn_softc *sc = ic->ic_softc; 5158 struct iwn_ops *ops = &sc->ops; 5159 struct iwn_node *wn = (void *)ni; 5160 struct iwn_node_info node; 5161 int error; 5162 5163 /* Enable TX for the specified RA/TID. */ 5164 wn->disable_tid &= ~(1 << tid); 5165 memset(&node, 0, sizeof node); 5166 node.id = wn->id; 5167 node.control = IWN_NODE_UPDATE; 5168 node.flags = IWN_FLAG_SET_DISABLE_TID; 5169 node.disable_tid = htole16(wn->disable_tid); 5170 error = ops->add_node(sc, &node, 1); 5171 if (error != 0) 5172 return error; 5173 5174 if ((error = iwn_nic_lock(sc)) != 0) 5175 return error; 5176 ops->ampdu_tx_start(sc, ni, tid, ba->ba_winstart); 5177 iwn_nic_unlock(sc); 5178 return 0; 5179 } 5180 5181 void 5182 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 5183 uint8_t tid) 5184 { 5185 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 5186 struct iwn_softc *sc = ic->ic_softc; 5187 struct iwn_ops *ops = &sc->ops; 5188 5189 if (iwn_nic_lock(sc) != 0) 5190 return; 5191 ops->ampdu_tx_stop(sc, tid, ba->ba_winstart); 5192 iwn_nic_unlock(sc); 5193 } 5194 5195 void 5196 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5197 uint8_t tid, uint16_t ssn) 5198 { 5199 struct iwn_node *wn = (void *)ni; 5200 int qid = 7 + tid; 5201 5202 /* Stop TX scheduler while we're changing its configuration. */ 5203 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5204 IWN4965_TXQ_STATUS_CHGACT); 5205 5206 /* Assign RA/TID translation to the queue. */ 5207 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), 5208 wn->id << 4 | tid); 5209 5210 /* Enable chain-building mode for the queue. */ 5211 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); 5212 5213 /* Set starting sequence number from the ADDBA request. */ 5214 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5215 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 5216 5217 /* Set scheduler window size. */ 5218 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), 5219 IWN_SCHED_WINSZ); 5220 /* Set scheduler frame limit. */ 5221 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5222 IWN_SCHED_LIMIT << 16); 5223 5224 /* Enable interrupts for the queue. */ 5225 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 5226 5227 /* Mark the queue as active. */ 5228 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5229 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | 5230 iwn_tid2fifo[tid] << 1); 5231 } 5232 5233 void 5234 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 5235 { 5236 int qid = 7 + tid; 5237 5238 /* Stop TX scheduler while we're changing its configuration. */ 5239 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5240 IWN4965_TXQ_STATUS_CHGACT); 5241 5242 /* Set starting sequence number from the ADDBA request. */ 5243 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5244 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 5245 5246 /* Disable interrupts for the queue. */ 5247 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 5248 5249 /* Mark the queue as inactive. 
*/ 5250 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5251 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 5252 } 5253 5254 void 5255 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5256 uint8_t tid, uint16_t ssn) 5257 { 5258 struct iwn_node *wn = (void *)ni; 5259 int qid = 10 + tid; 5260 5261 /* Stop TX scheduler while we're changing its configuration. */ 5262 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5263 IWN5000_TXQ_STATUS_CHGACT); 5264 5265 /* Assign RA/TID translation to the queue. */ 5266 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 5267 wn->id << 4 | tid); 5268 5269 /* Enable chain-building mode for the queue. */ 5270 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 5271 5272 /* Enable aggregation for the queue. */ 5273 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 5274 5275 /* Set starting sequence number from the ADDBA request. */ 5276 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5277 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 5278 5279 /* Set scheduler window size and frame limit. */ 5280 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5281 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5282 5283 /* Enable interrupts for the queue. */ 5284 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 5285 5286 /* Mark the queue as active. */ 5287 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5288 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 5289 } 5290 5291 void 5292 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 5293 { 5294 int qid = 10 + tid; 5295 5296 /* Stop TX scheduler while we're changing its configuration. */ 5297 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5298 IWN5000_TXQ_STATUS_CHGACT); 5299 5300 /* Disable aggregation for the queue. */ 5301 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 5302 5303 /* Set starting sequence number from the ADDBA request. */ 5304 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5305 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 5306 5307 /* Disable interrupts for the queue. */ 5308 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 5309 5310 /* Mark the queue as inactive. */ 5311 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5312 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); 5313 } 5314 5315 /* 5316 * Query calibration tables from the initialization firmware. We do this 5317 * only once at first boot. Called from a process context. 5318 */ 5319 int 5320 iwn5000_query_calibration(struct iwn_softc *sc) 5321 { 5322 struct iwn5000_calib_config cmd; 5323 int error; 5324 5325 memset(&cmd, 0, sizeof cmd); 5326 cmd.ucode.once.enable = 0xffffffff; 5327 cmd.ucode.once.start = 0xffffffff; 5328 cmd.ucode.once.send = 0xffffffff; 5329 cmd.ucode.flags = 0xffffffff; 5330 DPRINTF(("sending calibration query\n")); 5331 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); 5332 if (error != 0) 5333 return error; 5334 5335 /* Wait at most two seconds for calibration to complete. */ 5336 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) 5337 error = tsleep(sc, PCATCH, "iwncal", 2 * hz); 5338 return error; 5339 } 5340 5341 /* 5342 * Send calibration results to the runtime firmware. These results were 5343 * obtained on first boot from the initialization firmware. 
5344 */ 5345 int 5346 iwn5000_send_calibration(struct iwn_softc *sc) 5347 { 5348 int idx, error; 5349 5350 for (idx = 0; idx < 5; idx++) { 5351 if (sc->calibcmd[idx].buf == NULL) 5352 continue; /* No results available. */ 5353 DPRINTF(("send calibration result idx=%d len=%d\n", 5354 idx, sc->calibcmd[idx].len)); 5355 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf, 5356 sc->calibcmd[idx].len, 0); 5357 if (error != 0) { 5358 printf("%s: could not send calibration result\n", 5359 sc->sc_dev.dv_xname); 5360 return error; 5361 } 5362 } 5363 return 0; 5364 } 5365 5366 int 5367 iwn5000_send_wimax_coex(struct iwn_softc *sc) 5368 { 5369 struct iwn5000_wimax_coex wimax; 5370 5371 #ifdef notyet 5372 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 5373 /* Enable WiMAX coexistence for combo adapters. */ 5374 wimax.flags = 5375 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 5376 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 5377 IWN_WIMAX_COEX_STA_TABLE_VALID | 5378 IWN_WIMAX_COEX_ENABLE; 5379 memcpy(wimax.events, iwn6050_wimax_events, 5380 sizeof iwn6050_wimax_events); 5381 } else 5382 #endif 5383 { 5384 /* Disable WiMAX coexistence. */ 5385 wimax.flags = 0; 5386 memset(wimax.events, 0, sizeof wimax.events); 5387 } 5388 DPRINTF(("Configuring WiMAX coexistence\n")); 5389 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 5390 } 5391 5392 int 5393 iwn5000_crystal_calib(struct iwn_softc *sc) 5394 { 5395 struct iwn5000_phy_calib_crystal cmd; 5396 5397 memset(&cmd, 0, sizeof cmd); 5398 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 5399 cmd.ngroups = 1; 5400 cmd.isvalid = 1; 5401 cmd.cap_pin[0] = letoh32(sc->eeprom_crystal) & 0xff; 5402 cmd.cap_pin[1] = (letoh32(sc->eeprom_crystal) >> 16) & 0xff; 5403 DPRINTF(("sending crystal calibration %d, %d\n", 5404 cmd.cap_pin[0], cmd.cap_pin[1])); 5405 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 5406 } 5407 5408 int 5409 iwn6000_temp_offset_calib(struct iwn_softc *sc) 5410 { 5411 struct iwn6000_phy_calib_temp_offset cmd; 5412 5413 memset(&cmd, 0, sizeof cmd); 5414 cmd.code = IWN6000_PHY_CALIB_TEMP_OFFSET; 5415 cmd.ngroups = 1; 5416 cmd.isvalid = 1; 5417 if (sc->eeprom_temp != 0) 5418 cmd.offset = htole16(sc->eeprom_temp); 5419 else 5420 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET); 5421 DPRINTF(("setting radio sensor offset to %d\n", letoh16(cmd.offset))); 5422 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 5423 } 5424 5425 int 5426 iwn2000_temp_offset_calib(struct iwn_softc *sc) 5427 { 5428 struct iwn2000_phy_calib_temp_offset cmd; 5429 5430 memset(&cmd, 0, sizeof cmd); 5431 cmd.code = IWN2000_PHY_CALIB_TEMP_OFFSET; 5432 cmd.ngroups = 1; 5433 cmd.isvalid = 1; 5434 if (sc->eeprom_rawtemp != 0) { 5435 cmd.offset_low = htole16(sc->eeprom_rawtemp); 5436 cmd.offset_high = htole16(sc->eeprom_temp); 5437 } else { 5438 cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET); 5439 cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET); 5440 } 5441 cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage); 5442 DPRINTF(("setting radio sensor offset to %d:%d, voltage to %d\n", 5443 letoh16(cmd.offset_low), letoh16(cmd.offset_high), 5444 letoh16(cmd.burnt_voltage_ref))); 5445 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 5446 } 5447 5448 /* 5449 * This function is called after the runtime firmware notifies us of its 5450 * readiness (called in a process context). 
5451 */ 5452 int 5453 iwn4965_post_alive(struct iwn_softc *sc) 5454 { 5455 int error, qid; 5456 5457 if ((error = iwn_nic_lock(sc)) != 0) 5458 return error; 5459 5460 /* Clear TX scheduler state in SRAM. */ 5461 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5462 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, 5463 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); 5464 5465 /* Set physical address of TX scheduler rings (1KB aligned). */ 5466 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5467 5468 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5469 5470 /* Disable chain mode for all our 16 queues. */ 5471 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); 5472 5473 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { 5474 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); 5475 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5476 5477 /* Set scheduler window size. */ 5478 iwn_mem_write(sc, sc->sched_base + 5479 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); 5480 /* Set scheduler frame limit. */ 5481 iwn_mem_write(sc, sc->sched_base + 5482 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5483 IWN_SCHED_LIMIT << 16); 5484 } 5485 5486 /* Enable interrupts for all our 16 queues. */ 5487 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); 5488 /* Identify TX FIFO rings (0-7). */ 5489 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); 5490 5491 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 5492 for (qid = 0; qid < 7; qid++) { 5493 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 5494 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5495 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 5496 } 5497 iwn_nic_unlock(sc); 5498 return 0; 5499 } 5500 5501 /* 5502 * This function is called after the initialization or runtime firmware 5503 * notifies us of its readiness (called in a process context). 5504 */ 5505 int 5506 iwn5000_post_alive(struct iwn_softc *sc) 5507 { 5508 int error, qid; 5509 5510 /* Switch to using ICT interrupt mode. */ 5511 iwn5000_ict_reset(sc); 5512 5513 if ((error = iwn_nic_lock(sc)) != 0) 5514 return error; 5515 5516 /* Clear TX scheduler state in SRAM. */ 5517 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5518 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 5519 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); 5520 5521 /* Set physical address of TX scheduler rings (1KB aligned). */ 5522 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5523 5524 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5525 5526 /* Enable chain mode for all queues, except command queue. */ 5527 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); 5528 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 5529 5530 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 5531 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 5532 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5533 5534 iwn_mem_write(sc, sc->sched_base + 5535 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 5536 /* Set scheduler window size and frame limit. */ 5537 iwn_mem_write(sc, sc->sched_base + 5538 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5539 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5540 } 5541 5542 /* Enable interrupts for all our 20 queues. */ 5543 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 5544 /* Identify TX FIFO rings (0-7). */ 5545 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 5546 5547 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. 
*/ 5548 for (qid = 0; qid < 7; qid++) { 5549 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 5550 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5551 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 5552 } 5553 iwn_nic_unlock(sc); 5554 5555 /* Configure WiMAX coexistence for combo adapters. */ 5556 error = iwn5000_send_wimax_coex(sc); 5557 if (error != 0) { 5558 printf("%s: could not configure WiMAX coexistence\n", 5559 sc->sc_dev.dv_xname); 5560 return error; 5561 } 5562 if (sc->hw_type != IWN_HW_REV_TYPE_5150) { 5563 /* Perform crystal calibration. */ 5564 error = iwn5000_crystal_calib(sc); 5565 if (error != 0) { 5566 printf("%s: crystal calibration failed\n", 5567 sc->sc_dev.dv_xname); 5568 return error; 5569 } 5570 } 5571 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { 5572 /* Query calibration from the initialization firmware. */ 5573 if ((error = iwn5000_query_calibration(sc)) != 0) { 5574 printf("%s: could not query calibration\n", 5575 sc->sc_dev.dv_xname); 5576 return error; 5577 } 5578 /* 5579 * We have the calibration results now, reboot with the 5580 * runtime firmware (call ourselves recursively!) 5581 */ 5582 iwn_hw_stop(sc); 5583 error = iwn_hw_init(sc); 5584 } else { 5585 /* Send calibration results to runtime firmware. */ 5586 error = iwn5000_send_calibration(sc); 5587 } 5588 return error; 5589 } 5590 5591 /* 5592 * The firmware boot code is small and is intended to be copied directly into 5593 * the NIC internal memory (no DMA transfer). 5594 */ 5595 int 5596 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) 5597 { 5598 int error, ntries; 5599 5600 size /= sizeof (uint32_t); 5601 5602 if ((error = iwn_nic_lock(sc)) != 0) 5603 return error; 5604 5605 /* Copy microcode image into NIC memory. */ 5606 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, 5607 (const uint32_t *)ucode, size); 5608 5609 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); 5610 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); 5611 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); 5612 5613 /* Start boot load now. */ 5614 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); 5615 5616 /* Wait for transfer to complete. */ 5617 for (ntries = 0; ntries < 1000; ntries++) { 5618 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & 5619 IWN_BSM_WR_CTRL_START)) 5620 break; 5621 DELAY(10); 5622 } 5623 if (ntries == 1000) { 5624 printf("%s: could not load boot firmware\n", 5625 sc->sc_dev.dv_xname); 5626 iwn_nic_unlock(sc); 5627 return ETIMEDOUT; 5628 } 5629 5630 /* Enable boot after power up. */ 5631 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); 5632 5633 iwn_nic_unlock(sc); 5634 return 0; 5635 } 5636 5637 int 5638 iwn4965_load_firmware(struct iwn_softc *sc) 5639 { 5640 struct iwn_fw_info *fw = &sc->fw; 5641 struct iwn_dma_info *dma = &sc->fw_dma; 5642 int error; 5643 5644 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 5645 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 5646 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->init.datasz, 5647 BUS_DMASYNC_PREWRITE); 5648 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5649 fw->init.text, fw->init.textsz); 5650 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ, 5651 fw->init.textsz, BUS_DMASYNC_PREWRITE); 5652 5653 /* Tell adapter where to find initialization sections. 
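   The BSM DRAM address registers hold addresses in units of 16 bytes,
   hence the ">> 4" shifts below.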
*/ 5654 if ((error = iwn_nic_lock(sc)) != 0) 5655 return error; 5656 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5657 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); 5658 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5659 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5660 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 5661 iwn_nic_unlock(sc); 5662 5663 /* Load firmware boot code. */ 5664 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 5665 if (error != 0) { 5666 printf("%s: could not load boot firmware\n", 5667 sc->sc_dev.dv_xname); 5668 return error; 5669 } 5670 /* Now press "execute". */ 5671 IWN_WRITE(sc, IWN_RESET, 0); 5672 5673 /* Wait at most one second for first alive notification. */ 5674 if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) { 5675 printf("%s: timeout waiting for adapter to initialize\n", 5676 sc->sc_dev.dv_xname); 5677 return error; 5678 } 5679 5680 /* Retrieve current temperature for initial TX power calibration. */ 5681 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; 5682 sc->temp = iwn4965_get_temperature(sc); 5683 5684 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 5685 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 5686 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->main.datasz, 5687 BUS_DMASYNC_PREWRITE); 5688 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5689 fw->main.text, fw->main.textsz); 5690 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ, 5691 fw->main.textsz, BUS_DMASYNC_PREWRITE); 5692 5693 /* Tell adapter where to find runtime sections. */ 5694 if ((error = iwn_nic_lock(sc)) != 0) 5695 return error; 5696 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5697 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); 5698 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5699 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5700 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, 5701 IWN_FW_UPDATED | fw->main.textsz); 5702 iwn_nic_unlock(sc); 5703 5704 return 0; 5705 } 5706 5707 int 5708 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, 5709 const uint8_t *section, int size) 5710 { 5711 struct iwn_dma_info *dma = &sc->fw_dma; 5712 int error; 5713 5714 /* Copy firmware section into pre-allocated DMA-safe memory. */ 5715 memcpy(dma->vaddr, section, size); 5716 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, size, BUS_DMASYNC_PREWRITE); 5717 5718 if ((error = iwn_nic_lock(sc)) != 0) 5719 return error; 5720 5721 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5722 IWN_FH_TX_CONFIG_DMA_PAUSE); 5723 5724 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); 5725 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), 5726 IWN_LOADDR(dma->paddr)); 5727 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), 5728 IWN_HIADDR(dma->paddr) << 28 | size); 5729 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), 5730 IWN_FH_TXBUF_STATUS_TBNUM(1) | 5731 IWN_FH_TXBUF_STATUS_TBIDX(1) | 5732 IWN_FH_TXBUF_STATUS_TFBD_VALID); 5733 5734 /* Kick Flow Handler to start DMA transfer. */ 5735 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5736 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); 5737 5738 iwn_nic_unlock(sc); 5739 5740 /* Wait at most five seconds for FH DMA transfer to complete. */ 5741 return tsleep(sc, PCATCH, "iwninit", 5 * hz); 5742 } 5743 5744 int 5745 iwn5000_load_firmware(struct iwn_softc *sc) 5746 { 5747 struct iwn_fw_part *fw; 5748 int error; 5749 5750 /* Load the initialization firmware on first boot only. 
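   Once calibration results have been obtained (IWN_FLAG_CALIB_DONE is
   set), the runtime image (fw->main) is loaded instead of the
   initialization image (fw->init).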
*/ 5751 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ? 5752 &sc->fw.main : &sc->fw.init; 5753 5754 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, 5755 fw->text, fw->textsz); 5756 if (error != 0) { 5757 printf("%s: could not load firmware %s section\n", 5758 sc->sc_dev.dv_xname, ".text"); 5759 return error; 5760 } 5761 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, 5762 fw->data, fw->datasz); 5763 if (error != 0) { 5764 printf("%s: could not load firmware %s section\n", 5765 sc->sc_dev.dv_xname, ".data"); 5766 return error; 5767 } 5768 5769 /* Now press "execute". */ 5770 IWN_WRITE(sc, IWN_RESET, 0); 5771 return 0; 5772 } 5773 5774 /* 5775 * Extract text and data sections from a legacy firmware image. 5776 */ 5777 int 5778 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) 5779 { 5780 const uint32_t *ptr; 5781 size_t hdrlen = 24; 5782 uint32_t rev; 5783 5784 ptr = (const uint32_t *)fw->data; 5785 rev = letoh32(*ptr++); 5786 5787 /* Check firmware API version. */ 5788 if (IWN_FW_API(rev) <= 1) { 5789 printf("%s: bad firmware, need API version >=2\n", 5790 sc->sc_dev.dv_xname); 5791 return EINVAL; 5792 } 5793 if (IWN_FW_API(rev) >= 3) { 5794 /* Skip build number (version 2 header). */ 5795 hdrlen += 4; 5796 ptr++; 5797 } 5798 if (fw->size < hdrlen) { 5799 printf("%s: firmware too short: %zu bytes\n", 5800 sc->sc_dev.dv_xname, fw->size); 5801 return EINVAL; 5802 } 5803 fw->main.textsz = letoh32(*ptr++); 5804 fw->main.datasz = letoh32(*ptr++); 5805 fw->init.textsz = letoh32(*ptr++); 5806 fw->init.datasz = letoh32(*ptr++); 5807 fw->boot.textsz = letoh32(*ptr++); 5808 5809 /* Check that all firmware sections fit. */ 5810 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + 5811 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5812 printf("%s: firmware too short: %zu bytes\n", 5813 sc->sc_dev.dv_xname, fw->size); 5814 return EINVAL; 5815 } 5816 5817 /* Get pointers to firmware sections. */ 5818 fw->main.text = (const uint8_t *)ptr; 5819 fw->main.data = fw->main.text + fw->main.textsz; 5820 fw->init.text = fw->main.data + fw->main.datasz; 5821 fw->init.data = fw->init.text + fw->init.textsz; 5822 fw->boot.text = fw->init.data + fw->init.datasz; 5823 return 0; 5824 } 5825 5826 /* 5827 * Extract text and data sections from a TLV firmware image. 5828 */ 5829 int 5830 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 5831 uint16_t alt) 5832 { 5833 const struct iwn_fw_tlv_hdr *hdr; 5834 const struct iwn_fw_tlv *tlv; 5835 const uint8_t *ptr, *end; 5836 uint64_t altmask; 5837 uint32_t len; 5838 5839 if (fw->size < sizeof (*hdr)) { 5840 printf("%s: firmware too short: %zu bytes\n", 5841 sc->sc_dev.dv_xname, fw->size); 5842 return EINVAL; 5843 } 5844 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 5845 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 5846 printf("%s: bad firmware signature 0x%08x\n", 5847 sc->sc_dev.dv_xname, letoh32(hdr->signature)); 5848 return EINVAL; 5849 } 5850 DPRINTF(("FW: \"%.64s\", build 0x%x\n", hdr->descr, 5851 letoh32(hdr->build))); 5852 5853 /* 5854 * Select the closest supported alternative that is less than 5855 * or equal to the specified one. 5856 */ 5857 altmask = letoh64(hdr->altmask); 5858 while (alt > 0 && !(altmask & (1ULL << alt))) 5859 alt--; /* Downgrade. */ 5860 DPRINTF(("using alternative %d\n", alt)); 5861 5862 ptr = (const uint8_t *)(hdr + 1); 5863 end = (const uint8_t *)(fw->data + fw->size); 5864 5865 /* Parse type-length-value fields. 
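   Each record is a struct iwn_fw_tlv header (16-bit type, 16-bit
   alternative selector, 32-bit length) immediately followed by `len'
   bytes of payload; payloads are padded to a 32-bit boundary, so the
   next record starts sizeof (*tlv) + ((len + 3) & ~3) bytes further.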
*/ 5866 while (ptr + sizeof (*tlv) <= end) { 5867 tlv = (const struct iwn_fw_tlv *)ptr; 5868 len = letoh32(tlv->len); 5869 5870 ptr += sizeof (*tlv); 5871 if (ptr + len > end) { 5872 printf("%s: firmware too short: %zu bytes\n", 5873 sc->sc_dev.dv_xname, fw->size); 5874 return EINVAL; 5875 } 5876 /* Skip other alternatives. */ 5877 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 5878 goto next; 5879 5880 switch (letoh16(tlv->type)) { 5881 case IWN_FW_TLV_MAIN_TEXT: 5882 fw->main.text = ptr; 5883 fw->main.textsz = len; 5884 break; 5885 case IWN_FW_TLV_MAIN_DATA: 5886 fw->main.data = ptr; 5887 fw->main.datasz = len; 5888 break; 5889 case IWN_FW_TLV_INIT_TEXT: 5890 fw->init.text = ptr; 5891 fw->init.textsz = len; 5892 break; 5893 case IWN_FW_TLV_INIT_DATA: 5894 fw->init.data = ptr; 5895 fw->init.datasz = len; 5896 break; 5897 case IWN_FW_TLV_BOOT_TEXT: 5898 fw->boot.text = ptr; 5899 fw->boot.textsz = len; 5900 break; 5901 case IWN_FW_TLV_ENH_SENS: 5902 if (len != 0) { 5903 printf("%s: TLV type %d has invalid size %u\n", 5904 sc->sc_dev.dv_xname, letoh16(tlv->type), 5905 len); 5906 goto next; 5907 } 5908 sc->sc_flags |= IWN_FLAG_ENH_SENS; 5909 break; 5910 case IWN_FW_TLV_PHY_CALIB: 5911 if (len != sizeof(uint32_t)) { 5912 printf("%s: TLV type %d has invalid size %u\n", 5913 sc->sc_dev.dv_xname, letoh16(tlv->type), 5914 len); 5915 goto next; 5916 } 5917 if (letoh32(*ptr) <= IWN5000_PHY_CALIB_MAX) { 5918 sc->reset_noise_gain = letoh32(*ptr); 5919 sc->noise_gain = letoh32(*ptr) + 1; 5920 } 5921 break; 5922 case IWN_FW_TLV_FLAGS: 5923 if (len < sizeof(uint32_t)) 5924 break; 5925 if (len % sizeof(uint32_t)) 5926 break; 5927 sc->tlv_feature_flags = letoh32(*ptr); 5928 DPRINTF(("feature: 0x%08x\n", sc->tlv_feature_flags)); 5929 break; 5930 default: 5931 DPRINTF(("TLV type %d not handled\n", 5932 letoh16(tlv->type))); 5933 break; 5934 } 5935 next: /* TLV fields are 32-bit aligned. */ 5936 ptr += (len + 3) & ~3; 5937 } 5938 return 0; 5939 } 5940 5941 int 5942 iwn_read_firmware(struct iwn_softc *sc) 5943 { 5944 struct iwn_fw_info *fw = &sc->fw; 5945 int error; 5946 5947 /* 5948 * Some PHY calibration commands are firmware-dependent; these 5949 * are the default values that will be overridden if 5950 * necessary. 5951 */ 5952 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 5953 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN; 5954 5955 memset(fw, 0, sizeof (*fw)); 5956 5957 /* Read firmware image from filesystem. */ 5958 if ((error = loadfirmware(sc->fwname, &fw->data, &fw->size)) != 0) { 5959 printf("%s: could not read firmware %s (error %d)\n", 5960 sc->sc_dev.dv_xname, sc->fwname, error); 5961 return error; 5962 } 5963 if (fw->size < sizeof (uint32_t)) { 5964 printf("%s: firmware too short: %zu bytes\n", 5965 sc->sc_dev.dv_xname, fw->size); 5966 free(fw->data, M_DEVBUF, fw->size); 5967 return EINVAL; 5968 } 5969 5970 /* Retrieve text and data sections. */ 5971 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ 5972 error = iwn_read_firmware_leg(sc, fw); 5973 else 5974 error = iwn_read_firmware_tlv(sc, fw, 1); 5975 if (error != 0) { 5976 printf("%s: could not read firmware sections\n", 5977 sc->sc_dev.dv_xname); 5978 free(fw->data, M_DEVBUF, fw->size); 5979 return error; 5980 } 5981 5982 /* Make sure text and data sections fit in hardware memory. 
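   The sc->fw_text_maxsz and sc->fw_data_maxsz limits are adapter
   specific and set at attach time; the boot code additionally has to be
   a multiple of 32 bits in size since it is copied into NIC memory word
   by word (see iwn4965_load_bootcode).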
*/ 5983 if (fw->main.textsz > sc->fw_text_maxsz || 5984 fw->main.datasz > sc->fw_data_maxsz || 5985 fw->init.textsz > sc->fw_text_maxsz || 5986 fw->init.datasz > sc->fw_data_maxsz || 5987 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 5988 (fw->boot.textsz & 3) != 0) { 5989 printf("%s: firmware sections too large\n", 5990 sc->sc_dev.dv_xname); 5991 free(fw->data, M_DEVBUF, fw->size); 5992 return EINVAL; 5993 } 5994 5995 /* We can proceed with loading the firmware. */ 5996 return 0; 5997 } 5998 5999 int 6000 iwn_clock_wait(struct iwn_softc *sc) 6001 { 6002 int ntries; 6003 6004 /* Set "initialization complete" bit. */ 6005 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 6006 6007 /* Wait for clock stabilization. */ 6008 for (ntries = 0; ntries < 2500; ntries++) { 6009 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) 6010 return 0; 6011 DELAY(10); 6012 } 6013 printf("%s: timeout waiting for clock stabilization\n", 6014 sc->sc_dev.dv_xname); 6015 return ETIMEDOUT; 6016 } 6017 6018 int 6019 iwn_apm_init(struct iwn_softc *sc) 6020 { 6021 pcireg_t reg; 6022 int error; 6023 6024 /* Disable L0s exit timer (NMI bug workaround). */ 6025 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 6026 /* Don't wait for ICH L0s (ICH bug workaround). */ 6027 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 6028 6029 /* Set FH wait threshold to max (HW bug under stress workaround). */ 6030 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); 6031 6032 /* Enable HAP INTA to move adapter from L1a to L0s. */ 6033 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); 6034 6035 /* Retrieve PCIe Active State Power Management (ASPM). */ 6036 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 6037 sc->sc_cap_off + PCI_PCIE_LCSR); 6038 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 6039 if (reg & PCI_PCIE_LCSR_ASPM_L1) /* L1 Entry enabled. */ 6040 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6041 else 6042 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6043 6044 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 6045 sc->hw_type <= IWN_HW_REV_TYPE_1000) 6046 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT); 6047 6048 /* Wait for clock stabilization before accessing prph. */ 6049 if ((error = iwn_clock_wait(sc)) != 0) 6050 return error; 6051 6052 if ((error = iwn_nic_lock(sc)) != 0) 6053 return error; 6054 if (sc->hw_type == IWN_HW_REV_TYPE_4965) { 6055 /* Enable DMA and BSM (Bootstrap State Machine). */ 6056 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6057 IWN_APMG_CLK_CTRL_DMA_CLK_RQT | 6058 IWN_APMG_CLK_CTRL_BSM_CLK_RQT); 6059 } else { 6060 /* Enable DMA. */ 6061 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6062 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 6063 } 6064 DELAY(20); 6065 /* Disable L1-Active. */ 6066 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 6067 iwn_nic_unlock(sc); 6068 6069 return 0; 6070 } 6071 6072 void 6073 iwn_apm_stop_master(struct iwn_softc *sc) 6074 { 6075 int ntries; 6076 6077 /* Stop busmaster DMA activity. */ 6078 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); 6079 for (ntries = 0; ntries < 100; ntries++) { 6080 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) 6081 return; 6082 DELAY(10); 6083 } 6084 printf("%s: timeout waiting for master\n", sc->sc_dev.dv_xname); 6085 } 6086 6087 void 6088 iwn_apm_stop(struct iwn_softc *sc) 6089 { 6090 iwn_apm_stop_master(sc); 6091 6092 /* Reset the entire device. */ 6093 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); 6094 DELAY(10); 6095 /* Clear "initialization complete" bit. 
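   This is the counterpart of iwn_clock_wait(), which sets the same bit;
   clearing it lets the adapter drop back into its low-power state until
   the next initialization.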
*/ 6096 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 6097 } 6098 6099 int 6100 iwn4965_nic_config(struct iwn_softc *sc) 6101 { 6102 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { 6103 /* 6104 * I don't believe this to be correct but this is what the 6105 * vendor driver is doing. Probably the bits should not be 6106 * shifted in IWN_RFCFG_*. 6107 */ 6108 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6109 IWN_RFCFG_TYPE(sc->rfcfg) | 6110 IWN_RFCFG_STEP(sc->rfcfg) | 6111 IWN_RFCFG_DASH(sc->rfcfg)); 6112 } 6113 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6114 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 6115 return 0; 6116 } 6117 6118 int 6119 iwn5000_nic_config(struct iwn_softc *sc) 6120 { 6121 uint32_t tmp; 6122 int error; 6123 6124 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { 6125 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6126 IWN_RFCFG_TYPE(sc->rfcfg) | 6127 IWN_RFCFG_STEP(sc->rfcfg) | 6128 IWN_RFCFG_DASH(sc->rfcfg)); 6129 } 6130 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6131 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 6132 6133 if ((error = iwn_nic_lock(sc)) != 0) 6134 return error; 6135 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); 6136 6137 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 6138 /* 6139 * Select first Switching Voltage Regulator (1.32V) to 6140 * solve a stability issue related to noisy DC2DC line 6141 * in the silicon of 1000 Series. 6142 */ 6143 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR); 6144 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK; 6145 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32; 6146 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp); 6147 } 6148 iwn_nic_unlock(sc); 6149 6150 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) { 6151 /* Use internal power amplifier only. */ 6152 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA); 6153 } 6154 if ((sc->hw_type == IWN_HW_REV_TYPE_6050 || 6155 sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) { 6156 /* Indicate that ROM calibration version is >=6. */ 6157 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6); 6158 } 6159 if (sc->hw_type == IWN_HW_REV_TYPE_6005) 6160 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2); 6161 if (sc->hw_type == IWN_HW_REV_TYPE_2030 || 6162 sc->hw_type == IWN_HW_REV_TYPE_2000 || 6163 sc->hw_type == IWN_HW_REV_TYPE_135 || 6164 sc->hw_type == IWN_HW_REV_TYPE_105) 6165 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_IQ_INVERT); 6166 return 0; 6167 } 6168 6169 /* 6170 * Take NIC ownership over Intel Active Management Technology (AMT). 6171 */ 6172 int 6173 iwn_hw_prepare(struct iwn_softc *sc) 6174 { 6175 int ntries; 6176 6177 /* Check if hardware is ready. */ 6178 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 6179 for (ntries = 0; ntries < 5; ntries++) { 6180 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 6181 IWN_HW_IF_CONFIG_NIC_READY) 6182 return 0; 6183 DELAY(10); 6184 } 6185 6186 /* Hardware not ready, force into ready state. */ 6187 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE); 6188 for (ntries = 0; ntries < 15000; ntries++) { 6189 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) & 6190 IWN_HW_IF_CONFIG_PREPARE_DONE)) 6191 break; 6192 DELAY(10); 6193 } 6194 if (ntries == 15000) 6195 return ETIMEDOUT; 6196 6197 /* Hardware should be ready now. 
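   Retry the same "NIC ready" handshake that failed above, now that
   ownership of the device has been requested with the PREPARE bit.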
*/ 6198 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 6199 for (ntries = 0; ntries < 5; ntries++) { 6200 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 6201 IWN_HW_IF_CONFIG_NIC_READY) 6202 return 0; 6203 DELAY(10); 6204 } 6205 return ETIMEDOUT; 6206 } 6207 6208 int 6209 iwn_hw_init(struct iwn_softc *sc) 6210 { 6211 struct iwn_ops *ops = &sc->ops; 6212 int error, chnl, qid; 6213 6214 /* Clear pending interrupts. */ 6215 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6216 6217 if ((error = iwn_apm_init(sc)) != 0) { 6218 printf("%s: could not power on adapter\n", 6219 sc->sc_dev.dv_xname); 6220 return error; 6221 } 6222 6223 /* Select VMAIN power source. */ 6224 if ((error = iwn_nic_lock(sc)) != 0) 6225 return error; 6226 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK); 6227 iwn_nic_unlock(sc); 6228 6229 /* Perform adapter-specific initialization. */ 6230 if ((error = ops->nic_config(sc)) != 0) 6231 return error; 6232 6233 /* Initialize RX ring. */ 6234 if ((error = iwn_nic_lock(sc)) != 0) 6235 return error; 6236 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 6237 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0); 6238 /* Set physical address of RX ring (256-byte aligned). */ 6239 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8); 6240 /* Set physical address of RX status (16-byte aligned). */ 6241 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4); 6242 /* Enable RX. */ 6243 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 6244 IWN_FH_RX_CONFIG_ENA | 6245 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */ 6246 IWN_FH_RX_CONFIG_IRQ_DST_HOST | 6247 IWN_FH_RX_CONFIG_SINGLE_FRAME | 6248 IWN_FH_RX_CONFIG_RB_TIMEOUT(0x11) | /* about 1/2 msec */ 6249 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG)); 6250 iwn_nic_unlock(sc); 6251 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7); 6252 6253 if ((error = iwn_nic_lock(sc)) != 0) 6254 return error; 6255 6256 /* Initialize TX scheduler. */ 6257 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 6258 6259 /* Set physical address of "keep warm" page (16-byte aligned). */ 6260 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4); 6261 6262 /* Initialize TX rings. */ 6263 for (qid = 0; qid < sc->ntxqs; qid++) { 6264 struct iwn_tx_ring *txq = &sc->txq[qid]; 6265 6266 /* Set physical address of TX ring (256-byte aligned). */ 6267 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid), 6268 txq->desc_dma.paddr >> 8); 6269 } 6270 iwn_nic_unlock(sc); 6271 6272 /* Enable DMA channels. */ 6273 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 6274 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 6275 IWN_FH_TX_CONFIG_DMA_ENA | 6276 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA); 6277 } 6278 6279 /* Clear "radio off" and "commands blocked" bits. */ 6280 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6281 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED); 6282 6283 /* Clear pending interrupts. */ 6284 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6285 /* Enable interrupt coalescing. */ 6286 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8); 6287 /* Enable interrupts. */ 6288 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 6289 6290 /* _Really_ make sure "radio off" bit is cleared! */ 6291 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6292 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6293 6294 /* Enable shadow registers. 
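   Shadow registers exist on 6000-series and newer adapters only; they
   are meant to let frequently used registers (such as the ring write
   pointers) be updated without waking up the NIC first.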
*/ 6295 if (sc->hw_type >= IWN_HW_REV_TYPE_6000) 6296 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff); 6297 6298 if ((error = ops->load_firmware(sc)) != 0) { 6299 printf("%s: could not load firmware\n", sc->sc_dev.dv_xname); 6300 return error; 6301 } 6302 /* Wait at most one second for firmware alive notification. */ 6303 if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) { 6304 printf("%s: timeout waiting for adapter to initialize\n", 6305 sc->sc_dev.dv_xname); 6306 return error; 6307 } 6308 /* Do post-firmware initialization. */ 6309 return ops->post_alive(sc); 6310 } 6311 6312 void 6313 iwn_hw_stop(struct iwn_softc *sc) 6314 { 6315 int chnl, qid, ntries; 6316 6317 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); 6318 6319 /* Disable interrupts. */ 6320 IWN_WRITE(sc, IWN_INT_MASK, 0); 6321 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6322 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff); 6323 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 6324 6325 /* Make sure we no longer hold the NIC lock. */ 6326 iwn_nic_unlock(sc); 6327 6328 /* Stop TX scheduler. */ 6329 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 6330 6331 /* Stop all DMA channels. */ 6332 if (iwn_nic_lock(sc) == 0) { 6333 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 6334 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); 6335 for (ntries = 0; ntries < 200; ntries++) { 6336 if (IWN_READ(sc, IWN_FH_TX_STATUS) & 6337 IWN_FH_TX_STATUS_IDLE(chnl)) 6338 break; 6339 DELAY(10); 6340 } 6341 } 6342 iwn_nic_unlock(sc); 6343 } 6344 6345 /* Stop RX ring. */ 6346 iwn_reset_rx_ring(sc, &sc->rxq); 6347 6348 /* Reset all TX rings. */ 6349 for (qid = 0; qid < sc->ntxqs; qid++) 6350 iwn_reset_tx_ring(sc, &sc->txq[qid]); 6351 6352 if (iwn_nic_lock(sc) == 0) { 6353 iwn_prph_write(sc, IWN_APMG_CLK_DIS, 6354 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 6355 iwn_nic_unlock(sc); 6356 } 6357 DELAY(5); 6358 /* Power OFF adapter. */ 6359 iwn_apm_stop(sc); 6360 } 6361 6362 int 6363 iwn_init(struct ifnet *ifp) 6364 { 6365 struct iwn_softc *sc = ifp->if_softc; 6366 struct ieee80211com *ic = &sc->sc_ic; 6367 int error; 6368 6369 if ((error = iwn_hw_prepare(sc)) != 0) { 6370 printf("%s: hardware not ready\n", sc->sc_dev.dv_xname); 6371 goto fail; 6372 } 6373 6374 /* Check that the radio is not disabled by hardware switch. */ 6375 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { 6376 printf("%s: radio is disabled by hardware switch\n", 6377 sc->sc_dev.dv_xname); 6378 error = EPERM; /* :-) */ 6379 goto fail; 6380 } 6381 6382 /* Read firmware images from the filesystem. */ 6383 if ((error = iwn_read_firmware(sc)) != 0) { 6384 printf("%s: could not read firmware\n", sc->sc_dev.dv_xname); 6385 goto fail; 6386 } 6387 6388 /* Initialize interrupt mask to default value. */ 6389 sc->int_mask = IWN_INT_MASK_DEF; 6390 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 6391 6392 /* Initialize hardware and upload firmware. */ 6393 error = iwn_hw_init(sc); 6394 free(sc->fw.data, M_DEVBUF, sc->fw.size); 6395 if (error != 0) { 6396 printf("%s: could not initialize hardware\n", 6397 sc->sc_dev.dv_xname); 6398 goto fail; 6399 } 6400 6401 /* Configure adapter now that it is ready. 
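   iwn_config() pushes the initial settings (RXON, TX power, power
   saving parameters) to the runtime firmware.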
*/ 6402 if ((error = iwn_config(sc)) != 0) { 6403 printf("%s: could not configure device\n", 6404 sc->sc_dev.dv_xname); 6405 goto fail; 6406 } 6407 6408 ifq_clr_oactive(&ifp->if_snd); 6409 ifp->if_flags |= IFF_RUNNING; 6410 6411 if (ic->ic_opmode != IEEE80211_M_MONITOR) 6412 ieee80211_begin_scan(ifp); 6413 else 6414 ieee80211_new_state(ic, IEEE80211_S_RUN, -1); 6415 6416 return 0; 6417 6418 fail: iwn_stop(ifp, 1); 6419 return error; 6420 } 6421 6422 void 6423 iwn_stop(struct ifnet *ifp, int disable) 6424 { 6425 struct iwn_softc *sc = ifp->if_softc; 6426 struct ieee80211com *ic = &sc->sc_ic; 6427 6428 timeout_del(&sc->calib_to); 6429 ifp->if_timer = sc->sc_tx_timer = 0; 6430 ifp->if_flags &= ~IFF_RUNNING; 6431 ifq_clr_oactive(&ifp->if_snd); 6432 6433 /* In case we were scanning, release the scan "lock". */ 6434 ic->ic_scan_lock = IEEE80211_SCAN_UNLOCKED; 6435 6436 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 6437 6438 /* Power OFF hardware. */ 6439 iwn_hw_stop(sc); 6440 } 6441