1 /* $NetBSD: if_iwn.c,v 1.70 2013/10/17 21:06:15 christos Exp $ */ 2 /* $OpenBSD: if_iwn.c,v 1.119 2013/05/29 23:16:52 yuo Exp $ */ 3 4 /*- 5 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr> 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 /* 21 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network 22 * adapters. 23 */ 24 #include <sys/cdefs.h> 25 __KERNEL_RCSID(0, "$NetBSD: if_iwn.c,v 1.70 2013/10/17 21:06:15 christos Exp $"); 26 27 #define IWN_USE_RBUF /* Use local storage for RX */ 28 #undef IWN_HWCRYPTO /* XXX does not even compile yet */ 29 30 #include <sys/param.h> 31 #include <sys/sockio.h> 32 #include <sys/proc.h> 33 #include <sys/mbuf.h> 34 #include <sys/kernel.h> 35 #include <sys/socket.h> 36 #include <sys/systm.h> 37 #include <sys/malloc.h> 38 #ifdef notyetMODULE 39 #include <sys/module.h> 40 #endif 41 #include <sys/mutex.h> 42 #include <sys/conf.h> 43 #include <sys/kauth.h> 44 #include <sys/callout.h> 45 46 #include <dev/sysmon/sysmonvar.h> 47 48 #include <sys/bus.h> 49 #include <machine/endian.h> 50 #include <machine/intr.h> 51 52 #include <dev/pci/pcireg.h> 53 #include <dev/pci/pcivar.h> 54 #include <dev/pci/pcidevs.h> 55 56 #include <net/bpf.h> 57 #include <net/if.h> 58 #include <net/if_arp.h> 59 #include <net/if_dl.h> 60 #include <net/if_media.h> 61 #include <net/if_types.h> 62 63 #include <netinet/in.h> 64 #include <netinet/in_systm.h> 65 #include <netinet/in_var.h> 66 #include <net/if_ether.h> 67 #include <netinet/ip.h> 68 69 #include <net80211/ieee80211_var.h> 70 #include <net80211/ieee80211_amrr.h> 71 #include <net80211/ieee80211_radiotap.h> 72 73 #include <dev/firmload.h> 74 75 #include <dev/pci/if_iwnreg.h> 76 #include <dev/pci/if_iwnvar.h> 77 78 static const pci_product_id_t iwn_devices[] = { 79 PCI_PRODUCT_INTEL_WIFI_LINK_1030_1, 80 PCI_PRODUCT_INTEL_WIFI_LINK_1030_2, 81 PCI_PRODUCT_INTEL_WIFI_LINK_4965_1, 82 PCI_PRODUCT_INTEL_WIFI_LINK_4965_2, 83 PCI_PRODUCT_INTEL_WIFI_LINK_4965_3, 84 PCI_PRODUCT_INTEL_WIFI_LINK_4965_4, 85 PCI_PRODUCT_INTEL_WIFI_LINK_5100_1, 86 PCI_PRODUCT_INTEL_WIFI_LINK_5100_2, 87 PCI_PRODUCT_INTEL_WIFI_LINK_5150_1, 88 PCI_PRODUCT_INTEL_WIFI_LINK_5150_2, 89 PCI_PRODUCT_INTEL_WIFI_LINK_5300_1, 90 PCI_PRODUCT_INTEL_WIFI_LINK_5300_2, 91 PCI_PRODUCT_INTEL_WIFI_LINK_5350_1, 92 PCI_PRODUCT_INTEL_WIFI_LINK_5350_2, 93 PCI_PRODUCT_INTEL_WIFI_LINK_1000_1, 94 PCI_PRODUCT_INTEL_WIFI_LINK_1000_2, 95 PCI_PRODUCT_INTEL_WIFI_LINK_6000_3X3_1, 96 PCI_PRODUCT_INTEL_WIFI_LINK_6000_3X3_2, 97 PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_1, 98 PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_2, 99 PCI_PRODUCT_INTEL_WIFI_LINK_6050_2X2_1, 100 PCI_PRODUCT_INTEL_WIFI_LINK_6050_2X2_2, 101 PCI_PRODUCT_INTEL_WIFI_LINK_6005_2X2_1, 102 PCI_PRODUCT_INTEL_WIFI_LINK_6005_2X2_2, 103 PCI_PRODUCT_INTEL_WIFI_LINK_6230_1, 104 PCI_PRODUCT_INTEL_WIFI_LINK_6230_2, 105 
PCI_PRODUCT_INTEL_WIFI_LINK_6235, 106 }; 107 108 /* 109 * Supported rates for 802.11a/b/g modes (in 500Kbps unit). 110 */ 111 static const struct ieee80211_rateset iwn_rateset_11a = 112 { 8, { 12, 18, 24, 36, 48, 72, 96, 108 } }; 113 114 static const struct ieee80211_rateset iwn_rateset_11b = 115 { 4, { 2, 4, 11, 22 } }; 116 117 static const struct ieee80211_rateset iwn_rateset_11g = 118 { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } }; 119 120 static int iwn_match(device_t , struct cfdata *, void *); 121 static void iwn_attach(device_t , device_t , void *); 122 static int iwn4965_attach(struct iwn_softc *, pci_product_id_t); 123 static int iwn5000_attach(struct iwn_softc *, pci_product_id_t); 124 static void iwn_radiotap_attach(struct iwn_softc *); 125 static int iwn_detach(device_t , int); 126 #if 0 127 static void iwn_power(int, void *); 128 #endif 129 static bool iwn_resume(device_t, const pmf_qual_t *); 130 static int iwn_nic_lock(struct iwn_softc *); 131 static int iwn_eeprom_lock(struct iwn_softc *); 132 static int iwn_init_otprom(struct iwn_softc *); 133 static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int); 134 static int iwn_dma_contig_alloc(bus_dma_tag_t, struct iwn_dma_info *, 135 void **, bus_size_t, bus_size_t); 136 static void iwn_dma_contig_free(struct iwn_dma_info *); 137 static int iwn_alloc_sched(struct iwn_softc *); 138 static void iwn_free_sched(struct iwn_softc *); 139 static int iwn_alloc_kw(struct iwn_softc *); 140 static void iwn_free_kw(struct iwn_softc *); 141 static int iwn_alloc_ict(struct iwn_softc *); 142 static void iwn_free_ict(struct iwn_softc *); 143 static int iwn_alloc_fwmem(struct iwn_softc *); 144 static void iwn_free_fwmem(struct iwn_softc *); 145 static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 146 static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 147 static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 148 static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, 149 int); 150 static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 151 static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 152 static void iwn5000_ict_reset(struct iwn_softc *); 153 static int iwn_read_eeprom(struct iwn_softc *); 154 static void iwn4965_read_eeprom(struct iwn_softc *); 155 156 #ifdef IWN_DEBUG 157 static void iwn4965_print_power_group(struct iwn_softc *, int); 158 #endif 159 static void iwn5000_read_eeprom(struct iwn_softc *); 160 static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t); 161 static void iwn_read_eeprom_enhinfo(struct iwn_softc *); 162 static struct ieee80211_node *iwn_node_alloc(struct ieee80211_node_table *); 163 static void iwn_newassoc(struct ieee80211_node *, int); 164 static int iwn_media_change(struct ifnet *); 165 static int iwn_newstate(struct ieee80211com *, enum ieee80211_state, int); 166 static void iwn_iter_func(void *, struct ieee80211_node *); 167 static void iwn_calib_timeout(void *); 168 static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *, 169 struct iwn_rx_data *); 170 static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *, 171 struct iwn_rx_data *); 172 #ifndef IEEE80211_NO_HT 173 static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *, 174 struct iwn_rx_data *); 175 #endif 176 static void iwn5000_rx_calib_results(struct iwn_softc *, 177 struct iwn_rx_desc *, struct iwn_rx_data *); 178 static void iwn_rx_statistics(struct iwn_softc *, struct 
iwn_rx_desc *, 179 struct iwn_rx_data *); 180 static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 181 struct iwn_rx_data *); 182 static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 183 struct iwn_rx_data *); 184 static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int, 185 uint8_t); 186 static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *); 187 static void iwn_notif_intr(struct iwn_softc *); 188 static void iwn_wakeup_intr(struct iwn_softc *); 189 static void iwn_fatal_intr(struct iwn_softc *); 190 static int iwn_intr(void *); 191 static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t, 192 uint16_t); 193 static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t, 194 uint16_t); 195 #ifdef notyet 196 static void iwn5000_reset_sched(struct iwn_softc *, int, int); 197 #endif 198 static int iwn_tx(struct iwn_softc *, struct mbuf *, 199 struct ieee80211_node *, int); 200 static void iwn_start(struct ifnet *); 201 static void iwn_watchdog(struct ifnet *); 202 static int iwn_ioctl(struct ifnet *, u_long, void *); 203 static int iwn_cmd(struct iwn_softc *, int, const void *, int, int); 204 static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *, 205 int); 206 static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *, 207 int); 208 static int iwn_set_link_quality(struct iwn_softc *, 209 struct ieee80211_node *); 210 static int iwn_add_broadcast_node(struct iwn_softc *, int); 211 static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); 212 static int iwn_set_critical_temp(struct iwn_softc *); 213 static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *); 214 static void iwn4965_power_calibration(struct iwn_softc *, int); 215 static int iwn4965_set_txpower(struct iwn_softc *, int); 216 static int iwn5000_set_txpower(struct iwn_softc *, int); 217 static int iwn4965_get_rssi(const struct iwn_rx_stat *); 218 static int iwn5000_get_rssi(const struct iwn_rx_stat *); 219 static int iwn_get_noise(const struct iwn_rx_general_stats *); 220 static int iwn4965_get_temperature(struct iwn_softc *); 221 static int iwn5000_get_temperature(struct iwn_softc *); 222 static int iwn_init_sensitivity(struct iwn_softc *); 223 static void iwn_collect_noise(struct iwn_softc *, 224 const struct iwn_rx_general_stats *); 225 static int iwn4965_init_gains(struct iwn_softc *); 226 static int iwn5000_init_gains(struct iwn_softc *); 227 static int iwn4965_set_gains(struct iwn_softc *); 228 static int iwn5000_set_gains(struct iwn_softc *); 229 static void iwn_tune_sensitivity(struct iwn_softc *, 230 const struct iwn_rx_stats *); 231 static int iwn_send_sensitivity(struct iwn_softc *); 232 static int iwn_set_pslevel(struct iwn_softc *, int, int, int); 233 static int iwn5000_runtime_calib(struct iwn_softc *); 234 235 static int iwn_config_bt_coex_bluetooth(struct iwn_softc *); 236 static int iwn_config_bt_coex_prio_table(struct iwn_softc *); 237 static int iwn_config_bt_coex_adv1(struct iwn_softc *); 238 239 static int iwn_config(struct iwn_softc *); 240 static int iwn_scan(struct iwn_softc *, uint16_t); 241 static int iwn_auth(struct iwn_softc *); 242 static int iwn_run(struct iwn_softc *); 243 #ifdef IWN_HWCRYPTO 244 static int iwn_set_key(struct ieee80211com *, struct ieee80211_node *, 245 struct ieee80211_key *); 246 static void iwn_delete_key(struct ieee80211com *, struct ieee80211_node *, 247 struct ieee80211_key *); 248 #endif 249 static int iwn_wme_update(struct ieee80211com *); 250 
#ifndef IEEE80211_NO_HT 251 static int iwn_ampdu_rx_start(struct ieee80211com *, 252 struct ieee80211_node *, uint8_t); 253 static void iwn_ampdu_rx_stop(struct ieee80211com *, 254 struct ieee80211_node *, uint8_t); 255 static int iwn_ampdu_tx_start(struct ieee80211com *, 256 struct ieee80211_node *, uint8_t); 257 static void iwn_ampdu_tx_stop(struct ieee80211com *, 258 struct ieee80211_node *, uint8_t); 259 static void iwn4965_ampdu_tx_start(struct iwn_softc *, 260 struct ieee80211_node *, uint8_t, uint16_t); 261 static void iwn4965_ampdu_tx_stop(struct iwn_softc *, 262 uint8_t, uint16_t); 263 static void iwn5000_ampdu_tx_start(struct iwn_softc *, 264 struct ieee80211_node *, uint8_t, uint16_t); 265 static void iwn5000_ampdu_tx_stop(struct iwn_softc *, 266 uint8_t, uint16_t); 267 #endif 268 static int iwn5000_query_calibration(struct iwn_softc *); 269 static int iwn5000_send_calibration(struct iwn_softc *); 270 static int iwn5000_send_wimax_coex(struct iwn_softc *); 271 static int iwn4965_post_alive(struct iwn_softc *); 272 static int iwn5000_post_alive(struct iwn_softc *); 273 static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *, 274 int); 275 static int iwn4965_load_firmware(struct iwn_softc *); 276 static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t, 277 const uint8_t *, int); 278 static int iwn5000_load_firmware(struct iwn_softc *); 279 static int iwn_read_firmware_leg(struct iwn_softc *, 280 struct iwn_fw_info *); 281 static int iwn_read_firmware_tlv(struct iwn_softc *, 282 struct iwn_fw_info *, uint16_t); 283 static int iwn_read_firmware(struct iwn_softc *); 284 static int iwn_clock_wait(struct iwn_softc *); 285 static int iwn_apm_init(struct iwn_softc *); 286 static void iwn_apm_stop_master(struct iwn_softc *); 287 static void iwn_apm_stop(struct iwn_softc *); 288 static int iwn4965_nic_config(struct iwn_softc *); 289 static int iwn5000_nic_config(struct iwn_softc *); 290 static int iwn_hw_prepare(struct iwn_softc *); 291 static int iwn_hw_init(struct iwn_softc *); 292 static void iwn_hw_stop(struct iwn_softc *); 293 static int iwn_init(struct ifnet *); 294 static void iwn_stop(struct ifnet *, int); 295 296 /* XXX MCLGETI alternative */ 297 static struct mbuf *MCLGETIalt(struct iwn_softc *, int, 298 struct ifnet *, u_int); 299 #ifdef IWN_USE_RBUF 300 static struct iwn_rbuf *iwn_alloc_rbuf(struct iwn_softc *); 301 static void iwn_free_rbuf(struct mbuf *, void *, size_t, void *); 302 static int iwn_alloc_rpool(struct iwn_softc *); 303 static void iwn_free_rpool(struct iwn_softc *); 304 #endif 305 306 /* XXX needed by iwn_scan */ 307 static u_int8_t *ieee80211_add_ssid(u_int8_t *, const u_int8_t *, u_int); 308 static u_int8_t *ieee80211_add_rates(u_int8_t *, 309 const struct ieee80211_rateset *); 310 static u_int8_t *ieee80211_add_xrates(u_int8_t *, 311 const struct ieee80211_rateset *); 312 313 static void iwn_fix_channel(struct ieee80211com *, struct mbuf *); 314 315 #ifdef IWN_DEBUG 316 #define DPRINTF(x) do { if (iwn_debug > 0) printf x; } while (0) 317 #define DPRINTFN(n, x) do { if (iwn_debug >= (n)) printf x; } while (0) 318 int iwn_debug = 0; 319 #else 320 #define DPRINTF(x) 321 #define DPRINTFN(n, x) 322 #endif 323 324 CFATTACH_DECL_NEW(iwn, sizeof(struct iwn_softc), iwn_match, iwn_attach, 325 iwn_detach, NULL); 326 327 static int 328 iwn_match(device_t parent, cfdata_t match __unused, void *aux) 329 { 330 struct pci_attach_args *pa = aux; 331 size_t i; 332 333 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL) 334 return 0; 335 336 for (i = 0; i 
< __arraycount(iwn_devices); i++) 337 if (PCI_PRODUCT(pa->pa_id) == iwn_devices[i]) 338 return 1; 339 340 return 0; 341 } 342 343 static void 344 iwn_attach(device_t parent __unused, device_t self, void *aux) 345 { 346 struct iwn_softc *sc = device_private(self); 347 struct ieee80211com *ic = &sc->sc_ic; 348 struct ifnet *ifp = &sc->sc_ec.ec_if; 349 struct pci_attach_args *pa = aux; 350 const char *intrstr; 351 pci_intr_handle_t ih; 352 pcireg_t memtype, reg; 353 int i, error; 354 355 sc->sc_dev = self; 356 sc->sc_pct = pa->pa_pc; 357 sc->sc_pcitag = pa->pa_tag; 358 sc->sc_dmat = pa->pa_dmat; 359 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE); 360 361 callout_init(&sc->calib_to, 0); 362 callout_setfunc(&sc->calib_to, iwn_calib_timeout, sc); 363 364 pci_aprint_devinfo(pa, NULL); 365 366 /* 367 * Get the offset of the PCI Express Capability Structure in PCI 368 * Configuration Space. 369 */ 370 error = pci_get_capability(sc->sc_pct, sc->sc_pcitag, 371 PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL); 372 if (error == 0) { 373 aprint_error(": PCIe capability structure not found!\n"); 374 return; 375 } 376 377 /* Clear device-specific "PCI retry timeout" register (41h). */ 378 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40); 379 if (reg & 0xff00) 380 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00); 381 382 /* Enable bus-mastering and hardware bug workaround. */ 383 /* XXX verify the bus-mastering is really needed (not in OpenBSD) */ 384 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 385 reg |= PCI_COMMAND_MASTER_ENABLE; 386 if (reg & PCI_COMMAND_INTERRUPT_DISABLE) { 387 DPRINTF(("PCIe INTx Disable set\n")); 388 reg &= ~PCI_COMMAND_INTERRUPT_DISABLE; 389 } 390 pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg); 391 392 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IWN_PCI_BAR0); 393 error = pci_mapreg_map(pa, IWN_PCI_BAR0, memtype, 0, &sc->sc_st, 394 &sc->sc_sh, NULL, &sc->sc_sz); 395 if (error != 0) { 396 aprint_error(": can't map mem space\n"); 397 return; 398 } 399 400 /* Install interrupt handler. */ 401 if (pci_intr_map(pa, &ih) != 0) { 402 aprint_error(": can't map interrupt\n"); 403 return; 404 } 405 intrstr = pci_intr_string(sc->sc_pct, ih); 406 sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwn_intr, sc); 407 if (sc->sc_ih == NULL) { 408 aprint_error(": can't establish interrupt"); 409 if (intrstr != NULL) 410 aprint_error(" at %s", intrstr); 411 aprint_error("\n"); 412 return; 413 } 414 aprint_normal_dev(self, "interrupting at %s\n", intrstr); 415 416 /* Read hardware revision and attach. */ 417 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf; 418 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 419 error = iwn4965_attach(sc, PCI_PRODUCT(pa->pa_id)); 420 else 421 error = iwn5000_attach(sc, PCI_PRODUCT(pa->pa_id)); 422 if (error != 0) { 423 aprint_error(": could not attach device\n"); 424 return; 425 } 426 427 if ((error = iwn_hw_prepare(sc)) != 0) { 428 aprint_error(": hardware not ready\n"); 429 return; 430 } 431 432 /* Read MAC address, channels, etc from EEPROM. */ 433 if ((error = iwn_read_eeprom(sc)) != 0) { 434 aprint_error(": could not read EEPROM\n"); 435 return; 436 } 437 438 /* Allocate DMA memory for firmware transfers. */ 439 if ((error = iwn_alloc_fwmem(sc)) != 0) { 440 aprint_error(": could not allocate memory for firmware\n"); 441 return; 442 } 443 444 /* Allocate "Keep Warm" page. 
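A 4KB, 4KB-aligned DMA buffer set up by iwn_alloc_kw() below; presumably the device uses it to keep its internal DMA state resident in host memory.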
*/ 445 if ((error = iwn_alloc_kw(sc)) != 0) { 446 aprint_error(": could not allocate keep warm page\n"); 447 goto fail1; 448 } 449 450 /* Allocate ICT table for 5000 Series. */ 451 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 452 (error = iwn_alloc_ict(sc)) != 0) { 453 aprint_error(": could not allocate ICT table\n"); 454 goto fail2; 455 } 456 457 /* Allocate TX scheduler "rings". */ 458 if ((error = iwn_alloc_sched(sc)) != 0) { 459 aprint_error(": could not allocate TX scheduler rings\n"); 460 goto fail3; 461 } 462 463 #ifdef IWN_USE_RBUF 464 /* Allocate RX buffers. */ 465 if ((error = iwn_alloc_rpool(sc)) != 0) { 466 aprint_error_dev(self, "could not allocate RX buffers\n"); 467 goto fail3; 468 } 469 #endif 470 471 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */ 472 for (i = 0; i < sc->ntxqs; i++) { 473 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 474 aprint_error(": could not allocate TX ring %d\n", i); 475 goto fail4; 476 } 477 } 478 479 /* Allocate RX ring. */ 480 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) { 481 aprint_error(": could not allocate RX ring\n"); 482 goto fail4; 483 } 484 485 /* Clear pending interrupts. */ 486 IWN_WRITE(sc, IWN_INT, 0xffffffff); 487 488 /* Count the number of available chains. */ 489 sc->ntxchains = 490 ((sc->txchainmask >> 2) & 1) + 491 ((sc->txchainmask >> 1) & 1) + 492 ((sc->txchainmask >> 0) & 1); 493 sc->nrxchains = 494 ((sc->rxchainmask >> 2) & 1) + 495 ((sc->rxchainmask >> 1) & 1) + 496 ((sc->rxchainmask >> 0) & 1); 497 aprint_normal_dev(self, "MIMO %dT%dR, %.4s, address %s\n", 498 sc->ntxchains, sc->nrxchains, sc->eeprom_domain, 499 ether_sprintf(ic->ic_myaddr)); 500 501 ic->ic_ifp = ifp; 502 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 503 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 504 ic->ic_state = IEEE80211_S_INIT; 505 506 /* Set device capabilities. */ 507 /* XXX OpenBSD has IEEE80211_C_WEP, IEEE80211_C_RSN, 508 * and IEEE80211_C_PMGT too. */ 509 ic->ic_caps = 510 IEEE80211_C_IBSS | /* IBSS mode support */ 511 IEEE80211_C_WPA | /* 802.11i */ 512 IEEE80211_C_MONITOR | /* monitor mode supported */ 513 IEEE80211_C_TXPMGT | /* tx power management */ 514 IEEE80211_C_SHSLOT | /* short slot time supported */ 515 IEEE80211_C_SHPREAMBLE | /* short preamble supported */ 516 IEEE80211_C_WME; /* 802.11e */ 517 518 #ifndef IEEE80211_NO_HT 519 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 520 /* Set HT capabilities. */ 521 ic->ic_htcaps = 522 #if IWN_RBUF_SIZE == 8192 523 IEEE80211_HTCAP_AMSDU7935 | 524 #endif 525 IEEE80211_HTCAP_CBW20_40 | 526 IEEE80211_HTCAP_SGI20 | 527 IEEE80211_HTCAP_SGI40; 528 if (sc->hw_type != IWN_HW_REV_TYPE_4965) 529 ic->ic_htcaps |= IEEE80211_HTCAP_GF; 530 if (sc->hw_type == IWN_HW_REV_TYPE_6050) 531 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN; 532 else 533 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS; 534 } 535 #endif /* !IEEE80211_NO_HT */ 536 537 /* Set supported legacy rates. */ 538 ic->ic_sup_rates[IEEE80211_MODE_11B] = iwn_rateset_11b; 539 ic->ic_sup_rates[IEEE80211_MODE_11G] = iwn_rateset_11g; 540 if (sc->sc_flags & IWN_FLAG_HAS_5GHZ) { 541 ic->ic_sup_rates[IEEE80211_MODE_11A] = iwn_rateset_11a; 542 } 543 #ifndef IEEE80211_NO_HT 544 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 545 /* Set supported HT rates. */ 546 ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */ 547 if (sc->nrxchains > 1) 548 ic->ic_sup_mcs[1] = 0xff; /* MCS 7-15 */ 549 if (sc->nrxchains > 2) 550 ic->ic_sup_mcs[2] = 0xff; /* MCS 16-23 */ 551 } 552 #endif 553 554 /* IBSS channel undefined for now. 
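For now simply point it at the first entry of ic_channels[]; net80211 can override this later.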
*/ 555 ic->ic_ibss_chan = &ic->ic_channels[0]; 556 557 ifp->if_softc = sc; 558 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 559 ifp->if_init = iwn_init; 560 ifp->if_ioctl = iwn_ioctl; 561 ifp->if_start = iwn_start; 562 ifp->if_stop = iwn_stop; 563 ifp->if_watchdog = iwn_watchdog; 564 IFQ_SET_READY(&ifp->if_snd); 565 memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 566 567 if_attach(ifp); 568 ieee80211_ifattach(ic); 569 ic->ic_node_alloc = iwn_node_alloc; 570 ic->ic_newassoc = iwn_newassoc; 571 #ifdef IWN_HWCRYPTO 572 ic->ic_crypto.cs_key_set = iwn_set_key; 573 ic->ic_crypto.cs_key_delete = iwn_delete_key; 574 #endif 575 ic->ic_wme.wme_update = iwn_wme_update; 576 #ifndef IEEE80211_NO_HT 577 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start; 578 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop; 579 ic->ic_ampdu_tx_start = iwn_ampdu_tx_start; 580 ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop; 581 #endif 582 583 /* Override 802.11 state transition machine. */ 584 sc->sc_newstate = ic->ic_newstate; 585 ic->ic_newstate = iwn_newstate; 586 ieee80211_media_init(ic, iwn_media_change, ieee80211_media_status); 587 588 sc->amrr.amrr_min_success_threshold = 1; 589 sc->amrr.amrr_max_success_threshold = 15; 590 591 iwn_radiotap_attach(sc); 592 593 /* 594 * XXX for NetBSD, OpenBSD timeout_set replaced by 595 * callout_init and callout_setfunc, above. 596 */ 597 598 if (pmf_device_register(self, NULL, iwn_resume)) 599 pmf_class_network_register(self, ifp); 600 else 601 aprint_error_dev(self, "couldn't establish power handler\n"); 602 603 /* XXX NetBSD add call to ieee80211_announce for dmesg. */ 604 ieee80211_announce(ic); 605 606 return; 607 608 /* Free allocated memory if something failed during attachment. */ 609 fail4: while (--i >= 0) 610 iwn_free_tx_ring(sc, &sc->txq[i]); 611 #ifdef IWN_USE_RBUF 612 iwn_free_rpool(sc); 613 #endif 614 iwn_free_sched(sc); 615 fail3: if (sc->ict != NULL) 616 iwn_free_ict(sc); 617 fail2: iwn_free_kw(sc); 618 fail1: iwn_free_fwmem(sc); 619 } 620 621 int 622 iwn4965_attach(struct iwn_softc *sc, pci_product_id_t pid) 623 { 624 struct iwn_ops *ops = &sc->ops; 625 626 ops->load_firmware = iwn4965_load_firmware; 627 ops->read_eeprom = iwn4965_read_eeprom; 628 ops->post_alive = iwn4965_post_alive; 629 ops->nic_config = iwn4965_nic_config; 630 ops->config_bt_coex = iwn_config_bt_coex_bluetooth; 631 ops->update_sched = iwn4965_update_sched; 632 ops->get_temperature = iwn4965_get_temperature; 633 ops->get_rssi = iwn4965_get_rssi; 634 ops->set_txpower = iwn4965_set_txpower; 635 ops->init_gains = iwn4965_init_gains; 636 ops->set_gains = iwn4965_set_gains; 637 ops->add_node = iwn4965_add_node; 638 ops->tx_done = iwn4965_tx_done; 639 #ifndef IEEE80211_NO_HT 640 ops->ampdu_tx_start = iwn4965_ampdu_tx_start; 641 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop; 642 #endif 643 sc->ntxqs = IWN4965_NTXQUEUES; 644 sc->ndmachnls = IWN4965_NDMACHNLS; 645 sc->broadcast_id = IWN4965_ID_BROADCAST; 646 sc->rxonsz = IWN4965_RXONSZ; 647 sc->schedsz = IWN4965_SCHEDSZ; 648 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ; 649 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ; 650 sc->fwsz = IWN4965_FWSZ; 651 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT; 652 sc->limits = &iwn4965_sensitivity_limits; 653 sc->fwname = "iwlwifi-4965-2.ucode"; 654 /* Override chains masks, ROM is known to be broken. 
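The 4965AGN transmits on antennas A and B and receives on all three; setting the masks here keeps iwn_read_eeprom() from falling back to the bogus ROM values, since it only reads them when the masks are still zero.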
*/ 655 sc->txchainmask = IWN_ANT_AB; 656 sc->rxchainmask = IWN_ANT_ABC; 657 658 return 0; 659 } 660 661 int 662 iwn5000_attach(struct iwn_softc *sc, pci_product_id_t pid) 663 { 664 struct iwn_ops *ops = &sc->ops; 665 666 ops->load_firmware = iwn5000_load_firmware; 667 ops->read_eeprom = iwn5000_read_eeprom; 668 ops->post_alive = iwn5000_post_alive; 669 ops->nic_config = iwn5000_nic_config; 670 ops->config_bt_coex = iwn_config_bt_coex_bluetooth; 671 ops->update_sched = iwn5000_update_sched; 672 ops->get_temperature = iwn5000_get_temperature; 673 ops->get_rssi = iwn5000_get_rssi; 674 ops->set_txpower = iwn5000_set_txpower; 675 ops->init_gains = iwn5000_init_gains; 676 ops->set_gains = iwn5000_set_gains; 677 ops->add_node = iwn5000_add_node; 678 ops->tx_done = iwn5000_tx_done; 679 #ifndef IEEE80211_NO_HT 680 ops->ampdu_tx_start = iwn5000_ampdu_tx_start; 681 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop; 682 #endif 683 sc->ntxqs = IWN5000_NTXQUEUES; 684 sc->ndmachnls = IWN5000_NDMACHNLS; 685 sc->broadcast_id = IWN5000_ID_BROADCAST; 686 sc->rxonsz = IWN5000_RXONSZ; 687 sc->schedsz = IWN5000_SCHEDSZ; 688 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ; 689 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ; 690 sc->fwsz = IWN5000_FWSZ; 691 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT; 692 693 switch (sc->hw_type) { 694 case IWN_HW_REV_TYPE_5100: 695 sc->limits = &iwn5000_sensitivity_limits; 696 sc->fwname = "iwlwifi-5000-2.ucode"; 697 /* Override chains masks, ROM is known to be broken. */ 698 sc->txchainmask = IWN_ANT_B; 699 sc->rxchainmask = IWN_ANT_AB; 700 break; 701 case IWN_HW_REV_TYPE_5150: 702 sc->limits = &iwn5150_sensitivity_limits; 703 sc->fwname = "iwlwifi-5150-2.ucode"; 704 break; 705 case IWN_HW_REV_TYPE_5300: 706 case IWN_HW_REV_TYPE_5350: 707 sc->limits = &iwn5000_sensitivity_limits; 708 sc->fwname = "iwlwifi-5000-2.ucode"; 709 break; 710 case IWN_HW_REV_TYPE_1000: 711 sc->limits = &iwn1000_sensitivity_limits; 712 sc->fwname = "iwlwifi-1000-3.ucode"; 713 break; 714 case IWN_HW_REV_TYPE_6000: 715 sc->limits = &iwn6000_sensitivity_limits; 716 sc->fwname = "iwlwifi-6000-4.ucode"; 717 if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_1 || 718 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_2) { 719 sc->sc_flags |= IWN_FLAG_INTERNAL_PA; 720 /* Override chains masks, ROM is known to be broken. */ 721 sc->txchainmask = IWN_ANT_BC; 722 sc->rxchainmask = IWN_ANT_BC; 723 } 724 break; 725 case IWN_HW_REV_TYPE_6050: 726 sc->limits = &iwn6000_sensitivity_limits; 727 sc->fwname = "iwlwifi-6050-5.ucode"; 728 break; 729 case IWN_HW_REV_TYPE_6005: 730 sc->limits = &iwn6000_sensitivity_limits; 731 /* Type 6030 cards return IWN_HW_REV_TYPE_6005 */ 732 if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_1030_1 || 733 pid == PCI_PRODUCT_INTEL_WIFI_LINK_1030_2 || 734 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6230_1 || 735 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6230_2 || 736 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6235) { 737 sc->fwname = "iwlwifi-6000g2b-6.ucode"; 738 ops->config_bt_coex = iwn_config_bt_coex_adv1; 739 } 740 else 741 sc->fwname = "iwlwifi-6000g2a-5.ucode"; 742 break; 743 default: 744 aprint_normal(": adapter type %d not supported\n", sc->hw_type); 745 return ENOTSUP; 746 } 747 return 0; 748 } 749 750 /* 751 * Attach the interface to 802.11 radiotap. 
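 * The RX (wr_*) and TX (wt_*) capture headers live in the softc and are
 * set up once here; their it_len and it_present fields do not change
 * afterwards.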
752 */ 753 static void 754 iwn_radiotap_attach(struct iwn_softc *sc) 755 { 756 struct ifnet *ifp = sc->sc_ic.ic_ifp; 757 758 bpf_attach2(ifp, DLT_IEEE802_11_RADIO, 759 sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN, 760 &sc->sc_drvbpf); 761 762 sc->sc_rxtap_len = sizeof sc->sc_rxtapu; 763 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); 764 sc->sc_rxtap.wr_ihdr.it_present = htole32(IWN_RX_RADIOTAP_PRESENT); 765 766 sc->sc_txtap_len = sizeof sc->sc_txtapu; 767 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); 768 sc->sc_txtap.wt_ihdr.it_present = htole32(IWN_TX_RADIOTAP_PRESENT); 769 } 770 771 static int 772 iwn_detach(device_t self, int flags __unused) 773 { 774 struct iwn_softc *sc = device_private(self); 775 struct ifnet *ifp = sc->sc_ic.ic_ifp; 776 int qid; 777 778 callout_stop(&sc->calib_to); 779 780 /* Uninstall interrupt handler. */ 781 if (sc->sc_ih != NULL) 782 pci_intr_disestablish(sc->sc_pct, sc->sc_ih); 783 784 /* Free DMA resources. */ 785 iwn_free_rx_ring(sc, &sc->rxq); 786 for (qid = 0; qid < sc->ntxqs; qid++) 787 iwn_free_tx_ring(sc, &sc->txq[qid]); 788 #ifdef IWN_USE_RBUF 789 iwn_free_rpool(sc); 790 #endif 791 iwn_free_sched(sc); 792 iwn_free_kw(sc); 793 if (sc->ict != NULL) 794 iwn_free_ict(sc); 795 iwn_free_fwmem(sc); 796 797 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz); 798 799 ieee80211_ifdetach(&sc->sc_ic); 800 if_detach(ifp); 801 802 return 0; 803 } 804 805 #if 0 806 /* 807 * XXX Investigate if clearing the PCI retry timeout could eliminate 808 * the repeated scan calls. Also the calls to if_init and if_start 809 * are similar to the effect of adding the call to ifioctl_common . 810 */ 811 static void 812 iwn_power(int why, void *arg) 813 { 814 struct iwn_softc *sc = arg; 815 struct ifnet *ifp; 816 pcireg_t reg; 817 int s; 818 819 if (why != PWR_RESUME) 820 return; 821 822 /* Clear device-specific "PCI retry timeout" register (41h). */ 823 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40); 824 if (reg & 0xff00) 825 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00); 826 827 s = splnet(); 828 ifp = &sc->sc_ic.ic_if; 829 if (ifp->if_flags & IFF_UP) { 830 ifp->if_init(ifp); 831 if (ifp->if_flags & IFF_RUNNING) 832 ifp->if_start(ifp); 833 } 834 splx(s); 835 } 836 #endif 837 838 static bool 839 iwn_resume(device_t dv, const pmf_qual_t *qual) 840 { 841 return true; 842 } 843 844 static int 845 iwn_nic_lock(struct iwn_softc *sc) 846 { 847 int ntries; 848 849 /* Request exclusive access to NIC. */ 850 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 851 852 /* Spin until we actually get the lock. 
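The lock is held once MAC_ACCESS_ENA is set and the SLEEP bit is clear; poll up to 1000 times with a 10us delay (about 10ms) before giving up with ETIMEDOUT.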
*/ 853 for (ntries = 0; ntries < 1000; ntries++) { 854 if ((IWN_READ(sc, IWN_GP_CNTRL) & 855 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) == 856 IWN_GP_CNTRL_MAC_ACCESS_ENA) 857 return 0; 858 DELAY(10); 859 } 860 return ETIMEDOUT; 861 } 862 863 static __inline void 864 iwn_nic_unlock(struct iwn_softc *sc) 865 { 866 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 867 } 868 869 static __inline uint32_t 870 iwn_prph_read(struct iwn_softc *sc, uint32_t addr) 871 { 872 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr); 873 IWN_BARRIER_READ_WRITE(sc); 874 return IWN_READ(sc, IWN_PRPH_RDATA); 875 } 876 877 static __inline void 878 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 879 { 880 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr); 881 IWN_BARRIER_WRITE(sc); 882 IWN_WRITE(sc, IWN_PRPH_WDATA, data); 883 } 884 885 static __inline void 886 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 887 { 888 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask); 889 } 890 891 static __inline void 892 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 893 { 894 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask); 895 } 896 897 static __inline void 898 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr, 899 const uint32_t *data, int count) 900 { 901 for (; count > 0; count--, data++, addr += 4) 902 iwn_prph_write(sc, addr, *data); 903 } 904 905 static __inline uint32_t 906 iwn_mem_read(struct iwn_softc *sc, uint32_t addr) 907 { 908 IWN_WRITE(sc, IWN_MEM_RADDR, addr); 909 IWN_BARRIER_READ_WRITE(sc); 910 return IWN_READ(sc, IWN_MEM_RDATA); 911 } 912 913 static __inline void 914 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 915 { 916 IWN_WRITE(sc, IWN_MEM_WADDR, addr); 917 IWN_BARRIER_WRITE(sc); 918 IWN_WRITE(sc, IWN_MEM_WDATA, data); 919 } 920 921 #ifndef IEEE80211_NO_HT 922 static __inline void 923 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data) 924 { 925 uint32_t tmp; 926 927 tmp = iwn_mem_read(sc, addr & ~3); 928 if (addr & 3) 929 tmp = (tmp & 0x0000ffff) | data << 16; 930 else 931 tmp = (tmp & 0xffff0000) | data; 932 iwn_mem_write(sc, addr & ~3, tmp); 933 } 934 #endif 935 936 static __inline void 937 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data, 938 int count) 939 { 940 for (; count > 0; count--, addr += 4) 941 *data++ = iwn_mem_read(sc, addr); 942 } 943 944 static __inline void 945 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val, 946 int count) 947 { 948 for (; count > 0; count--, addr += 4) 949 iwn_mem_write(sc, addr, val); 950 } 951 952 static int 953 iwn_eeprom_lock(struct iwn_softc *sc) 954 { 955 int i, ntries; 956 957 for (i = 0; i < 100; i++) { 958 /* Request exclusive access to EEPROM. */ 959 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 960 IWN_HW_IF_CONFIG_EEPROM_LOCKED); 961 962 /* Spin until we actually get the lock. */ 963 for (ntries = 0; ntries < 100; ntries++) { 964 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 965 IWN_HW_IF_CONFIG_EEPROM_LOCKED) 966 return 0; 967 DELAY(10); 968 } 969 } 970 return ETIMEDOUT; 971 } 972 973 static __inline void 974 iwn_eeprom_unlock(struct iwn_softc *sc) 975 { 976 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED); 977 } 978 979 /* 980 * Initialize access by host to One Time Programmable ROM. 981 * NB: This kind of ROM can be found on 1000 or 6000 Series only. 
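 * On adapters without OTP shadow RAM (1000 Series), the usable image is the
 * next-to-last block of a linked list; iwn_init_otprom() locates it by
 * walking the list in absolute addressing mode.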
982 */ 983 static int 984 iwn_init_otprom(struct iwn_softc *sc) 985 { 986 uint16_t prev = 0, base, next; 987 int count, error; 988 989 /* Wait for clock stabilization before accessing prph. */ 990 if ((error = iwn_clock_wait(sc)) != 0) 991 return error; 992 993 if ((error = iwn_nic_lock(sc)) != 0) 994 return error; 995 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 996 DELAY(5); 997 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 998 iwn_nic_unlock(sc); 999 1000 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */ 1001 if (sc->hw_type != IWN_HW_REV_TYPE_1000) { 1002 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT, 1003 IWN_RESET_LINK_PWR_MGMT_DIS); 1004 } 1005 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER); 1006 /* Clear ECC status. */ 1007 IWN_SETBITS(sc, IWN_OTP_GP, 1008 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS); 1009 1010 /* 1011 * Find the block before last block (contains the EEPROM image) 1012 * for HW without OTP shadow RAM. 1013 */ 1014 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 1015 /* Switch to absolute addressing mode. */ 1016 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS); 1017 base = 0; 1018 for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) { 1019 error = iwn_read_prom_data(sc, base, &next, 2); 1020 if (error != 0) 1021 return error; 1022 if (next == 0) /* End of linked-list. */ 1023 break; 1024 prev = base; 1025 base = le16toh(next); 1026 } 1027 if (count == 0 || count == IWN1000_OTP_NBLOCKS) 1028 return EIO; 1029 /* Skip "next" word. */ 1030 sc->prom_base = prev + 1; 1031 } 1032 return 0; 1033 } 1034 1035 static int 1036 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count) 1037 { 1038 uint8_t *out = data; 1039 uint32_t val, tmp; 1040 int ntries; 1041 1042 addr += sc->prom_base; 1043 for (; count > 0; count -= 2, addr++) { 1044 IWN_WRITE(sc, IWN_EEPROM, addr << 2); 1045 for (ntries = 0; ntries < 10; ntries++) { 1046 val = IWN_READ(sc, IWN_EEPROM); 1047 if (val & IWN_EEPROM_READ_VALID) 1048 break; 1049 DELAY(5); 1050 } 1051 if (ntries == 10) { 1052 aprint_error_dev(sc->sc_dev, 1053 "timeout reading ROM at 0x%x\n", addr); 1054 return ETIMEDOUT; 1055 } 1056 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1057 /* OTPROM, check for ECC errors. */ 1058 tmp = IWN_READ(sc, IWN_OTP_GP); 1059 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) { 1060 aprint_error_dev(sc->sc_dev, 1061 "OTPROM ECC error at 0x%x\n", addr); 1062 return EIO; 1063 } 1064 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) { 1065 /* Correctable ECC error, clear bit. 
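The word just read is still valid in this case; writing the status bit back merely acknowledges the event so subsequent reads start with a clean status.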
*/ 1066 IWN_SETBITS(sc, IWN_OTP_GP, 1067 IWN_OTP_GP_ECC_CORR_STTS); 1068 } 1069 } 1070 *out++ = val >> 16; 1071 if (count > 1) 1072 *out++ = val >> 24; 1073 } 1074 return 0; 1075 } 1076 1077 static int 1078 iwn_dma_contig_alloc(bus_dma_tag_t tag, struct iwn_dma_info *dma, void **kvap, 1079 bus_size_t size, bus_size_t alignment) 1080 { 1081 int nsegs, error; 1082 1083 dma->tag = tag; 1084 dma->size = size; 1085 1086 error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT, 1087 &dma->map); 1088 if (error != 0) 1089 goto fail; 1090 1091 error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs, 1092 BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */ 1093 if (error != 0) 1094 goto fail; 1095 1096 error = bus_dmamem_map(tag, &dma->seg, 1, size, &dma->vaddr, 1097 BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_COHERENT */ 1098 if (error != 0) 1099 goto fail; 1100 1101 error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL, 1102 BUS_DMA_NOWAIT); 1103 if (error != 0) 1104 goto fail; 1105 1106 /* XXX Presumably needed because of missing BUS_DMA_ZERO, above. */ 1107 memset(dma->vaddr, 0, size); 1108 bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE); 1109 1110 dma->paddr = dma->map->dm_segs[0].ds_addr; 1111 if (kvap != NULL) 1112 *kvap = dma->vaddr; 1113 1114 return 0; 1115 1116 fail: iwn_dma_contig_free(dma); 1117 return error; 1118 } 1119 1120 static void 1121 iwn_dma_contig_free(struct iwn_dma_info *dma) 1122 { 1123 if (dma->map != NULL) { 1124 if (dma->vaddr != NULL) { 1125 bus_dmamap_sync(dma->tag, dma->map, 0, dma->size, 1126 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1127 bus_dmamap_unload(dma->tag, dma->map); 1128 bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size); 1129 bus_dmamem_free(dma->tag, &dma->seg, 1); 1130 dma->vaddr = NULL; 1131 } 1132 bus_dmamap_destroy(dma->tag, dma->map); 1133 dma->map = NULL; 1134 } 1135 } 1136 1137 static int 1138 iwn_alloc_sched(struct iwn_softc *sc) 1139 { 1140 /* TX scheduler rings must be aligned on a 1KB boundary. */ 1141 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma, 1142 (void **)&sc->sched, sc->schedsz, 1024); 1143 } 1144 1145 static void 1146 iwn_free_sched(struct iwn_softc *sc) 1147 { 1148 iwn_dma_contig_free(&sc->sched_dma); 1149 } 1150 1151 static int 1152 iwn_alloc_kw(struct iwn_softc *sc) 1153 { 1154 /* "Keep Warm" page must be aligned on a 4KB boundary. */ 1155 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, NULL, 4096, 1156 4096); 1157 } 1158 1159 static void 1160 iwn_free_kw(struct iwn_softc *sc) 1161 { 1162 iwn_dma_contig_free(&sc->kw_dma); 1163 } 1164 1165 static int 1166 iwn_alloc_ict(struct iwn_softc *sc) 1167 { 1168 /* ICT table must be aligned on a 4KB boundary. */ 1169 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, 1170 (void **)&sc->ict, IWN_ICT_SIZE, 4096); 1171 } 1172 1173 static void 1174 iwn_free_ict(struct iwn_softc *sc) 1175 { 1176 iwn_dma_contig_free(&sc->ict_dma); 1177 } 1178 1179 static int 1180 iwn_alloc_fwmem(struct iwn_softc *sc) 1181 { 1182 /* Must be aligned on a 16-byte boundary. */ 1183 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, NULL, 1184 sc->fwsz, 16); 1185 } 1186 1187 static void 1188 iwn_free_fwmem(struct iwn_softc *sc) 1189 { 1190 iwn_dma_contig_free(&sc->fw_dma); 1191 } 1192 1193 static int 1194 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1195 { 1196 bus_size_t size; 1197 int i, error; 1198 1199 ring->cur = 0; 1200 1201 /* Allocate RX descriptors (256-byte aligned). 
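Each descriptor is a single 32-bit word holding the physical address of an RX buffer shifted right by 8 bits, which is why the buffers themselves must be 256-byte aligned as well.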
*/ 1202 size = IWN_RX_RING_COUNT * sizeof (uint32_t); 1203 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, 1204 (void **)&ring->desc, size, 256); 1205 if (error != 0) { 1206 aprint_error_dev(sc->sc_dev, 1207 "could not allocate RX ring DMA memory\n"); 1208 goto fail; 1209 } 1210 1211 /* Allocate RX status area (16-byte aligned). */ 1212 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, 1213 (void **)&ring->stat, sizeof (struct iwn_rx_status), 16); 1214 if (error != 0) { 1215 aprint_error_dev(sc->sc_dev, 1216 "could not allocate RX status DMA memory\n"); 1217 goto fail; 1218 } 1219 1220 /* 1221 * Allocate and map RX buffers. 1222 */ 1223 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1224 struct iwn_rx_data *data = &ring->data[i]; 1225 1226 error = bus_dmamap_create(sc->sc_dmat, IWN_RBUF_SIZE, 1, 1227 IWN_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1228 &data->map); 1229 if (error != 0) { 1230 aprint_error_dev(sc->sc_dev, 1231 "could not create RX buf DMA map\n"); 1232 goto fail; 1233 } 1234 1235 data->m = MCLGETIalt(sc, M_DONTWAIT, NULL, IWN_RBUF_SIZE); 1236 if (data->m == NULL) { 1237 aprint_error_dev(sc->sc_dev, 1238 "could not allocate RX mbuf\n"); 1239 error = ENOBUFS; 1240 goto fail; 1241 } 1242 1243 error = bus_dmamap_load(sc->sc_dmat, data->map, 1244 mtod(data->m, void *), IWN_RBUF_SIZE, NULL, 1245 BUS_DMA_NOWAIT | BUS_DMA_READ); 1246 if (error != 0) { 1247 aprint_error_dev(sc->sc_dev, 1248 "can't not map mbuf (error %d)\n", error); 1249 goto fail; 1250 } 1251 1252 /* Set physical address of RX buffer (256-byte aligned). */ 1253 ring->desc[i] = htole32(data->map->dm_segs[0].ds_addr >> 8); 1254 } 1255 1256 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0, size, 1257 BUS_DMASYNC_PREWRITE); 1258 1259 return 0; 1260 1261 fail: iwn_free_rx_ring(sc, ring); 1262 return error; 1263 } 1264 1265 static void 1266 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1267 { 1268 int ntries; 1269 1270 if (iwn_nic_lock(sc) == 0) { 1271 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 1272 for (ntries = 0; ntries < 1000; ntries++) { 1273 if (IWN_READ(sc, IWN_FH_RX_STATUS) & 1274 IWN_FH_RX_STATUS_IDLE) 1275 break; 1276 DELAY(10); 1277 } 1278 iwn_nic_unlock(sc); 1279 } 1280 ring->cur = 0; 1281 sc->last_rx_valid = 0; 1282 } 1283 1284 static void 1285 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1286 { 1287 int i; 1288 1289 iwn_dma_contig_free(&ring->desc_dma); 1290 iwn_dma_contig_free(&ring->stat_dma); 1291 1292 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1293 struct iwn_rx_data *data = &ring->data[i]; 1294 1295 if (data->m != NULL) { 1296 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 1297 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1298 bus_dmamap_unload(sc->sc_dmat, data->map); 1299 m_freem(data->m); 1300 } 1301 if (data->map != NULL) 1302 bus_dmamap_destroy(sc->sc_dmat, data->map); 1303 } 1304 } 1305 1306 static int 1307 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) 1308 { 1309 bus_addr_t paddr; 1310 bus_size_t size; 1311 int i, error; 1312 1313 ring->qid = qid; 1314 ring->queued = 0; 1315 ring->cur = 0; 1316 1317 /* Allocate TX descriptors (256-byte aligned). 
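One struct iwn_tx_desc per ring slot. The command buffers that follow are only allocated for queues 0-4 (four EDCA queues plus the command queue), as noted below.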
*/ 1318 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc); 1319 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, 1320 (void **)&ring->desc, size, 256); 1321 if (error != 0) { 1322 aprint_error_dev(sc->sc_dev, 1323 "could not allocate TX ring DMA memory\n"); 1324 goto fail; 1325 } 1326 /* 1327 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need 1328 * to allocate commands space for other rings. 1329 * XXX Do we really need to allocate descriptors for other rings? 1330 */ 1331 if (qid > 4) 1332 return 0; 1333 1334 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd); 1335 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, 1336 (void **)&ring->cmd, size, 4); 1337 if (error != 0) { 1338 aprint_error_dev(sc->sc_dev, 1339 "could not allocate TX cmd DMA memory\n"); 1340 goto fail; 1341 } 1342 1343 paddr = ring->cmd_dma.paddr; 1344 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1345 struct iwn_tx_data *data = &ring->data[i]; 1346 1347 data->cmd_paddr = paddr; 1348 data->scratch_paddr = paddr + 12; 1349 paddr += sizeof (struct iwn_tx_cmd); 1350 1351 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1352 IWN_MAX_SCATTER - 1, MCLBYTES, 0, BUS_DMA_NOWAIT, 1353 &data->map); 1354 if (error != 0) { 1355 aprint_error_dev(sc->sc_dev, 1356 "could not create TX buf DMA map\n"); 1357 goto fail; 1358 } 1359 } 1360 return 0; 1361 1362 fail: iwn_free_tx_ring(sc, ring); 1363 return error; 1364 } 1365 1366 static void 1367 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1368 { 1369 int i; 1370 1371 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1372 struct iwn_tx_data *data = &ring->data[i]; 1373 1374 if (data->m != NULL) { 1375 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 1376 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1377 bus_dmamap_unload(sc->sc_dmat, data->map); 1378 m_freem(data->m); 1379 data->m = NULL; 1380 } 1381 } 1382 /* Clear TX descriptors. */ 1383 memset(ring->desc, 0, ring->desc_dma.size); 1384 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0, 1385 ring->desc_dma.size, BUS_DMASYNC_PREWRITE); 1386 sc->qfullmsk &= ~(1 << ring->qid); 1387 ring->queued = 0; 1388 ring->cur = 0; 1389 } 1390 1391 static void 1392 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1393 { 1394 int i; 1395 1396 iwn_dma_contig_free(&ring->desc_dma); 1397 iwn_dma_contig_free(&ring->cmd_dma); 1398 1399 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1400 struct iwn_tx_data *data = &ring->data[i]; 1401 1402 if (data->m != NULL) { 1403 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 1404 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1405 bus_dmamap_unload(sc->sc_dmat, data->map); 1406 m_freem(data->m); 1407 } 1408 if (data->map != NULL) 1409 bus_dmamap_destroy(sc->sc_dmat, data->map); 1410 } 1411 } 1412 1413 static void 1414 iwn5000_ict_reset(struct iwn_softc *sc) 1415 { 1416 /* Disable interrupts. */ 1417 IWN_WRITE(sc, IWN_INT_MASK, 0); 1418 1419 /* Reset ICT table. */ 1420 memset(sc->ict, 0, IWN_ICT_SIZE); 1421 sc->ict_cur = 0; 1422 1423 /* Set physical address of ICT table (4KB aligned). */ 1424 DPRINTF(("enabling ICT\n")); 1425 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE | 1426 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12); 1427 1428 /* Enable periodic RX interrupt. */ 1429 sc->int_mask |= IWN_INT_RX_PERIODIC; 1430 /* Switch to ICT interrupt mode in driver. */ 1431 sc->sc_flags |= IWN_FLAG_USE_ICT; 1432 1433 /* Re-enable interrupts. 
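Acknowledge anything still pending, then restore the mask, which now includes IWN_INT_RX_PERIODIC.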
*/ 1434 IWN_WRITE(sc, IWN_INT, 0xffffffff); 1435 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 1436 } 1437 1438 static int 1439 iwn_read_eeprom(struct iwn_softc *sc) 1440 { 1441 struct iwn_ops *ops = &sc->ops; 1442 struct ieee80211com *ic = &sc->sc_ic; 1443 uint16_t val; 1444 int error; 1445 1446 /* Check whether adapter has an EEPROM or an OTPROM. */ 1447 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 && 1448 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP)) 1449 sc->sc_flags |= IWN_FLAG_HAS_OTPROM; 1450 DPRINTF(("%s found\n", (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? 1451 "OTPROM" : "EEPROM")); 1452 1453 /* Adapter has to be powered on for EEPROM access to work. */ 1454 if ((error = iwn_apm_init(sc)) != 0) { 1455 aprint_error_dev(sc->sc_dev, 1456 "could not power ON adapter\n"); 1457 return error; 1458 } 1459 1460 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) { 1461 aprint_error_dev(sc->sc_dev, 1462 "bad ROM signature\n"); 1463 return EIO; 1464 } 1465 if ((error = iwn_eeprom_lock(sc)) != 0) { 1466 aprint_error_dev(sc->sc_dev, 1467 "could not lock ROM (error=%d)\n", error); 1468 return error; 1469 } 1470 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1471 if ((error = iwn_init_otprom(sc)) != 0) { 1472 aprint_error_dev(sc->sc_dev, 1473 "could not initialize OTPROM\n"); 1474 return error; 1475 } 1476 } 1477 1478 iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2); 1479 DPRINTF(("SKU capabilities=0x%04x\n", le16toh(val))); 1480 /* Check if HT support is bonded out. */ 1481 if (val & htole16(IWN_EEPROM_SKU_CAP_11N)) 1482 sc->sc_flags |= IWN_FLAG_HAS_11N; 1483 1484 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2); 1485 sc->rfcfg = le16toh(val); 1486 DPRINTF(("radio config=0x%04x\n", sc->rfcfg)); 1487 /* Read Tx/Rx chains from ROM unless it's known to be broken. */ 1488 if (sc->txchainmask == 0) 1489 sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg); 1490 if (sc->rxchainmask == 0) 1491 sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg); 1492 1493 /* Read MAC address. */ 1494 iwn_read_prom_data(sc, IWN_EEPROM_MAC, ic->ic_myaddr, 6); 1495 1496 /* Read adapter-specific information from EEPROM. */ 1497 ops->read_eeprom(sc); 1498 1499 iwn_apm_stop(sc); /* Power OFF adapter. */ 1500 1501 iwn_eeprom_unlock(sc); 1502 return 0; 1503 } 1504 1505 static void 1506 iwn4965_read_eeprom(struct iwn_softc *sc) 1507 { 1508 uint32_t addr; 1509 uint16_t val; 1510 int i; 1511 1512 /* Read regulatory domain (4 ASCII characters). */ 1513 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4); 1514 1515 /* Read the list of authorized channels (20MHz ones only). */ 1516 for (i = 0; i < 5; i++) { 1517 addr = iwn4965_regulatory_bands[i]; 1518 iwn_read_eeprom_channels(sc, i, addr); 1519 } 1520 1521 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */ 1522 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2); 1523 sc->maxpwr2GHz = val & 0xff; 1524 sc->maxpwr5GHz = val >> 8; 1525 /* Check that EEPROM values are within valid range. */ 1526 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50) 1527 sc->maxpwr5GHz = 38; 1528 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50) 1529 sc->maxpwr2GHz = 38; 1530 DPRINTF(("maxpwr 2GHz=%d 5GHz=%d\n", sc->maxpwr2GHz, sc->maxpwr5GHz)); 1531 1532 /* Read samples for each TX power group. */ 1533 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands, 1534 sizeof sc->bands); 1535 1536 /* Read voltage at which samples were taken. 
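The value is stored in units of 0.3V; it is presumably used by iwn4965_set_txpower() to compensate the factory power tables for voltage drift.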
*/ 1537 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2); 1538 sc->eeprom_voltage = (int16_t)le16toh(val); 1539 DPRINTF(("voltage=%d (in 0.3V)\n", sc->eeprom_voltage)); 1540 1541 #ifdef IWN_DEBUG 1542 /* Print samples. */ 1543 if (iwn_debug > 0) { 1544 for (i = 0; i < IWN_NBANDS; i++) 1545 iwn4965_print_power_group(sc, i); 1546 } 1547 #endif 1548 } 1549 1550 #ifdef IWN_DEBUG 1551 static void 1552 iwn4965_print_power_group(struct iwn_softc *sc, int i) 1553 { 1554 struct iwn4965_eeprom_band *band = &sc->bands[i]; 1555 struct iwn4965_eeprom_chan_samples *chans = band->chans; 1556 int j, c; 1557 1558 aprint_normal("===band %d===\n", i); 1559 aprint_normal("chan lo=%d, chan hi=%d\n", band->lo, band->hi); 1560 aprint_normal("chan1 num=%d\n", chans[0].num); 1561 for (c = 0; c < 2; c++) { 1562 for (j = 0; j < IWN_NSAMPLES; j++) { 1563 aprint_normal("chain %d, sample %d: temp=%d gain=%d " 1564 "power=%d pa_det=%d\n", c, j, 1565 chans[0].samples[c][j].temp, 1566 chans[0].samples[c][j].gain, 1567 chans[0].samples[c][j].power, 1568 chans[0].samples[c][j].pa_det); 1569 } 1570 } 1571 aprint_normal("chan2 num=%d\n", chans[1].num); 1572 for (c = 0; c < 2; c++) { 1573 for (j = 0; j < IWN_NSAMPLES; j++) { 1574 aprint_normal("chain %d, sample %d: temp=%d gain=%d " 1575 "power=%d pa_det=%d\n", c, j, 1576 chans[1].samples[c][j].temp, 1577 chans[1].samples[c][j].gain, 1578 chans[1].samples[c][j].power, 1579 chans[1].samples[c][j].pa_det); 1580 } 1581 } 1582 } 1583 #endif 1584 1585 static void 1586 iwn5000_read_eeprom(struct iwn_softc *sc) 1587 { 1588 struct iwn5000_eeprom_calib_hdr hdr; 1589 int32_t volt; 1590 uint32_t base, addr; 1591 uint16_t val; 1592 int i; 1593 1594 /* Read regulatory domain (4 ASCII characters). */ 1595 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 1596 base = le16toh(val); 1597 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN, 1598 sc->eeprom_domain, 4); 1599 1600 /* Read the list of authorized channels (20MHz ones only). */ 1601 for (i = 0; i < 5; i++) { 1602 addr = base + iwn5000_regulatory_bands[i]; 1603 iwn_read_eeprom_channels(sc, i, addr); 1604 } 1605 1606 /* Read enhanced TX power information for 6000 Series. */ 1607 if (sc->hw_type >= IWN_HW_REV_TYPE_6000) 1608 iwn_read_eeprom_enhinfo(sc); 1609 1610 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2); 1611 base = le16toh(val); 1612 iwn_read_prom_data(sc, base, &hdr, sizeof hdr); 1613 DPRINTF(("calib version=%u pa type=%u voltage=%u\n", 1614 hdr.version, hdr.pa_type, le16toh(hdr.volt))); 1615 sc->calib_ver = hdr.version; 1616 1617 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 1618 /* Compute temperature offset. */ 1619 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2); 1620 sc->eeprom_temp = le16toh(val); 1621 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2); 1622 volt = le16toh(val); 1623 sc->temp_off = sc->eeprom_temp - (volt / -5); 1624 DPRINTF(("temp=%d volt=%d offset=%dK\n", 1625 sc->eeprom_temp, volt, sc->temp_off)); 1626 } else { 1627 /* Read crystal calibration. 
*/ 1628 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL, 1629 &sc->eeprom_crystal, sizeof (uint32_t)); 1630 DPRINTF(("crystal calibration 0x%08x\n", 1631 le32toh(sc->eeprom_crystal))); 1632 } 1633 } 1634 1635 static void 1636 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr) 1637 { 1638 struct ieee80211com *ic = &sc->sc_ic; 1639 const struct iwn_chan_band *band = &iwn_bands[n]; 1640 struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND]; 1641 uint8_t chan; 1642 int i; 1643 1644 iwn_read_prom_data(sc, addr, channels, 1645 band->nchan * sizeof (struct iwn_eeprom_chan)); 1646 1647 for (i = 0; i < band->nchan; i++) { 1648 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) 1649 continue; 1650 1651 chan = band->chan[i]; 1652 1653 if (n == 0) { /* 2GHz band */ 1654 ic->ic_channels[chan].ic_freq = 1655 ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ); 1656 ic->ic_channels[chan].ic_flags = 1657 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM | 1658 IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; 1659 1660 } else { /* 5GHz band */ 1661 /* 1662 * Some adapters support channels 7, 8, 11 and 12 1663 * both in the 2GHz and 4.9GHz bands. 1664 * Because of limitations in our net80211 layer, 1665 * we don't support them in the 4.9GHz band. 1666 */ 1667 if (chan <= 14) 1668 continue; 1669 1670 ic->ic_channels[chan].ic_freq = 1671 ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ); 1672 ic->ic_channels[chan].ic_flags = IEEE80211_CHAN_A; 1673 /* We have at least one valid 5GHz channel. */ 1674 sc->sc_flags |= IWN_FLAG_HAS_5GHZ; 1675 } 1676 1677 /* Is active scan allowed on this channel? */ 1678 if (!(channels[i].flags & IWN_EEPROM_CHAN_ACTIVE)) { 1679 ic->ic_channels[chan].ic_flags |= 1680 IEEE80211_CHAN_PASSIVE; 1681 } 1682 1683 /* Save maximum allowed TX power for this channel. */ 1684 sc->maxpwr[chan] = channels[i].maxpwr; 1685 1686 DPRINTF(("adding chan %d flags=0x%x maxpwr=%d\n", 1687 chan, channels[i].flags, sc->maxpwr[chan])); 1688 } 1689 } 1690 1691 static void 1692 iwn_read_eeprom_enhinfo(struct iwn_softc *sc) 1693 { 1694 struct iwn_eeprom_enhinfo enhinfo[35]; 1695 uint16_t val, base; 1696 int8_t maxpwr; 1697 int i; 1698 1699 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 1700 base = le16toh(val); 1701 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO, 1702 enhinfo, sizeof enhinfo); 1703 1704 memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr); 1705 for (i = 0; i < __arraycount(enhinfo); i++) { 1706 if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0) 1707 continue; /* Skip invalid entries. */ 1708 1709 maxpwr = 0; 1710 if (sc->txchainmask & IWN_ANT_A) 1711 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]); 1712 if (sc->txchainmask & IWN_ANT_B) 1713 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]); 1714 if (sc->txchainmask & IWN_ANT_C) 1715 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]); 1716 if (sc->ntxchains == 2) 1717 maxpwr = MAX(maxpwr, enhinfo[i].mimo2); 1718 else if (sc->ntxchains == 3) 1719 maxpwr = MAX(maxpwr, enhinfo[i].mimo3); 1720 maxpwr /= 2; /* Convert half-dBm to dBm. 
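The enhanced-info entries store limits in half-dBm steps; the maximum over the active chains and MIMO modes computed above is what gets cached in enh_maxpwr[].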
*/ 1721 1722 DPRINTF(("enhinfo %d, maxpwr=%d\n", i, maxpwr)); 1723 sc->enh_maxpwr[i] = maxpwr; 1724 } 1725 } 1726 1727 static struct ieee80211_node * 1728 iwn_node_alloc(struct ieee80211_node_table *ic __unused) 1729 { 1730 return malloc(sizeof (struct iwn_node), M_80211_NODE, M_NOWAIT | M_ZERO); 1731 } 1732 1733 static void 1734 iwn_newassoc(struct ieee80211_node *ni, int isnew) 1735 { 1736 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 1737 struct iwn_node *wn = (void *)ni; 1738 uint8_t rate; 1739 int ridx, i; 1740 1741 ieee80211_amrr_node_init(&sc->amrr, &wn->amn); 1742 /* Start at lowest available bit-rate, AMRR will raise. */ 1743 ni->ni_txrate = 0; 1744 1745 for (i = 0; i < ni->ni_rates.rs_nrates; i++) { 1746 rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL; 1747 /* Map 802.11 rate to HW rate index. */ 1748 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) 1749 if (iwn_rates[ridx].rate == rate) 1750 break; 1751 wn->ridx[i] = ridx; 1752 } 1753 } 1754 1755 static int 1756 iwn_media_change(struct ifnet *ifp) 1757 { 1758 struct iwn_softc *sc = ifp->if_softc; 1759 struct ieee80211com *ic = &sc->sc_ic; 1760 uint8_t rate, ridx; 1761 int error; 1762 1763 error = ieee80211_media_change(ifp); 1764 if (error != ENETRESET) 1765 return error; 1766 1767 if (ic->ic_fixed_rate != -1) { 1768 rate = ic->ic_sup_rates[ic->ic_curmode]. 1769 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL; 1770 /* Map 802.11 rate to HW rate index. */ 1771 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) 1772 if (iwn_rates[ridx].rate == rate) 1773 break; 1774 sc->fixed_ridx = ridx; 1775 } 1776 1777 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 1778 (IFF_UP | IFF_RUNNING)) { 1779 iwn_stop(ifp, 0); 1780 error = iwn_init(ifp); 1781 } 1782 return error; 1783 } 1784 1785 static int 1786 iwn_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 1787 { 1788 struct ifnet *ifp = ic->ic_ifp; 1789 struct iwn_softc *sc = ifp->if_softc; 1790 int error; 1791 1792 callout_stop(&sc->calib_to); 1793 1794 switch (nstate) { 1795 case IEEE80211_S_SCAN: 1796 /* XXX Do not abort a running scan. */ 1797 if (sc->sc_flags & IWN_FLAG_SCANNING) { 1798 if (ic->ic_state != nstate) 1799 aprint_error_dev(sc->sc_dev, "scan request(%d) " 1800 "while scanning(%d) ignored\n", nstate, 1801 ic->ic_state); 1802 break; 1803 } 1804 1805 /* XXX Not sure if call and flags are needed. */ 1806 ieee80211_node_table_reset(&ic->ic_scan); 1807 ic->ic_flags |= IEEE80211_F_SCAN | IEEE80211_F_ASCAN; 1808 sc->sc_flags |= IWN_FLAG_SCANNING; 1809 1810 /* Make the link LED blink while we're scanning. */ 1811 iwn_set_led(sc, IWN_LED_LINK, 10, 10); 1812 1813 if ((error = iwn_scan(sc, IEEE80211_CHAN_2GHZ)) != 0) { 1814 aprint_error_dev(sc->sc_dev, 1815 "could not initiate scan\n"); 1816 return error; 1817 } 1818 ic->ic_state = nstate; 1819 return 0; 1820 1821 case IEEE80211_S_ASSOC: 1822 if (ic->ic_state != IEEE80211_S_RUN) 1823 break; 1824 /* FALLTHROUGH */ 1825 case IEEE80211_S_AUTH: 1826 /* Reset state to handle reassociations correctly. 
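Clear the association ID and the BSS filter bit in the RXON structure and restart calibration, so the firmware drops its notion of the old BSS before iwn_auth() runs.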
*/ 1827 sc->rxon.associd = 0; 1828 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS); 1829 sc->calib.state = IWN_CALIB_STATE_INIT; 1830 1831 if ((error = iwn_auth(sc)) != 0) { 1832 aprint_error_dev(sc->sc_dev, 1833 "could not move to auth state\n"); 1834 return error; 1835 } 1836 break; 1837 1838 case IEEE80211_S_RUN: 1839 if ((error = iwn_run(sc)) != 0) { 1840 aprint_error_dev(sc->sc_dev, 1841 "could not move to run state\n"); 1842 return error; 1843 } 1844 break; 1845 1846 case IEEE80211_S_INIT: 1847 sc->sc_flags &= ~IWN_FLAG_SCANNING; 1848 sc->calib.state = IWN_CALIB_STATE_INIT; 1849 break; 1850 } 1851 1852 return sc->sc_newstate(ic, nstate, arg); 1853 } 1854 1855 static void 1856 iwn_iter_func(void *arg, struct ieee80211_node *ni) 1857 { 1858 struct iwn_softc *sc = arg; 1859 struct iwn_node *wn = (struct iwn_node *)ni; 1860 1861 ieee80211_amrr_choose(&sc->amrr, ni, &wn->amn); 1862 } 1863 1864 static void 1865 iwn_calib_timeout(void *arg) 1866 { 1867 struct iwn_softc *sc = arg; 1868 struct ieee80211com *ic = &sc->sc_ic; 1869 int s; 1870 1871 s = splnet(); 1872 if (ic->ic_fixed_rate == -1) { 1873 if (ic->ic_opmode == IEEE80211_M_STA) 1874 iwn_iter_func(sc, ic->ic_bss); 1875 else 1876 ieee80211_iterate_nodes(&ic->ic_sta, iwn_iter_func, sc); 1877 } 1878 /* Force automatic TX power calibration every 60 secs. */ 1879 if (++sc->calib_cnt >= 120) { 1880 uint32_t flags = 0; 1881 1882 DPRINTF(("sending request for statistics\n")); 1883 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, 1884 sizeof flags, 1); 1885 sc->calib_cnt = 0; 1886 } 1887 splx(s); 1888 1889 /* Automatic rate control triggered every 500ms. */ 1890 callout_schedule(&sc->calib_to, hz/2); 1891 } 1892 1893 /* 1894 * Process an RX_PHY firmware notification. This is usually immediately 1895 * followed by an MPDU_RX_DONE notification. 1896 */ 1897 static void 1898 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, 1899 struct iwn_rx_data *data) 1900 { 1901 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); 1902 1903 DPRINTFN(2, ("received PHY stats\n")); 1904 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 1905 sizeof (*stat), BUS_DMASYNC_POSTREAD); 1906 1907 /* Save RX statistics, they will be used on MPDU_RX_DONE. */ 1908 memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); 1909 sc->last_rx_valid = 1; 1910 } 1911 1912 /* 1913 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 1914 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 1915 */ 1916 static void 1917 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 1918 struct iwn_rx_data *data) 1919 { 1920 struct iwn_ops *ops = &sc->ops; 1921 struct ieee80211com *ic = &sc->sc_ic; 1922 struct ifnet *ifp = ic->ic_ifp; 1923 struct iwn_rx_ring *ring = &sc->rxq; 1924 struct ieee80211_frame *wh; 1925 struct ieee80211_node *ni; 1926 struct mbuf *m, *m1; 1927 struct iwn_rx_stat *stat; 1928 char *head; 1929 uint32_t flags; 1930 int error, len, rssi; 1931 1932 if (desc->type == IWN_MPDU_RX_DONE) { 1933 /* Check for prior RX_PHY notification. 
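 * Illustrative sketch of the notification pairing handled here:
 *
 *   IWN_RX_PHY       -> iwn_rx_phy() caches the PHY statistics and
 *                       sets last_rx_valid
 *   IWN_MPDU_RX_DONE -> this function consumes the cached statistics
 *                       and clears last_rx_valid
 *
 * so an MPDU_RX_DONE arriving without its RX_PHY is dropped below.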
*/ 1934 if (!sc->last_rx_valid) { 1935 DPRINTF(("missing RX_PHY\n")); 1936 return; 1937 } 1938 sc->last_rx_valid = 0; 1939 stat = &sc->last_rx_stat; 1940 } else 1941 stat = (struct iwn_rx_stat *)(desc + 1); 1942 1943 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWN_RBUF_SIZE, 1944 BUS_DMASYNC_POSTREAD); 1945 1946 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 1947 aprint_error_dev(sc->sc_dev, 1948 "invalid RX statistic header\n"); 1949 return; 1950 } 1951 if (desc->type == IWN_MPDU_RX_DONE) { 1952 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 1953 head = (char *)(mpdu + 1); 1954 len = le16toh(mpdu->len); 1955 } else { 1956 head = (char *)(stat + 1) + stat->cfg_phy_len; 1957 len = le16toh(stat->len); 1958 } 1959 1960 flags = le32toh(*(uint32_t *)(head + len)); 1961 1962 /* Discard frames with a bad FCS early. */ 1963 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 1964 DPRINTFN(2, ("RX flags error %x\n", flags)); 1965 ifp->if_ierrors++; 1966 return; 1967 } 1968 /* Discard frames that are too short. */ 1969 if (len < sizeof (*wh)) { 1970 DPRINTF(("frame too short: %d\n", len)); 1971 ic->ic_stats.is_rx_tooshort++; 1972 ifp->if_ierrors++; 1973 return; 1974 } 1975 1976 m1 = MCLGETIalt(sc, M_DONTWAIT, NULL, IWN_RBUF_SIZE); 1977 if (m1 == NULL) { 1978 ic->ic_stats.is_rx_nobuf++; 1979 ifp->if_ierrors++; 1980 return; 1981 } 1982 bus_dmamap_unload(sc->sc_dmat, data->map); 1983 1984 error = bus_dmamap_load(sc->sc_dmat, data->map, mtod(m1, void *), 1985 IWN_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_READ); 1986 if (error != 0) { 1987 m_freem(m1); 1988 1989 /* Try to reload the old mbuf. */ 1990 error = bus_dmamap_load(sc->sc_dmat, data->map, 1991 mtod(data->m, void *), IWN_RBUF_SIZE, NULL, 1992 BUS_DMA_NOWAIT | BUS_DMA_READ); 1993 if (error != 0) { 1994 panic("%s: could not load old RX mbuf", 1995 device_xname(sc->sc_dev)); 1996 } 1997 /* Physical address may have changed. */ 1998 ring->desc[ring->cur] = 1999 htole32(data->map->dm_segs[0].ds_addr >> 8); 2000 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 2001 ring->cur * sizeof (uint32_t), sizeof (uint32_t), 2002 BUS_DMASYNC_PREWRITE); 2003 ifp->if_ierrors++; 2004 return; 2005 } 2006 2007 m = data->m; 2008 data->m = m1; 2009 /* Update RX descriptor. */ 2010 ring->desc[ring->cur] = htole32(data->map->dm_segs[0].ds_addr >> 8); 2011 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 2012 ring->cur * sizeof (uint32_t), sizeof (uint32_t), 2013 BUS_DMASYNC_PREWRITE); 2014 2015 /* Finalize mbuf. */ 2016 m->m_pkthdr.rcvif = ifp; 2017 m->m_data = head; 2018 m->m_pkthdr.len = m->m_len = len; 2019 2020 /* Grab a reference to the source node. */ 2021 wh = mtod(m, struct ieee80211_frame *); 2022 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2023 2024 /* XXX OpenBSD adds decryption here (see also comments in iwn_tx). */ 2025 /* NetBSD does decryption in ieee80211_input. 
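 * Below, ops->get_rssi() converts the saved PHY statistics into a
 * signal level in dBm and the radiotap switch maps the raw PLCP
 * signal value in stat->rate to a rate in 500 kb/s units, e.g.
 * (illustration) CCK 10 -> 2 (1 Mb/s) and OFDM 0xd -> 12 (6 Mb/s).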
*/ 2026 2027 rssi = ops->get_rssi(stat); 2028 2029 /* XXX Added for NetBSD: scans never stop without it */ 2030 if (ic->ic_state == IEEE80211_S_SCAN) 2031 iwn_fix_channel(ic, m); 2032 2033 if (sc->sc_drvbpf != NULL) { 2034 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 2035 2036 tap->wr_flags = 0; 2037 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 2038 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2039 tap->wr_chan_freq = 2040 htole16(ic->ic_channels[stat->chan].ic_freq); 2041 tap->wr_chan_flags = 2042 htole16(ic->ic_channels[stat->chan].ic_flags); 2043 tap->wr_dbm_antsignal = (int8_t)rssi; 2044 tap->wr_dbm_antnoise = (int8_t)sc->noise; 2045 tap->wr_tsft = stat->tstamp; 2046 switch (stat->rate) { 2047 /* CCK rates. */ 2048 case 10: tap->wr_rate = 2; break; 2049 case 20: tap->wr_rate = 4; break; 2050 case 55: tap->wr_rate = 11; break; 2051 case 110: tap->wr_rate = 22; break; 2052 /* OFDM rates. */ 2053 case 0xd: tap->wr_rate = 12; break; 2054 case 0xf: tap->wr_rate = 18; break; 2055 case 0x5: tap->wr_rate = 24; break; 2056 case 0x7: tap->wr_rate = 36; break; 2057 case 0x9: tap->wr_rate = 48; break; 2058 case 0xb: tap->wr_rate = 72; break; 2059 case 0x1: tap->wr_rate = 96; break; 2060 case 0x3: tap->wr_rate = 108; break; 2061 /* Unknown rate: should not happen. */ 2062 default: tap->wr_rate = 0; 2063 } 2064 2065 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m); 2066 } 2067 2068 /* Send the frame to the 802.11 layer. */ 2069 ieee80211_input(ic, m, ni, rssi, 0); 2070 2071 /* Node is no longer needed. */ 2072 ieee80211_free_node(ni); 2073 } 2074 2075 #ifndef IEEE80211_NO_HT 2076 /* Process an incoming Compressed BlockAck. */ 2077 static void 2078 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2079 struct iwn_rx_data *data) 2080 { 2081 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 2082 struct iwn_tx_ring *txq; 2083 2084 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), sizeof (*ba), 2085 BUS_DMASYNC_POSTREAD); 2086 2087 txq = &sc->txq[le16toh(ba->qid)]; 2088 /* XXX TBD */ 2089 } 2090 #endif 2091 2092 /* 2093 * Process a CALIBRATION_RESULT notification sent by the initialization 2094 * firmware on response to a CMD_CALIB_CONFIG command (5000 only). 2095 */ 2096 static void 2097 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2098 struct iwn_rx_data *data) 2099 { 2100 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 2101 int len, idx = -1; 2102 2103 /* Runtime firmware should not send such a notification. */ 2104 if (sc->sc_flags & IWN_FLAG_CALIB_DONE) 2105 return; 2106 2107 len = (le32toh(desc->len) & 0x3fff) - 4; 2108 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), len, 2109 BUS_DMASYNC_POSTREAD); 2110 2111 switch (calib->code) { 2112 case IWN5000_PHY_CALIB_DC: 2113 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 2114 idx = 0; 2115 break; 2116 case IWN5000_PHY_CALIB_LO: 2117 idx = 1; 2118 break; 2119 case IWN5000_PHY_CALIB_TX_IQ: 2120 idx = 2; 2121 break; 2122 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 2123 if (sc->hw_type < IWN_HW_REV_TYPE_6000 && 2124 sc->hw_type != IWN_HW_REV_TYPE_5150) 2125 idx = 3; 2126 break; 2127 case IWN5000_PHY_CALIB_BASE_BAND: 2128 idx = 4; 2129 break; 2130 } 2131 if (idx == -1) /* Ignore other results. */ 2132 return; 2133 2134 /* Save calibration result. 
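 * The buffers kept in sc->calibcmd[] are not used immediately: once
 * the runtime firmware is loaded they are replayed to it as
 * IWN_CMD_PHY_CALIB commands (see iwn5000_send_calibration), which is
 * why only results from the initialization firmware are stored here.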
*/ 2135 if (sc->calibcmd[idx].buf != NULL) 2136 free(sc->calibcmd[idx].buf, M_DEVBUF); 2137 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT); 2138 if (sc->calibcmd[idx].buf == NULL) { 2139 DPRINTF(("not enough memory for calibration result %d\n", 2140 calib->code)); 2141 return; 2142 } 2143 DPRINTF(("saving calibration result code=%d len=%d\n", 2144 calib->code, len)); 2145 sc->calibcmd[idx].len = len; 2146 memcpy(sc->calibcmd[idx].buf, calib, len); 2147 } 2148 2149 /* 2150 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 2151 * The latter is sent by the firmware after each received beacon. 2152 */ 2153 static void 2154 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2155 struct iwn_rx_data *data) 2156 { 2157 struct iwn_ops *ops = &sc->ops; 2158 struct ieee80211com *ic = &sc->sc_ic; 2159 struct iwn_calib_state *calib = &sc->calib; 2160 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 2161 int temp; 2162 2163 /* Ignore statistics received during a scan. */ 2164 if (ic->ic_state != IEEE80211_S_RUN) 2165 return; 2166 2167 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2168 sizeof (*stats), BUS_DMASYNC_POSTREAD); 2169 2170 DPRINTFN(3, ("received statistics (cmd=%d)\n", desc->type)); 2171 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */ 2172 2173 /* Test if temperature has changed. */ 2174 if (stats->general.temp != sc->rawtemp) { 2175 /* Convert "raw" temperature to degC. */ 2176 sc->rawtemp = stats->general.temp; 2177 temp = ops->get_temperature(sc); 2178 DPRINTFN(2, ("temperature=%dC\n", temp)); 2179 2180 /* Update TX power if need be (4965AGN only). */ 2181 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 2182 iwn4965_power_calibration(sc, temp); 2183 } 2184 2185 if (desc->type != IWN_BEACON_STATISTICS) 2186 return; /* Reply to a statistics request. */ 2187 2188 sc->noise = iwn_get_noise(&stats->rx.general); 2189 2190 /* Test that RSSI and noise are present in stats report. */ 2191 if (le32toh(stats->rx.general.flags) != 1) { 2192 DPRINTF(("received statistics without RSSI\n")); 2193 return; 2194 } 2195 2196 /* 2197 * XXX Differential gain calibration makes the 6005 firmware 2198 * crap out, so skip it for now. This effectively disables 2199 * sensitivity tuning as well. 2200 */ 2201 if (sc->hw_type == IWN_HW_REV_TYPE_6005) 2202 return; 2203 2204 if (calib->state == IWN_CALIB_STATE_ASSOC) 2205 iwn_collect_noise(sc, &stats->rx.general); 2206 else if (calib->state == IWN_CALIB_STATE_RUN) 2207 iwn_tune_sensitivity(sc, &stats->rx); 2208 } 2209 2210 /* 2211 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 2212 * and 5000 adapters have different incompatible TX status formats. 2213 */ 2214 static void 2215 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2216 struct iwn_rx_data *data) 2217 { 2218 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 2219 2220 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2221 sizeof (*stat), BUS_DMASYNC_POSTREAD); 2222 iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff); 2223 } 2224 2225 static void 2226 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2227 struct iwn_rx_data *data) 2228 { 2229 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 2230 2231 #ifdef notyet 2232 /* Reset TX scheduler slot. 
*/ 2233 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); 2234 #endif 2235 2236 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2237 sizeof (*stat), BUS_DMASYNC_POSTREAD); 2238 iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff); 2239 } 2240 2241 /* 2242 * Adapter-independent backend for TX_DONE firmware notifications. 2243 */ 2244 static void 2245 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt, 2246 uint8_t status) 2247 { 2248 struct ieee80211com *ic = &sc->sc_ic; 2249 struct ifnet *ifp = ic->ic_ifp; 2250 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2251 struct iwn_tx_data *data = &ring->data[desc->idx]; 2252 struct iwn_node *wn = (struct iwn_node *)data->ni; 2253 2254 /* Update rate control statistics. */ 2255 wn->amn.amn_txcnt++; 2256 if (ackfailcnt > 0) 2257 wn->amn.amn_retrycnt++; 2258 2259 if (status != 1 && status != 2) 2260 ifp->if_oerrors++; 2261 else 2262 ifp->if_opackets++; 2263 2264 /* Unmap and free mbuf. */ 2265 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize, 2266 BUS_DMASYNC_POSTWRITE); 2267 bus_dmamap_unload(sc->sc_dmat, data->map); 2268 m_freem(data->m); 2269 data->m = NULL; 2270 ieee80211_free_node(data->ni); 2271 data->ni = NULL; 2272 2273 sc->sc_tx_timer = 0; 2274 if (--ring->queued < IWN_TX_RING_LOMARK) { 2275 sc->qfullmsk &= ~(1 << ring->qid); 2276 if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) { 2277 ifp->if_flags &= ~IFF_OACTIVE; 2278 (*ifp->if_start)(ifp); 2279 } 2280 } 2281 } 2282 2283 /* 2284 * Process a "command done" firmware notification. This is where we wakeup 2285 * processes waiting for a synchronous command completion. 2286 */ 2287 static void 2288 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 2289 { 2290 struct iwn_tx_ring *ring = &sc->txq[4]; 2291 struct iwn_tx_data *data; 2292 2293 if ((desc->qid & 0xf) != 4) 2294 return; /* Not a command ack. */ 2295 2296 data = &ring->data[desc->idx]; 2297 2298 /* If the command was mapped in an mbuf, free it. */ 2299 if (data->m != NULL) { 2300 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 2301 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2302 bus_dmamap_unload(sc->sc_dmat, data->map); 2303 m_freem(data->m); 2304 data->m = NULL; 2305 } 2306 wakeup(&ring->desc[desc->idx]); 2307 } 2308 2309 /* 2310 * Process an INT_FH_RX or INT_SW_RX interrupt. 2311 */ 2312 static void 2313 iwn_notif_intr(struct iwn_softc *sc) 2314 { 2315 struct iwn_ops *ops = &sc->ops; 2316 struct ieee80211com *ic = &sc->sc_ic; 2317 struct ifnet *ifp = ic->ic_ifp; 2318 uint16_t hw; 2319 2320 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map, 2321 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD); 2322 2323 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff; 2324 while (sc->rxq.cur != hw) { 2325 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2326 struct iwn_rx_desc *desc; 2327 2328 bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof (*desc), 2329 BUS_DMASYNC_POSTREAD); 2330 desc = mtod(data->m, struct iwn_rx_desc *); 2331 2332 DPRINTFN(4, ("notification qid=%d idx=%d flags=%x type=%d\n", 2333 desc->qid & 0xf, desc->idx, desc->flags, desc->type)); 2334 2335 if (!(desc->qid & 0x80)) /* Reply to a command. */ 2336 iwn_cmd_done(sc, desc); 2337 2338 switch (desc->type) { 2339 case IWN_RX_PHY: 2340 iwn_rx_phy(sc, desc, data); 2341 break; 2342 2343 case IWN_RX_DONE: /* 4965AGN only. */ 2344 case IWN_MPDU_RX_DONE: 2345 /* An 802.11 frame has been received. 
*/ 2346 iwn_rx_done(sc, desc, data); 2347 break; 2348 #ifndef IEEE80211_NO_HT 2349 case IWN_RX_COMPRESSED_BA: 2350 /* A Compressed BlockAck has been received. */ 2351 iwn_rx_compressed_ba(sc, desc, data); 2352 break; 2353 #endif 2354 case IWN_TX_DONE: 2355 /* An 802.11 frame has been transmitted. */ 2356 ops->tx_done(sc, desc, data); 2357 break; 2358 2359 case IWN_RX_STATISTICS: 2360 case IWN_BEACON_STATISTICS: 2361 iwn_rx_statistics(sc, desc, data); 2362 break; 2363 2364 case IWN_BEACON_MISSED: 2365 { 2366 struct iwn_beacon_missed *miss = 2367 (struct iwn_beacon_missed *)(desc + 1); 2368 2369 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2370 sizeof (*miss), BUS_DMASYNC_POSTREAD); 2371 /* 2372 * If more than 5 consecutive beacons are missed, 2373 * reinitialize the sensitivity state machine. 2374 */ 2375 DPRINTF(("beacons missed %d/%d\n", 2376 le32toh(miss->consecutive), le32toh(miss->total))); 2377 if (ic->ic_state == IEEE80211_S_RUN && 2378 le32toh(miss->consecutive) > 5) 2379 (void)iwn_init_sensitivity(sc); 2380 break; 2381 } 2382 case IWN_UC_READY: 2383 { 2384 struct iwn_ucode_info *uc = 2385 (struct iwn_ucode_info *)(desc + 1); 2386 2387 /* The microcontroller is ready. */ 2388 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2389 sizeof (*uc), BUS_DMASYNC_POSTREAD); 2390 DPRINTF(("microcode alive notification version=%d.%d " 2391 "subtype=%x alive=%x\n", uc->major, uc->minor, 2392 uc->subtype, le32toh(uc->valid))); 2393 2394 if (le32toh(uc->valid) != 1) { 2395 aprint_error_dev(sc->sc_dev, 2396 "microcontroller initialization " 2397 "failed\n"); 2398 break; 2399 } 2400 if (uc->subtype == IWN_UCODE_INIT) { 2401 /* Save microcontroller report. */ 2402 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 2403 } 2404 /* Save the address of the error log in SRAM. */ 2405 sc->errptr = le32toh(uc->errptr); 2406 break; 2407 } 2408 case IWN_STATE_CHANGED: 2409 { 2410 uint32_t *status = (uint32_t *)(desc + 1); 2411 2412 /* Enabled/disabled notification. */ 2413 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2414 sizeof (*status), BUS_DMASYNC_POSTREAD); 2415 DPRINTF(("state changed to %x\n", le32toh(*status))); 2416 2417 if (le32toh(*status) & 1) { 2418 /* The radio button has to be pushed. */ 2419 aprint_error_dev(sc->sc_dev, 2420 "Radio transmitter is off\n"); 2421 /* Turn the interface down. */ 2422 ifp->if_flags &= ~IFF_UP; 2423 iwn_stop(ifp, 1); 2424 return; /* No further processing. */ 2425 } 2426 break; 2427 } 2428 case IWN_START_SCAN: 2429 { 2430 struct iwn_start_scan *scan = 2431 (struct iwn_start_scan *)(desc + 1); 2432 2433 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2434 sizeof (*scan), BUS_DMASYNC_POSTREAD); 2435 DPRINTFN(2, ("scanning channel %d status %x\n", 2436 scan->chan, le32toh(scan->status))); 2437 2438 /* Fix current channel. */ 2439 ic->ic_bss->ni_chan = &ic->ic_channels[scan->chan]; 2440 break; 2441 } 2442 case IWN_STOP_SCAN: 2443 { 2444 struct iwn_stop_scan *scan = 2445 (struct iwn_stop_scan *)(desc + 1); 2446 2447 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2448 sizeof (*scan), BUS_DMASYNC_POSTREAD); 2449 DPRINTF(("scan finished nchan=%d status=%d chan=%d\n", 2450 scan->nchan, scan->status, scan->chan)); 2451 2452 if (scan->status == 1 && scan->chan <= 14 && 2453 (sc->sc_flags & IWN_FLAG_HAS_5GHZ)) { 2454 /* 2455 * We just finished scanning 2GHz channels, 2456 * start scanning 5GHz ones. 
2457 */ 2458 if (iwn_scan(sc, IEEE80211_CHAN_5GHZ) == 0) 2459 break; 2460 } 2461 sc->sc_flags &= ~IWN_FLAG_SCANNING; 2462 ieee80211_end_scan(ic); 2463 break; 2464 } 2465 case IWN5000_CALIBRATION_RESULT: 2466 iwn5000_rx_calib_results(sc, desc, data); 2467 break; 2468 2469 case IWN5000_CALIBRATION_DONE: 2470 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 2471 wakeup(sc); 2472 break; 2473 } 2474 2475 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 2476 } 2477 2478 /* Tell the firmware what we have processed. */ 2479 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; 2480 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 2481 } 2482 2483 /* 2484 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2485 * from power-down sleep mode. 2486 */ 2487 static void 2488 iwn_wakeup_intr(struct iwn_softc *sc) 2489 { 2490 int qid; 2491 2492 DPRINTF(("ucode wakeup from power-down sleep\n")); 2493 2494 /* Wakeup RX and TX rings. */ 2495 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 2496 for (qid = 0; qid < sc->ntxqs; qid++) { 2497 struct iwn_tx_ring *ring = &sc->txq[qid]; 2498 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 2499 } 2500 } 2501 2502 /* 2503 * Dump the error log of the firmware when a firmware panic occurs. Although 2504 * we can't debug the firmware because it is neither open source nor free, it 2505 * can help us to identify certain classes of problems. 2506 */ 2507 static void 2508 iwn_fatal_intr(struct iwn_softc *sc) 2509 { 2510 struct iwn_fw_dump dump; 2511 int i; 2512 2513 /* Force a complete recalibration on next init. */ 2514 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 2515 2516 /* Check that the error log address is valid. */ 2517 if (sc->errptr < IWN_FW_DATA_BASE || 2518 sc->errptr + sizeof (dump) > 2519 IWN_FW_DATA_BASE + sc->fw_data_maxsz) { 2520 aprint_error_dev(sc->sc_dev, 2521 "bad firmware error log address 0x%08x\n", sc->errptr); 2522 return; 2523 } 2524 if (iwn_nic_lock(sc) != 0) { 2525 aprint_error_dev(sc->sc_dev, 2526 "could not read firmware error log\n"); 2527 return; 2528 } 2529 /* Read firmware error log from SRAM. */ 2530 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 2531 sizeof (dump) / sizeof (uint32_t)); 2532 iwn_nic_unlock(sc); 2533 2534 if (dump.valid == 0) { 2535 aprint_error_dev(sc->sc_dev, 2536 "firmware error log is empty\n"); 2537 return; 2538 } 2539 aprint_error("firmware error log:\n"); 2540 aprint_error(" error type = \"%s\" (0x%08X)\n", 2541 (dump.id < __arraycount(iwn_fw_errmsg)) ? 2542 iwn_fw_errmsg[dump.id] : "UNKNOWN", 2543 dump.id); 2544 aprint_error(" program counter = 0x%08X\n", dump.pc); 2545 aprint_error(" source line = 0x%08X\n", dump.src_line); 2546 aprint_error(" error data = 0x%08X%08X\n", 2547 dump.error_data[0], dump.error_data[1]); 2548 aprint_error(" branch link = 0x%08X%08X\n", 2549 dump.branch_link[0], dump.branch_link[1]); 2550 aprint_error(" interrupt link = 0x%08X%08X\n", 2551 dump.interrupt_link[0], dump.interrupt_link[1]); 2552 aprint_error(" time = %u\n", dump.time[0]); 2553 2554 /* Dump driver status (TX and RX rings) while we're here. 
*/ 2555 aprint_error("driver status:\n"); 2556 for (i = 0; i < sc->ntxqs; i++) { 2557 struct iwn_tx_ring *ring = &sc->txq[i]; 2558 aprint_error(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2559 i, ring->qid, ring->cur, ring->queued); 2560 } 2561 aprint_error(" rx ring: cur=%d\n", sc->rxq.cur); 2562 aprint_error(" 802.11 state %d\n", sc->sc_ic.ic_state); 2563 } 2564 2565 static int 2566 iwn_intr(void *arg) 2567 { 2568 struct iwn_softc *sc = arg; 2569 struct ifnet *ifp = sc->sc_ic.ic_ifp; 2570 uint32_t r1, r2, tmp; 2571 2572 /* Disable interrupts. */ 2573 IWN_WRITE(sc, IWN_INT_MASK, 0); 2574 2575 /* Read interrupts from ICT (fast) or from registers (slow). */ 2576 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2577 tmp = 0; 2578 while (sc->ict[sc->ict_cur] != 0) { 2579 tmp |= sc->ict[sc->ict_cur]; 2580 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 2581 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 2582 } 2583 tmp = le32toh(tmp); 2584 if (tmp == 0xffffffff) /* Shouldn't happen. */ 2585 tmp = 0; 2586 else if (tmp & 0xc0000) /* Workaround a HW bug. */ 2587 tmp |= 0x8000; 2588 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 2589 r2 = 0; /* Unused. */ 2590 } else { 2591 r1 = IWN_READ(sc, IWN_INT); 2592 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 2593 return 0; /* Hardware gone! */ 2594 r2 = IWN_READ(sc, IWN_FH_INT); 2595 } 2596 if (r1 == 0 && r2 == 0) { 2597 if (ifp->if_flags & IFF_UP) 2598 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 2599 return 0; /* Interrupt not for us. */ 2600 } 2601 2602 /* Acknowledge interrupts. */ 2603 IWN_WRITE(sc, IWN_INT, r1); 2604 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 2605 IWN_WRITE(sc, IWN_FH_INT, r2); 2606 2607 if (r1 & IWN_INT_RF_TOGGLED) { 2608 tmp = IWN_READ(sc, IWN_GP_CNTRL); 2609 aprint_error_dev(sc->sc_dev, 2610 "RF switch: radio %s\n", 2611 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); 2612 } 2613 if (r1 & IWN_INT_CT_REACHED) { 2614 aprint_error_dev(sc->sc_dev, 2615 "critical temperature reached!\n"); 2616 } 2617 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 2618 aprint_error_dev(sc->sc_dev, 2619 "fatal firmware error\n"); 2620 /* Dump firmware error log and stop. */ 2621 iwn_fatal_intr(sc); 2622 ifp->if_flags &= ~IFF_UP; 2623 iwn_stop(ifp, 1); 2624 return 1; 2625 } 2626 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 2627 (r2 & IWN_FH_INT_RX)) { 2628 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2629 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 2630 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 2631 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 2632 IWN_INT_PERIODIC_DIS); 2633 iwn_notif_intr(sc); 2634 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 2635 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 2636 IWN_INT_PERIODIC_ENA); 2637 } 2638 } else 2639 iwn_notif_intr(sc); 2640 } 2641 2642 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 2643 if (sc->sc_flags & IWN_FLAG_USE_ICT) 2644 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 2645 wakeup(sc); /* FH DMA transfer completed. */ 2646 } 2647 2648 if (r1 & IWN_INT_ALIVE) 2649 wakeup(sc); /* Firmware is alive. */ 2650 2651 if (r1 & IWN_INT_WAKEUP) 2652 iwn_wakeup_intr(sc); 2653 2654 /* Re-enable interrupts. */ 2655 if (ifp->if_flags & IFF_UP) 2656 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 2657 2658 return 1; 2659 } 2660 2661 /* 2662 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 2663 * 5000 adapters use a slightly different format). 
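 * Sketch of the byte-count table entries written by the two variants
 * below (illustrative values only):
 *
 *   4965: entry = htole16(len + 8)
 *   5000: entry = htole16(id << 12 | (len + 8))
 *
 * e.g. for id = 1 and len = 48 the 5000 entry is 0x1038.  Entries for
 * the first IWN_SCHED_WINSZ slots are additionally mirrored past the
 * end of the ring so the scheduler's sliding window can wrap around.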
2664 */ 2665 static void 2666 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2667 uint16_t len) 2668 { 2669 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 2670 2671 *w = htole16(len + 8); 2672 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2673 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr, 2674 sizeof (uint16_t), 2675 BUS_DMASYNC_PREWRITE); 2676 if (idx < IWN_SCHED_WINSZ) { 2677 *(w + IWN_TX_RING_COUNT) = *w; 2678 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2679 (char *)(void *)(w + IWN_TX_RING_COUNT) - 2680 (char *)(void *)sc->sched_dma.vaddr, 2681 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2682 } 2683 } 2684 2685 static void 2686 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2687 uint16_t len) 2688 { 2689 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2690 2691 *w = htole16(id << 12 | (len + 8)); 2692 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2693 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr, 2694 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2695 if (idx < IWN_SCHED_WINSZ) { 2696 *(w + IWN_TX_RING_COUNT) = *w; 2697 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2698 (char *)(void *)(w + IWN_TX_RING_COUNT) - 2699 (char *)(void *)sc->sched_dma.vaddr, 2700 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2701 } 2702 } 2703 2704 #ifdef notyet 2705 static void 2706 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 2707 { 2708 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2709 2710 *w = (*w & htole16(0xf000)) | htole16(1); 2711 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2712 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr, 2713 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2714 if (idx < IWN_SCHED_WINSZ) { 2715 *(w + IWN_TX_RING_COUNT) = *w; 2716 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2717 (char *)(void *)(w + IWN_TX_RING_COUNT) - 2718 (char *)(void *)sc->sched_dma.vaddr, 2719 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2720 } 2721 } 2722 #endif 2723 2724 static int 2725 iwn_tx(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac) 2726 { 2727 struct ieee80211com *ic = &sc->sc_ic; 2728 struct iwn_node *wn = (void *)ni; 2729 struct iwn_tx_ring *ring; 2730 struct iwn_tx_desc *desc; 2731 struct iwn_tx_data *data; 2732 struct iwn_tx_cmd *cmd; 2733 struct iwn_cmd_data *tx; 2734 const struct iwn_rate *rinfo; 2735 struct ieee80211_frame *wh; 2736 struct ieee80211_key *k = NULL; 2737 struct mbuf *m1; 2738 uint32_t flags; 2739 u_int hdrlen; 2740 bus_dma_segment_t *seg; 2741 uint8_t tid, ridx, txant, type; 2742 int i, totlen, error, pad; 2743 2744 const struct chanAccParams *cap; 2745 int noack; 2746 int hdrlen2; 2747 2748 wh = mtod(m, struct ieee80211_frame *); 2749 hdrlen = ieee80211_anyhdrsize(wh); 2750 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2751 2752 hdrlen2 = (ieee80211_has_qos(wh)) ? 2753 sizeof (struct ieee80211_qosframe) : 2754 sizeof (struct ieee80211_frame); 2755 2756 if (hdrlen != hdrlen2) 2757 aprint_error_dev(sc->sc_dev, "hdrlen error (%d != %d)\n", 2758 hdrlen, hdrlen2); 2759 2760 /* XXX OpenBSD sets a different tid when using QOS */ 2761 tid = 0; 2762 if (ieee80211_has_qos(wh)) { 2763 cap = &ic->ic_wme.wme_chanParams; 2764 noack = cap->cap_wmeParams[ac].wmep_noackPolicy; 2765 } 2766 else 2767 noack = 0; 2768 2769 ring = &sc->txq[ac]; 2770 desc = &ring->desc[ring->cur]; 2771 data = &ring->data[ring->cur]; 2772 2773 /* Choose a TX rate index. 
*/ 2774 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 2775 type != IEEE80211_FC0_TYPE_DATA) { 2776 ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ? 2777 IWN_RIDX_OFDM6 : IWN_RIDX_CCK1; 2778 } else if (ic->ic_fixed_rate != -1) { 2779 ridx = sc->fixed_ridx; 2780 } else 2781 ridx = wn->ridx[ni->ni_txrate]; 2782 rinfo = &iwn_rates[ridx]; 2783 2784 /* Encrypt the frame if need be. */ 2785 /* 2786 * XXX For now, NetBSD swaps the encryption and bpf sections 2787 * in order to match old code and other drivers. Tests with 2788 * tcpdump indicates that the order is irrelevant, however, 2789 * as bpf produces unencrypted data for both ordering choices. 2790 */ 2791 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 2792 k = ieee80211_crypto_encap(ic, ni, m); 2793 if (k == NULL) { 2794 m_freem(m); 2795 return ENOBUFS; 2796 } 2797 /* Packet header may have moved, reset our local pointer. */ 2798 wh = mtod(m, struct ieee80211_frame *); 2799 } 2800 totlen = m->m_pkthdr.len; 2801 2802 if (sc->sc_drvbpf != NULL) { 2803 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 2804 2805 tap->wt_flags = 0; 2806 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq); 2807 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags); 2808 tap->wt_rate = rinfo->rate; 2809 tap->wt_hwqueue = ac; 2810 if (wh->i_fc[1] & IEEE80211_FC1_WEP) 2811 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2812 2813 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m); 2814 } 2815 2816 /* Prepare TX firmware command. */ 2817 cmd = &ring->cmd[ring->cur]; 2818 cmd->code = IWN_CMD_TX_DATA; 2819 cmd->flags = 0; 2820 cmd->qid = ring->qid; 2821 cmd->idx = ring->cur; 2822 2823 tx = (struct iwn_cmd_data *)cmd->data; 2824 /* NB: No need to clear tx, all fields are reinitialized here. */ 2825 tx->scratch = 0; /* clear "scratch" area */ 2826 2827 flags = 0; 2828 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2829 /* Unicast frame, check if an ACK is expected. */ 2830 if (!noack) 2831 flags |= IWN_TX_NEED_ACK; 2832 } 2833 2834 #ifdef notyet 2835 /* XXX NetBSD does not define IEEE80211_FC0_SUBTYPE_BAR */ 2836 if ((wh->i_fc[0] & 2837 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 2838 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) 2839 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */ 2840 #endif 2841 2842 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2843 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */ 2844 2845 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2846 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2847 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2848 if (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) { 2849 flags |= IWN_TX_NEED_RTS; 2850 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2851 ridx >= IWN_RIDX_OFDM6) { 2852 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2853 flags |= IWN_TX_NEED_CTS; 2854 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2855 flags |= IWN_TX_NEED_RTS; 2856 } 2857 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { 2858 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 2859 /* 5000 autoselects RTS/CTS or CTS-to-self. 
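 * NB: on the 4965 the driver selects RTS/CTS or CTS-to-self itself
 * and requests the full TXOP, whereas on 5000 Series and later only
 * IWN_TX_NEED_PROTECTION is set below and the firmware chooses the
 * protection mechanism on its own.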
*/ 2860 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); 2861 flags |= IWN_TX_NEED_PROTECTION; 2862 } else 2863 flags |= IWN_TX_FULL_TXOP; 2864 } 2865 } 2866 2867 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 2868 type != IEEE80211_FC0_TYPE_DATA) 2869 tx->id = sc->broadcast_id; 2870 else 2871 tx->id = wn->id; 2872 2873 if (type == IEEE80211_FC0_TYPE_MGT) { 2874 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2875 2876 #ifndef IEEE80211_STA_ONLY 2877 /* Tell HW to set timestamp in probe responses. */ 2878 /* XXX NetBSD rev 1.11 added probe requests here but */ 2879 /* probe requests do not take timestamps (from Bergamini). */ 2880 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2881 flags |= IWN_TX_INSERT_TSTAMP; 2882 #endif 2883 /* XXX NetBSD rev 1.11 and 1.20 added AUTH/DAUTH and RTS/CTS */ 2884 /* changes here. These are not needed (from Bergamini). */ 2885 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2886 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2887 tx->timeout = htole16(3); 2888 else 2889 tx->timeout = htole16(2); 2890 } else 2891 tx->timeout = htole16(0); 2892 2893 if (hdrlen & 3) { 2894 /* First segment length must be a multiple of 4. */ 2895 flags |= IWN_TX_NEED_PADDING; 2896 pad = 4 - (hdrlen & 3); 2897 } else 2898 pad = 0; 2899 2900 tx->len = htole16(totlen); 2901 tx->tid = tid; 2902 tx->rts_ntries = 60; 2903 tx->data_ntries = 15; 2904 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 2905 tx->plcp = rinfo->plcp; 2906 tx->rflags = rinfo->flags; 2907 if (tx->id == sc->broadcast_id) { 2908 /* Group or management frame. */ 2909 tx->linkq = 0; 2910 /* XXX Alternate between antenna A and B? */ 2911 txant = IWN_LSB(sc->txchainmask); 2912 tx->rflags |= IWN_RFLAG_ANT(txant); 2913 } else { 2914 tx->linkq = ni->ni_rates.rs_nrates - ni->ni_txrate - 1; 2915 flags |= IWN_TX_LINKQ; /* enable MRR */ 2916 } 2917 /* Set physical address of "scratch area". */ 2918 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 2919 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 2920 2921 /* Copy 802.11 header in TX command. */ 2922 /* XXX NetBSD changed this in rev 1.20 */ 2923 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen); 2924 2925 /* Trim 802.11 header. */ 2926 m_adj(m, hdrlen); 2927 tx->security = 0; 2928 tx->flags = htole32(flags); 2929 2930 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 2931 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 2932 if (error != 0) { 2933 if (error != EFBIG) { 2934 aprint_error_dev(sc->sc_dev, 2935 "can't map mbuf (error %d)\n", error); 2936 m_freem(m); 2937 return error; 2938 } 2939 /* Too many DMA segments, linearize mbuf. */ 2940 MGETHDR(m1, M_DONTWAIT, MT_DATA); 2941 if (m1 == NULL) { 2942 m_freem(m); 2943 return ENOBUFS; 2944 } 2945 if (m->m_pkthdr.len > MHLEN) { 2946 MCLGET(m1, M_DONTWAIT); 2947 if (!(m1->m_flags & M_EXT)) { 2948 m_freem(m); 2949 m_freem(m1); 2950 return ENOBUFS; 2951 } 2952 } 2953 m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *)); 2954 m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len; 2955 m_freem(m); 2956 m = m1; 2957 2958 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 2959 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 2960 if (error != 0) { 2961 aprint_error_dev(sc->sc_dev, 2962 "can't map mbuf (error %d)\n", error); 2963 m_freem(m); 2964 return error; 2965 } 2966 } 2967 2968 data->m = m; 2969 data->ni = ni; 2970 2971 DPRINTFN(4, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n", 2972 ring->qid, ring->cur, m->m_pkthdr.len, data->map->dm_nsegs)); 2973 2974 /* Fill TX descriptor. 
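 * Illustration of the descriptor layout filled in below: each segment
 * entry is a 32-bit low address plus a 16-bit word whose upper 12
 * bits hold the segment length and whose lower 4 bits hold the high
 * bits of the DMA address, hence the "len << 4 | IWN_HIADDR(addr)"
 * encoding.  Segment 0 always covers the TX command plus the copied
 * 802.11 header (4 + sizeof (struct iwn_cmd_data) + hdrlen + pad
 * bytes); the mbuf data segments follow.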
*/ 2975 desc->nsegs = 1 + data->map->dm_nsegs; 2976 /* First DMA segment is used by the TX command. */ 2977 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 2978 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 2979 (4 + sizeof (*tx) + hdrlen + pad) << 4); 2980 /* Other DMA segments are for data payload. */ 2981 seg = data->map->dm_segs; 2982 for (i = 1; i <= data->map->dm_nsegs; i++) { 2983 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 2984 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 2985 seg->ds_len << 4); 2986 seg++; 2987 } 2988 2989 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize, 2990 BUS_DMASYNC_PREWRITE); 2991 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map, 2992 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr, 2993 sizeof (*cmd), BUS_DMASYNC_PREWRITE); 2994 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 2995 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr, 2996 sizeof (*desc), BUS_DMASYNC_PREWRITE); 2997 2998 #ifdef notyet 2999 /* Update TX scheduler. */ 3000 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 3001 #endif 3002 3003 /* Kick TX ring. */ 3004 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3005 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3006 3007 /* Mark TX ring as full if we reach a certain threshold. */ 3008 if (++ring->queued > IWN_TX_RING_HIMARK) 3009 sc->qfullmsk |= 1 << ring->qid; 3010 3011 return 0; 3012 } 3013 3014 static void 3015 iwn_start(struct ifnet *ifp) 3016 { 3017 struct iwn_softc *sc = ifp->if_softc; 3018 struct ieee80211com *ic = &sc->sc_ic; 3019 struct ieee80211_node *ni; 3020 struct ether_header *eh; 3021 struct mbuf *m; 3022 int ac; 3023 3024 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 3025 return; 3026 3027 for (;;) { 3028 if (sc->qfullmsk != 0) { 3029 ifp->if_flags |= IFF_OACTIVE; 3030 break; 3031 } 3032 /* Send pending management frames first. */ 3033 IF_DEQUEUE(&ic->ic_mgtq, m); 3034 if (m != NULL) { 3035 ni = (void *)m->m_pkthdr.rcvif; 3036 ac = 0; 3037 goto sendit; 3038 } 3039 if (ic->ic_state != IEEE80211_S_RUN) 3040 break; 3041 3042 /* Encapsulate and send data frames. */ 3043 IFQ_DEQUEUE(&ifp->if_snd, m); 3044 if (m == NULL) 3045 break; 3046 if (m->m_len < sizeof (*eh) && 3047 (m = m_pullup(m, sizeof (*eh))) == NULL) { 3048 ifp->if_oerrors++; 3049 continue; 3050 } 3051 eh = mtod(m, struct ether_header *); 3052 ni = ieee80211_find_txnode(ic, eh->ether_dhost); 3053 if (ni == NULL) { 3054 m_freem(m); 3055 ifp->if_oerrors++; 3056 continue; 3057 } 3058 /* classify mbuf so we can find which tx ring to use */ 3059 if (ieee80211_classify(ic, m, ni) != 0) { 3060 m_freem(m); 3061 ieee80211_free_node(ni); 3062 ifp->if_oerrors++; 3063 continue; 3064 } 3065 3066 /* No QoS encapsulation for EAPOL frames. */ 3067 ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ? 
3068 M_WME_GETAC(m) : WME_AC_BE; 3069 3070 bpf_mtap(ifp, m); 3071 3072 if ((m = ieee80211_encap(ic, m, ni)) == NULL) { 3073 ieee80211_free_node(ni); 3074 ifp->if_oerrors++; 3075 continue; 3076 } 3077 sendit: 3078 bpf_mtap3(ic->ic_rawbpf, m); 3079 3080 if (iwn_tx(sc, m, ni, ac) != 0) { 3081 ieee80211_free_node(ni); 3082 ifp->if_oerrors++; 3083 continue; 3084 } 3085 3086 sc->sc_tx_timer = 5; 3087 ifp->if_timer = 1; 3088 } 3089 } 3090 3091 static void 3092 iwn_watchdog(struct ifnet *ifp) 3093 { 3094 struct iwn_softc *sc = ifp->if_softc; 3095 3096 ifp->if_timer = 0; 3097 3098 if (sc->sc_tx_timer > 0) { 3099 if (--sc->sc_tx_timer == 0) { 3100 aprint_error_dev(sc->sc_dev, 3101 "device timeout\n"); 3102 ifp->if_flags &= ~IFF_UP; 3103 iwn_stop(ifp, 1); 3104 ifp->if_oerrors++; 3105 return; 3106 } 3107 ifp->if_timer = 1; 3108 } 3109 3110 ieee80211_watchdog(&sc->sc_ic); 3111 } 3112 3113 static int 3114 iwn_ioctl(struct ifnet *ifp, u_long cmd, void *data) 3115 { 3116 struct iwn_softc *sc = ifp->if_softc; 3117 struct ieee80211com *ic = &sc->sc_ic; 3118 const struct sockaddr *sa; 3119 int s, error = 0; 3120 3121 s = splnet(); 3122 3123 switch (cmd) { 3124 case SIOCSIFADDR: 3125 ifp->if_flags |= IFF_UP; 3126 #ifdef INET 3127 struct ifaddr *ifa = (struct ifaddr *)data; 3128 if (ifa->ifa_addr->sa_family == AF_INET) 3129 arp_ifinit(&ic->ic_ac, ifa); 3130 #endif 3131 /* FALLTHROUGH */ 3132 case SIOCSIFFLAGS: 3133 /* XXX Added as it is in every NetBSD driver */ 3134 if ((error = ifioctl_common(ifp, cmd, data)) != 0) 3135 break; 3136 if (ifp->if_flags & IFF_UP) { 3137 if (!(ifp->if_flags & IFF_RUNNING)) 3138 error = iwn_init(ifp); 3139 } else { 3140 if (ifp->if_flags & IFF_RUNNING) 3141 iwn_stop(ifp, 1); 3142 } 3143 break; 3144 3145 case SIOCADDMULTI: 3146 case SIOCDELMULTI: 3147 sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data); 3148 error = (cmd == SIOCADDMULTI) ? 3149 ether_addmulti(sa, &sc->sc_ec) : 3150 ether_delmulti(sa, &sc->sc_ec); 3151 3152 if (error == ENETRESET) 3153 error = 0; 3154 break; 3155 3156 default: 3157 error = ieee80211_ioctl(ic, cmd, data); 3158 } 3159 3160 if (error == ENETRESET) { 3161 error = 0; 3162 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 3163 (IFF_UP | IFF_RUNNING)) { 3164 iwn_stop(ifp, 0); 3165 error = iwn_init(ifp); 3166 } 3167 } 3168 3169 splx(s); 3170 return error; 3171 } 3172 3173 /* 3174 * Send a command to the firmware. 3175 */ 3176 static int 3177 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) 3178 { 3179 struct iwn_tx_ring *ring = &sc->txq[4]; 3180 struct iwn_tx_desc *desc; 3181 struct iwn_tx_data *data; 3182 struct iwn_tx_cmd *cmd; 3183 struct mbuf *m; 3184 bus_addr_t paddr; 3185 int totlen, error; 3186 3187 desc = &ring->desc[ring->cur]; 3188 data = &ring->data[ring->cur]; 3189 totlen = 4 + size; 3190 3191 if (size > sizeof cmd->data) { 3192 /* Command is too large to fit in a descriptor. 
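 * Sketch of the decision made below (illustrative): commands that fit
 * in the preallocated slot reuse ring->cmd[ring->cur] and its
 * precomputed cmd_paddr; larger commands are copied into a freshly
 * allocated mbuf (a cluster if totlen exceeds MHLEN) which is then
 * DMA-mapped separately and freed again in iwn_cmd_done().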
*/ 3193 if (totlen > MCLBYTES) 3194 return EINVAL; 3195 MGETHDR(m, M_DONTWAIT, MT_DATA); 3196 if (m == NULL) 3197 return ENOMEM; 3198 if (totlen > MHLEN) { 3199 MCLGET(m, M_DONTWAIT); 3200 if (!(m->m_flags & M_EXT)) { 3201 m_freem(m); 3202 return ENOMEM; 3203 } 3204 } 3205 cmd = mtod(m, struct iwn_tx_cmd *); 3206 error = bus_dmamap_load(sc->sc_dmat, data->map, cmd, totlen, 3207 NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3208 if (error != 0) { 3209 m_freem(m); 3210 return error; 3211 } 3212 data->m = m; 3213 paddr = data->map->dm_segs[0].ds_addr; 3214 } else { 3215 cmd = &ring->cmd[ring->cur]; 3216 paddr = data->cmd_paddr; 3217 } 3218 3219 cmd->code = code; 3220 cmd->flags = 0; 3221 cmd->qid = ring->qid; 3222 cmd->idx = ring->cur; 3223 memcpy(cmd->data, buf, size); 3224 3225 desc->nsegs = 1; 3226 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 3227 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); 3228 3229 if (size > sizeof cmd->data) { 3230 bus_dmamap_sync(sc->sc_dmat, data->map, 0, totlen, 3231 BUS_DMASYNC_PREWRITE); 3232 } else { 3233 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map, 3234 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr, 3235 totlen, BUS_DMASYNC_PREWRITE); 3236 } 3237 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 3238 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr, 3239 sizeof (*desc), BUS_DMASYNC_PREWRITE); 3240 3241 #ifdef notyet 3242 /* Update TX scheduler. */ 3243 ops->update_sched(sc, ring->qid, ring->cur, 0, 0); 3244 #endif 3245 DPRINTFN(4, ("iwn_cmd %d size=%d %s\n", code, size, async ? " (async)" : "")); 3246 3247 /* Kick command ring. */ 3248 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3249 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3250 3251 return async ? 0 : tsleep(desc, PCATCH, "iwncmd", hz); 3252 } 3253 3254 static int 3255 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3256 { 3257 struct iwn4965_node_info hnode; 3258 char *src, *dst; 3259 3260 /* 3261 * We use the node structure for 5000 Series internally (it is 3262 * a superset of the one for 4965AGN). We thus copy the common 3263 * fields before sending the command. 3264 */ 3265 src = (char *)node; 3266 dst = (char *)&hnode; 3267 memcpy(dst, src, 48); 3268 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ 3269 memcpy(dst + 48, src + 72, 20); 3270 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); 3271 } 3272 3273 static int 3274 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3275 { 3276 /* Direct mapping. */ 3277 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); 3278 } 3279 3280 static int 3281 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni) 3282 { 3283 struct iwn_node *wn = (void *)ni; 3284 struct ieee80211_rateset *rs = &ni->ni_rates; 3285 struct iwn_cmd_link_quality linkq; 3286 const struct iwn_rate *rinfo; 3287 uint8_t txant; 3288 int i, txrate; 3289 3290 /* Use the first valid TX antenna. */ 3291 txant = IWN_LSB(sc->txchainmask); 3292 3293 memset(&linkq, 0, sizeof linkq); 3294 linkq.id = wn->id; 3295 linkq.antmsk_1stream = txant; 3296 linkq.antmsk_2stream = IWN_ANT_AB; 3297 linkq.ampdu_max = 31; 3298 linkq.ampdu_threshold = 3; 3299 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3300 3301 /* Start at highest available bit-rate. 
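 * The retry table built below starts at the station's highest rate
 * index and steps down one entry in ni_rates per retry; once index 0
 * is reached it is simply repeated, e.g. (illustration) with a
 * 4-entry rate set the indices used are 3, 2, 1, 0, 0, 0, ... for all
 * IWN_MAX_TX_RETRIES slots.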
*/ 3302 txrate = rs->rs_nrates - 1; 3303 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) { 3304 rinfo = &iwn_rates[wn->ridx[txrate]]; 3305 linkq.retry[i].plcp = rinfo->plcp; 3306 linkq.retry[i].rflags = rinfo->flags; 3307 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant); 3308 /* Next retry at immediate lower bit-rate. */ 3309 if (txrate > 0) 3310 txrate--; 3311 } 3312 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1); 3313 } 3314 3315 /* 3316 * Broadcast node is used to send group-addressed and management frames. 3317 */ 3318 static int 3319 iwn_add_broadcast_node(struct iwn_softc *sc, int async) 3320 { 3321 struct iwn_ops *ops = &sc->ops; 3322 struct iwn_node_info node; 3323 struct iwn_cmd_link_quality linkq; 3324 const struct iwn_rate *rinfo; 3325 uint8_t txant; 3326 int i, error; 3327 3328 memset(&node, 0, sizeof node); 3329 IEEE80211_ADDR_COPY(node.macaddr, etherbroadcastaddr); 3330 node.id = sc->broadcast_id; 3331 DPRINTF(("adding broadcast node\n")); 3332 if ((error = ops->add_node(sc, &node, async)) != 0) 3333 return error; 3334 3335 /* Use the first valid TX antenna. */ 3336 txant = IWN_LSB(sc->txchainmask); 3337 3338 memset(&linkq, 0, sizeof linkq); 3339 linkq.id = sc->broadcast_id; 3340 linkq.antmsk_1stream = txant; 3341 linkq.antmsk_2stream = IWN_ANT_AB; 3342 linkq.ampdu_max = 64; 3343 linkq.ampdu_threshold = 3; 3344 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3345 3346 /* Use lowest mandatory bit-rate. */ 3347 rinfo = (sc->sc_ic.ic_curmode != IEEE80211_MODE_11A) ? 3348 &iwn_rates[IWN_RIDX_CCK1] : &iwn_rates[IWN_RIDX_OFDM6]; 3349 linkq.retry[0].plcp = rinfo->plcp; 3350 linkq.retry[0].rflags = rinfo->flags; 3351 linkq.retry[0].rflags |= IWN_RFLAG_ANT(txant); 3352 /* Use same bit-rate for all TX retries. */ 3353 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) { 3354 linkq.retry[i].plcp = linkq.retry[0].plcp; 3355 linkq.retry[i].rflags = linkq.retry[0].rflags; 3356 } 3357 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); 3358 } 3359 3360 static void 3361 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3362 { 3363 struct iwn_cmd_led led; 3364 3365 /* Clear microcode LED ownership. */ 3366 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL); 3367 3368 led.which = which; 3369 led.unit = htole32(10000); /* on/off in unit of 100ms */ 3370 led.off = off; 3371 led.on = on; 3372 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); 3373 } 3374 3375 /* 3376 * Set the critical temperature at which the firmware will stop the radio 3377 * and notify us. 3378 */ 3379 static int 3380 iwn_set_critical_temp(struct iwn_softc *sc) 3381 { 3382 struct iwn_critical_temp crit; 3383 int32_t temp; 3384 3385 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); 3386 3387 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 3388 temp = (IWN_CTOK(110) - sc->temp_off) * -5; 3389 else if (sc->hw_type == IWN_HW_REV_TYPE_4965) 3390 temp = IWN_CTOK(110); 3391 else 3392 temp = 110; 3393 memset(&crit, 0, sizeof crit); 3394 crit.tempR = htole32(temp); 3395 DPRINTF(("setting critical temperature to %d\n", temp)); 3396 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); 3397 } 3398 3399 static int 3400 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) 3401 { 3402 struct iwn_cmd_timing cmd; 3403 uint64_t val, mod; 3404 3405 memset(&cmd, 0, sizeof cmd); 3406 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3407 cmd.bintval = htole16(ni->ni_intval); 3408 cmd.lintval = htole16(10); 3409 3410 /* Compute remaining time until next beacon. 
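 * Worked example with illustrative numbers: for ni_intval = 100,
 * val = 100 * 1024 = 102400 usecs.  If the AP's timestamp is
 * 1000000 usecs, mod = 1000000 % 102400 = 78400 and binitval is set
 * to 102400 - 78400 = 24000 usecs, the time remaining until the next
 * expected beacon.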
*/ 3411 val = (uint64_t)ni->ni_intval * 1024; /* msecs -> usecs */ 3412 mod = le64toh(cmd.tstamp) % val; 3413 cmd.binitval = htole32((uint32_t)(val - mod)); 3414 3415 DPRINTF(("timing bintval=%u, tstamp=%" PRIu64 ", init=%" PRIu32 "\n", 3416 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod))); 3417 3418 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); 3419 } 3420 3421 static void 3422 iwn4965_power_calibration(struct iwn_softc *sc, int temp) 3423 { 3424 /* Adjust TX power if need be (delta >= 3 degC). */ 3425 DPRINTF(("temperature %d->%d\n", sc->temp, temp)); 3426 if (abs(temp - sc->temp) >= 3) { 3427 /* Record temperature of last calibration. */ 3428 sc->temp = temp; 3429 (void)iwn4965_set_txpower(sc, 1); 3430 } 3431 } 3432 3433 /* 3434 * Set TX power for current channel (each rate has its own power settings). 3435 * This function takes into account the regulatory information from EEPROM, 3436 * the current temperature and the current voltage. 3437 */ 3438 static int 3439 iwn4965_set_txpower(struct iwn_softc *sc, int async) 3440 { 3441 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3442 #define fdivround(a, b, n) \ 3443 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3444 /* Linear interpolation. */ 3445 #define interpolate(x, x1, y1, x2, y2, n) \ 3446 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3447 3448 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; 3449 struct ieee80211com *ic = &sc->sc_ic; 3450 struct iwn_ucode_info *uc = &sc->ucode_info; 3451 struct ieee80211_channel *ch; 3452 struct iwn4965_cmd_txpower cmd; 3453 struct iwn4965_eeprom_chan_samples *chans; 3454 const uint8_t *rf_gain, *dsp_gain; 3455 int32_t vdiff, tdiff; 3456 int i, c, grp, maxpwr; 3457 uint8_t chan; 3458 3459 /* Retrieve current channel from last RXON. */ 3460 chan = sc->rxon.chan; 3461 DPRINTF(("setting TX power for channel %d\n", chan)); 3462 ch = &ic->ic_channels[chan]; 3463 3464 memset(&cmd, 0, sizeof cmd); 3465 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1; 3466 cmd.chan = chan; 3467 3468 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 3469 maxpwr = sc->maxpwr5GHz; 3470 rf_gain = iwn4965_rf_gain_5ghz; 3471 dsp_gain = iwn4965_dsp_gain_5ghz; 3472 } else { 3473 maxpwr = sc->maxpwr2GHz; 3474 rf_gain = iwn4965_rf_gain_2ghz; 3475 dsp_gain = iwn4965_dsp_gain_2ghz; 3476 } 3477 3478 /* Compute voltage compensation. */ 3479 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7; 3480 if (vdiff > 0) 3481 vdiff *= 2; 3482 if (abs(vdiff) > 2) 3483 vdiff = 0; 3484 DPRINTF(("voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", 3485 vdiff, le32toh(uc->volt), sc->eeprom_voltage)); 3486 3487 /* Get channel attenuation group. */ 3488 if (chan <= 20) /* 1-20 */ 3489 grp = 4; 3490 else if (chan <= 43) /* 34-43 */ 3491 grp = 0; 3492 else if (chan <= 70) /* 44-70 */ 3493 grp = 1; 3494 else if (chan <= 124) /* 71-124 */ 3495 grp = 2; 3496 else /* 125-200 */ 3497 grp = 3; 3498 DPRINTF(("chan %d, attenuation group=%d\n", chan, grp)); 3499 3500 /* Get channel sub-band. */ 3501 for (i = 0; i < IWN_NBANDS; i++) 3502 if (sc->bands[i].lo != 0 && 3503 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) 3504 break; 3505 if (i == IWN_NBANDS) /* Can't happen in real-life. 
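 * Illustration of the fixed-point helpers used just below, with
 * hypothetical sample values: for calibrated sub-band channels 36 and
 * 48 with power samples 60 and 68,
 *   interpolate(40, 36, 60, 48, 68, 1)
 *     = 60 + fdivround(4 * 8, 12, 1)
 *     = 60 + ((2 * 32) / 12 + 1) / 2 = 63
 * i.e. a rounded linear interpolation for channel 40 done entirely in
 * integer arithmetic with one fractional bit.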
*/ 3506 return EINVAL; 3507 chans = sc->bands[i].chans; 3508 DPRINTF(("chan %d sub-band=%d\n", chan, i)); 3509 3510 for (c = 0; c < 2; c++) { 3511 uint8_t power, gain, temp; 3512 int maxchpwr, pwr, ridx, idx; 3513 3514 power = interpolate(chan, 3515 chans[0].num, chans[0].samples[c][1].power, 3516 chans[1].num, chans[1].samples[c][1].power, 1); 3517 gain = interpolate(chan, 3518 chans[0].num, chans[0].samples[c][1].gain, 3519 chans[1].num, chans[1].samples[c][1].gain, 1); 3520 temp = interpolate(chan, 3521 chans[0].num, chans[0].samples[c][1].temp, 3522 chans[1].num, chans[1].samples[c][1].temp, 1); 3523 DPRINTF(("TX chain %d: power=%d gain=%d temp=%d\n", 3524 c, power, gain, temp)); 3525 3526 /* Compute temperature compensation. */ 3527 tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; 3528 DPRINTF(("temperature compensation=%d (current=%d, " 3529 "EEPROM=%d)\n", tdiff, sc->temp, temp)); 3530 3531 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { 3532 /* Convert dBm to half-dBm. */ 3533 maxchpwr = sc->maxpwr[chan] * 2; 3534 if ((ridx / 8) & 1) 3535 maxchpwr -= 6; /* MIMO 2T: -3dB */ 3536 3537 pwr = maxpwr; 3538 3539 /* Adjust TX power based on rate. */ 3540 if ((ridx % 8) == 5) 3541 pwr -= 15; /* OFDM48: -7.5dB */ 3542 else if ((ridx % 8) == 6) 3543 pwr -= 17; /* OFDM54: -8.5dB */ 3544 else if ((ridx % 8) == 7) 3545 pwr -= 20; /* OFDM60: -10dB */ 3546 else 3547 pwr -= 10; /* Others: -5dB */ 3548 3549 /* Do not exceed channel max TX power. */ 3550 if (pwr > maxchpwr) 3551 pwr = maxchpwr; 3552 3553 idx = gain - (pwr - power) - tdiff - vdiff; 3554 if ((ridx / 8) & 1) /* MIMO */ 3555 idx += (int32_t)le32toh(uc->atten[grp][c]); 3556 3557 if (cmd.band == 0) 3558 idx += 9; /* 5GHz */ 3559 if (ridx == IWN_RIDX_MAX) 3560 idx += 5; /* CCK */ 3561 3562 /* Make sure idx stays in a valid range. */ 3563 if (idx < 0) 3564 idx = 0; 3565 else if (idx > IWN4965_MAX_PWR_INDEX) 3566 idx = IWN4965_MAX_PWR_INDEX; 3567 3568 DPRINTF(("TX chain %d, rate idx %d: power=%d\n", 3569 c, ridx, idx)); 3570 cmd.power[ridx].rf_gain[c] = rf_gain[idx]; 3571 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; 3572 } 3573 } 3574 3575 DPRINTF(("setting TX power for chan %d\n", chan)); 3576 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); 3577 3578 #undef interpolate 3579 #undef fdivround 3580 } 3581 3582 static int 3583 iwn5000_set_txpower(struct iwn_softc *sc, int async) 3584 { 3585 struct iwn5000_cmd_txpower cmd; 3586 3587 /* 3588 * TX power calibration is handled automatically by the firmware 3589 * for 5000 Series. 3590 */ 3591 memset(&cmd, 0, sizeof cmd); 3592 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */ 3593 cmd.flags = IWN5000_TXPOWER_NO_CLOSED; 3594 cmd.srv_limit = IWN5000_TXPOWER_AUTO; 3595 DPRINTF(("setting TX power\n")); 3596 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async); 3597 } 3598 3599 /* 3600 * Retrieve the maximum RSSI (in dBm) among receivers. 
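 * Both variants below report the strongest receive chain:
 *
 *   rssi = max(per-antenna RSSI) - AGC gain - IWN_RSSI_TO_DBM
 *
 * i.e. the raw value is corrected for the automatic gain applied by
 * the PHY and rebased by a constant offset to yield a dBm figure.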
3601 */ 3602 static int 3603 iwn4965_get_rssi(const struct iwn_rx_stat *stat) 3604 { 3605 const struct iwn4965_rx_phystat *phy = (const void *)stat->phybuf; 3606 uint8_t mask, agc; 3607 int rssi; 3608 3609 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC; 3610 agc = (le16toh(phy->agc) >> 7) & 0x7f; 3611 3612 rssi = 0; 3613 if (mask & IWN_ANT_A) 3614 rssi = MAX(rssi, phy->rssi[0]); 3615 if (mask & IWN_ANT_B) 3616 rssi = MAX(rssi, phy->rssi[2]); 3617 if (mask & IWN_ANT_C) 3618 rssi = MAX(rssi, phy->rssi[4]); 3619 3620 return rssi - agc - IWN_RSSI_TO_DBM; 3621 } 3622 3623 static int 3624 iwn5000_get_rssi(const struct iwn_rx_stat *stat) 3625 { 3626 const struct iwn5000_rx_phystat *phy = (const void *)stat->phybuf; 3627 uint8_t agc; 3628 int rssi; 3629 3630 agc = (le32toh(phy->agc) >> 9) & 0x7f; 3631 3632 rssi = MAX(le16toh(phy->rssi[0]) & 0xff, 3633 le16toh(phy->rssi[1]) & 0xff); 3634 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi); 3635 3636 return rssi - agc - IWN_RSSI_TO_DBM; 3637 } 3638 3639 /* 3640 * Retrieve the average noise (in dBm) among receivers. 3641 */ 3642 static int 3643 iwn_get_noise(const struct iwn_rx_general_stats *stats) 3644 { 3645 int i, total, nbant, noise; 3646 3647 total = nbant = 0; 3648 for (i = 0; i < 3; i++) { 3649 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0) 3650 continue; 3651 total += noise; 3652 nbant++; 3653 } 3654 /* There should be at least one antenna but check anyway. */ 3655 return (nbant == 0) ? -127 : (total / nbant) - 107; 3656 } 3657 3658 /* 3659 * Compute temperature (in degC) from last received statistics. 3660 */ 3661 static int 3662 iwn4965_get_temperature(struct iwn_softc *sc) 3663 { 3664 struct iwn_ucode_info *uc = &sc->ucode_info; 3665 int32_t r1, r2, r3, r4, temp; 3666 3667 r1 = le32toh(uc->temp[0].chan20MHz); 3668 r2 = le32toh(uc->temp[1].chan20MHz); 3669 r3 = le32toh(uc->temp[2].chan20MHz); 3670 r4 = le32toh(sc->rawtemp); 3671 3672 if (r1 == r3) /* Prevents division by 0 (should not happen). */ 3673 return 0; 3674 3675 /* Sign-extend 23-bit R4 value to 32-bit. */ 3676 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000; 3677 /* Compute temperature in Kelvin. */ 3678 temp = (259 * (r4 - r2)) / (r3 - r1); 3679 temp = (temp * 97) / 100 + 8; 3680 3681 DPRINTF(("temperature %dK/%dC\n", temp, IWN_KTOC(temp))); 3682 return IWN_KTOC(temp); 3683 } 3684 3685 static int 3686 iwn5000_get_temperature(struct iwn_softc *sc) 3687 { 3688 int32_t temp; 3689 3690 /* 3691 * Temperature is not used by the driver for 5000 Series because 3692 * TX power calibration is handled by firmware. We export it to 3693 * users through the sensor framework though. 3694 */ 3695 temp = le32toh(sc->rawtemp); 3696 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 3697 temp = (temp / -5) + sc->temp_off; 3698 temp = IWN_KTOC(temp); 3699 } 3700 return temp; 3701 } 3702 3703 /* 3704 * Initialize sensitivity calibration state machine. 3705 */ 3706 static int 3707 iwn_init_sensitivity(struct iwn_softc *sc) 3708 { 3709 struct iwn_ops *ops = &sc->ops; 3710 struct iwn_calib_state *calib = &sc->calib; 3711 uint32_t flags; 3712 int error; 3713 3714 /* Reset calibration state machine. */ 3715 memset(calib, 0, sizeof (*calib)); 3716 calib->state = IWN_CALIB_STATE_INIT; 3717 calib->cck_state = IWN_CCK_STATE_HIFA; 3718 /* Set initial correlation values. 
*/ 3719 calib->ofdm_x1 = sc->limits->min_ofdm_x1; 3720 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; 3721 calib->ofdm_x4 = sc->limits->min_ofdm_x4; 3722 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; 3723 calib->cck_x4 = 125; 3724 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; 3725 calib->energy_cck = sc->limits->energy_cck; 3726 3727 /* Write initial sensitivity. */ 3728 if ((error = iwn_send_sensitivity(sc)) != 0) 3729 return error; 3730 3731 /* Write initial gains. */ 3732 if ((error = ops->init_gains(sc)) != 0) 3733 return error; 3734 3735 /* Request statistics at each beacon interval. */ 3736 flags = 0; 3737 DPRINTF(("sending request for statistics\n")); 3738 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 3739 } 3740 3741 /* 3742 * Collect noise and RSSI statistics for the first 20 beacons received 3743 * after association and use them to determine connected antennas and 3744 * to set differential gains. 3745 */ 3746 static void 3747 iwn_collect_noise(struct iwn_softc *sc, 3748 const struct iwn_rx_general_stats *stats) 3749 { 3750 struct iwn_ops *ops = &sc->ops; 3751 struct iwn_calib_state *calib = &sc->calib; 3752 uint32_t val; 3753 int i; 3754 3755 /* Accumulate RSSI and noise for all 3 antennas. */ 3756 for (i = 0; i < 3; i++) { 3757 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff; 3758 calib->noise[i] += le32toh(stats->noise[i]) & 0xff; 3759 } 3760 /* NB: We update differential gains only once after 20 beacons. */ 3761 if (++calib->nbeacons < 20) 3762 return; 3763 3764 /* Determine highest average RSSI. */ 3765 val = MAX(calib->rssi[0], calib->rssi[1]); 3766 val = MAX(calib->rssi[2], val); 3767 3768 /* Determine which antennas are connected. */ 3769 sc->chainmask = sc->rxchainmask; 3770 for (i = 0; i < 3; i++) 3771 if (val - calib->rssi[i] > 15 * 20) 3772 sc->chainmask &= ~(1 << i); 3773 DPRINTF(("RX chains mask: theoretical=0x%x, actual=0x%x\n", 3774 sc->rxchainmask, sc->chainmask)); 3775 3776 /* If none of the TX antennas are connected, keep at least one. */ 3777 if ((sc->chainmask & sc->txchainmask) == 0) 3778 sc->chainmask |= IWN_LSB(sc->txchainmask); 3779 3780 (void)ops->set_gains(sc); 3781 calib->state = IWN_CALIB_STATE_RUN; 3782 3783 #ifdef notyet 3784 /* XXX Disable RX chains with no antennas connected. */ 3785 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask)); 3786 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 3787 #endif 3788 3789 /* Enable power-saving mode if requested by user. */ 3790 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) 3791 (void)iwn_set_pslevel(sc, 0, 3, 1); 3792 } 3793 3794 static int 3795 iwn4965_init_gains(struct iwn_softc *sc) 3796 { 3797 struct iwn_phy_calib_gain cmd; 3798 3799 memset(&cmd, 0, sizeof cmd); 3800 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 3801 /* Differential gains initially set to 0 for all 3 antennas. 
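	 * The command structure was zeroed by the memset() above, so only the
	 * calibration code is filled in; per-chain gains computed from the
	 * collected noise statistics are sent later by iwn4965_set_gains().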
 */
3802 	DPRINTF(("setting initial differential gains\n"));
3803 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
3804 }
3805 
3806 static int
3807 iwn5000_init_gains(struct iwn_softc *sc)
3808 {
3809 	struct iwn_phy_calib cmd;
3810 
3811 	memset(&cmd, 0, sizeof cmd);
3812 	cmd.code = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
3813 	cmd.ngroups = 1;
3814 	cmd.isvalid = 1;
3815 	DPRINTF(("setting initial differential gains\n"));
3816 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
3817 }
3818 
3819 static int
3820 iwn4965_set_gains(struct iwn_softc *sc)
3821 {
3822 	struct iwn_calib_state *calib = &sc->calib;
3823 	struct iwn_phy_calib_gain cmd;
3824 	int i, delta, noise;
3825 
3826 	/* Get minimal noise among connected antennas. */
3827 	noise = INT_MAX;	/* NB: There's at least one antenna. */
3828 	for (i = 0; i < 3; i++)
3829 		if (sc->chainmask & (1 << i))
3830 			noise = MIN(calib->noise[i], noise);
3831 
3832 	memset(&cmd, 0, sizeof cmd);
3833 	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
3834 	/* Set differential gains for connected antennas. */
3835 	for (i = 0; i < 3; i++) {
3836 		if (sc->chainmask & (1 << i)) {
3837 			/* Compute attenuation (in unit of 1.5dB). */
3838 			delta = (noise - (int32_t)calib->noise[i]) / 30;
3839 			/* NB: delta <= 0 */
3840 			/* Limit to [-4.5dB,0]. */
3841 			cmd.gain[i] = MIN(abs(delta), 3);
3842 			if (delta < 0)
3843 				cmd.gain[i] |= 1 << 2;	/* sign bit */
3844 		}
3845 	}
3846 	DPRINTF(("setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
3847 	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask));
3848 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
3849 }
3850 
3851 static int
3852 iwn5000_set_gains(struct iwn_softc *sc)
3853 {
3854 	struct iwn_calib_state *calib = &sc->calib;
3855 	struct iwn_phy_calib_gain cmd;
3856 	int i, ant, div, delta;
3857 
3858 	/* Noise was accumulated over 20 beacons; non-6050 parts need an extra 1.5 factor. */
3859 	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
3860 
3861 	memset(&cmd, 0, sizeof cmd);
3862 	cmd.code = IWN5000_PHY_CALIB_NOISE_GAIN;
3863 	cmd.ngroups = 1;
3864 	cmd.isvalid = 1;
3865 	/* Use the first available RX antenna as the reference. */
3866 	ant = IWN_LSB(sc->rxchainmask);
3867 	/* Set differential gains for other antennas. */
3868 	for (i = ant + 1; i < 3; i++) {
3869 		if (sc->chainmask & (1 << i)) {
3870 			/* The delta is relative to antenna "ant". */
3871 			delta = ((int32_t)calib->noise[ant] -
3872 			    (int32_t)calib->noise[i]) / div;
3873 			/* Limit to [-4.5dB,+4.5dB]. */
3874 			cmd.gain[i - 1] = MIN(abs(delta), 3);
3875 			if (delta < 0)
3876 				cmd.gain[i - 1] |= 1 << 2;	/* sign bit */
3877 		}
3878 	}
3879 	DPRINTF(("setting differential gains: %x/%x (%x)\n",
3880 	    cmd.gain[0], cmd.gain[1], sc->chainmask));
3881 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
3882 }
3883 
3884 /*
3885  * Tune RF RX sensitivity based on the number of false alarms detected
3886  * during the last beacon period.
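 * Correlation thresholds are raised (sensitivity reduced) when false alarms
 * exceed roughly 50 per unit of RX-enabled time and lowered when they fall
 * below 5, always staying within the per-chip limits; any change is pushed
 * to the firmware through iwn_send_sensitivity().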
3887 */ 3888 static void 3889 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) 3890 { 3891 #define inc(val, inc, max) \ 3892 if ((val) < (max)) { \ 3893 if ((val) < (max) - (inc)) \ 3894 (val) += (inc); \ 3895 else \ 3896 (val) = (max); \ 3897 needs_update = 1; \ 3898 } 3899 #define dec(val, dec, min) \ 3900 if ((val) > (min)) { \ 3901 if ((val) > (min) + (dec)) \ 3902 (val) -= (dec); \ 3903 else \ 3904 (val) = (min); \ 3905 needs_update = 1; \ 3906 } 3907 3908 const struct iwn_sensitivity_limits *limits = sc->limits; 3909 struct iwn_calib_state *calib = &sc->calib; 3910 uint32_t val, rxena, fa; 3911 uint32_t energy[3], energy_min; 3912 uint8_t noise[3], noise_ref; 3913 int i, needs_update = 0; 3914 3915 /* Check that we've been enabled long enough. */ 3916 if ((rxena = le32toh(stats->general.load)) == 0) 3917 return; 3918 3919 /* Compute number of false alarms since last call for OFDM. */ 3920 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; 3921 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm; 3922 fa *= 200 * 1024; /* 200TU */ 3923 3924 /* Save counters values for next call. */ 3925 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp); 3926 calib->fa_ofdm = le32toh(stats->ofdm.fa); 3927 3928 if (fa > 50 * rxena) { 3929 /* High false alarm count, decrease sensitivity. */ 3930 DPRINTFN(2, ("OFDM high false alarm count: %u\n", fa)); 3931 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); 3932 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); 3933 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); 3934 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); 3935 3936 } else if (fa < 5 * rxena) { 3937 /* Low false alarm count, increase sensitivity. */ 3938 DPRINTFN(2, ("OFDM low false alarm count: %u\n", fa)); 3939 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); 3940 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); 3941 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); 3942 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); 3943 } 3944 3945 /* Compute maximum noise among 3 receivers. */ 3946 for (i = 0; i < 3; i++) 3947 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff; 3948 val = MAX(noise[0], noise[1]); 3949 val = MAX(noise[2], val); 3950 /* Insert it into our samples table. */ 3951 calib->noise_samples[calib->cur_noise_sample] = val; 3952 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; 3953 3954 /* Compute maximum noise among last 20 samples. */ 3955 noise_ref = calib->noise_samples[0]; 3956 for (i = 1; i < 20; i++) 3957 noise_ref = MAX(noise_ref, calib->noise_samples[i]); 3958 3959 /* Compute maximum energy among 3 receivers. */ 3960 for (i = 0; i < 3; i++) 3961 energy[i] = le32toh(stats->general.energy[i]); 3962 val = MIN(energy[0], energy[1]); 3963 val = MIN(energy[2], val); 3964 /* Insert it into our samples table. */ 3965 calib->energy_samples[calib->cur_energy_sample] = val; 3966 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; 3967 3968 /* Compute minimum energy among last 10 samples. */ 3969 energy_min = calib->energy_samples[0]; 3970 for (i = 1; i < 10; i++) 3971 energy_min = MAX(energy_min, calib->energy_samples[i]); 3972 energy_min += 6; 3973 3974 /* Compute number of false alarms since last call for CCK. */ 3975 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck; 3976 fa += le32toh(stats->cck.fa) - calib->fa_cck; 3977 fa *= 200 * 1024; /* 200TU */ 3978 3979 /* Save counters values for next call. 
*/ 3980 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp); 3981 calib->fa_cck = le32toh(stats->cck.fa); 3982 3983 if (fa > 50 * rxena) { 3984 /* High false alarm count, decrease sensitivity. */ 3985 DPRINTFN(2, ("CCK high false alarm count: %u\n", fa)); 3986 calib->cck_state = IWN_CCK_STATE_HIFA; 3987 calib->low_fa = 0; 3988 3989 if (calib->cck_x4 > 160) { 3990 calib->noise_ref = noise_ref; 3991 if (calib->energy_cck > 2) 3992 dec(calib->energy_cck, 2, energy_min); 3993 } 3994 if (calib->cck_x4 < 160) { 3995 calib->cck_x4 = 161; 3996 needs_update = 1; 3997 } else 3998 inc(calib->cck_x4, 3, limits->max_cck_x4); 3999 4000 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); 4001 4002 } else if (fa < 5 * rxena) { 4003 /* Low false alarm count, increase sensitivity. */ 4004 DPRINTFN(2, ("CCK low false alarm count: %u\n", fa)); 4005 calib->cck_state = IWN_CCK_STATE_LOFA; 4006 calib->low_fa++; 4007 4008 if (calib->cck_state != IWN_CCK_STATE_INIT && 4009 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || 4010 calib->low_fa > 100)) { 4011 inc(calib->energy_cck, 2, limits->min_energy_cck); 4012 dec(calib->cck_x4, 3, limits->min_cck_x4); 4013 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); 4014 } 4015 } else { 4016 /* Not worth to increase or decrease sensitivity. */ 4017 DPRINTFN(2, ("CCK normal false alarm count: %u\n", fa)); 4018 calib->low_fa = 0; 4019 calib->noise_ref = noise_ref; 4020 4021 if (calib->cck_state == IWN_CCK_STATE_HIFA) { 4022 /* Previous interval had many false alarms. */ 4023 dec(calib->energy_cck, 8, energy_min); 4024 } 4025 calib->cck_state = IWN_CCK_STATE_INIT; 4026 } 4027 4028 if (needs_update) 4029 (void)iwn_send_sensitivity(sc); 4030 #undef dec 4031 #undef inc 4032 } 4033 4034 static int 4035 iwn_send_sensitivity(struct iwn_softc *sc) 4036 { 4037 struct iwn_calib_state *calib = &sc->calib; 4038 struct iwn_sensitivity_cmd cmd; 4039 4040 memset(&cmd, 0, sizeof cmd); 4041 cmd.which = IWN_SENSITIVITY_WORKTBL; 4042 /* OFDM modulation. */ 4043 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); 4044 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); 4045 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); 4046 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); 4047 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm); 4048 cmd.energy_ofdm_th = htole16(62); 4049 /* CCK modulation. */ 4050 cmd.corr_cck_x4 = htole16(calib->cck_x4); 4051 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); 4052 cmd.energy_cck = htole16(calib->energy_cck); 4053 /* Barker modulation: use default values. */ 4054 cmd.corr_barker = htole16(190); 4055 cmd.corr_barker_mrc = htole16(390); 4056 4057 DPRINTFN(2, ("setting sensitivity %d/%d/%d/%d/%d/%d/%d\n", 4058 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4, 4059 calib->ofdm_mrc_x4, calib->cck_x4, calib->cck_mrc_x4, 4060 calib->energy_cck)); 4061 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, sizeof cmd, 1); 4062 } 4063 4064 /* 4065 * Set STA mode power saving level (between 0 and 5). 4066 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 4067 */ 4068 static int 4069 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) 4070 { 4071 struct iwn_pmgt_cmd cmd; 4072 const struct iwn_pmgt *pmgt; 4073 uint32_t maxp, skip_dtim; 4074 pcireg_t reg; 4075 int i; 4076 4077 /* Select which PS parameters to use. 
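	 * iwn_pmgt[][] is indexed by DTIM period bucket (<=2, <=10, >10) and
	 * by the requested power saving level (0 is CAM, 5 is maximum saving).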
*/ 4078 if (dtim <= 2) 4079 pmgt = &iwn_pmgt[0][level]; 4080 else if (dtim <= 10) 4081 pmgt = &iwn_pmgt[1][level]; 4082 else 4083 pmgt = &iwn_pmgt[2][level]; 4084 4085 memset(&cmd, 0, sizeof cmd); 4086 if (level != 0) /* not CAM */ 4087 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); 4088 if (level == 5) 4089 cmd.flags |= htole16(IWN_PS_FAST_PD); 4090 /* Retrieve PCIe Active State Power Management (ASPM). */ 4091 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 4092 sc->sc_cap_off + PCIE_LCSR); 4093 if (!(reg & PCIE_LCSR_ASPM_L0S)) /* L0s Entry disabled. */ 4094 cmd.flags |= htole16(IWN_PS_PCI_PMGT); 4095 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); 4096 cmd.txtimeout = htole32(pmgt->txtimeout * 1024); 4097 4098 if (dtim == 0) { 4099 dtim = 1; 4100 skip_dtim = 0; 4101 } else 4102 skip_dtim = pmgt->skip_dtim; 4103 if (skip_dtim != 0) { 4104 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); 4105 maxp = pmgt->intval[4]; 4106 if (maxp == (uint32_t)-1) 4107 maxp = dtim * (skip_dtim + 1); 4108 else if (maxp > dtim) 4109 maxp = (maxp / dtim) * dtim; 4110 } else 4111 maxp = dtim; 4112 for (i = 0; i < 5; i++) 4113 cmd.intval[i] = htole32(MIN(maxp, pmgt->intval[i])); 4114 4115 DPRINTF(("setting power saving level to %d\n", level)); 4116 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 4117 } 4118 4119 int 4120 iwn5000_runtime_calib(struct iwn_softc *sc) 4121 { 4122 struct iwn5000_calib_config cmd; 4123 4124 memset(&cmd, 0, sizeof cmd); 4125 cmd.ucode.once.enable = 0xffffffff; 4126 cmd.ucode.once.start = IWN5000_CALIB_DC; 4127 DPRINTF(("configuring runtime calibration\n")); 4128 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0); 4129 } 4130 4131 static int 4132 iwn_config_bt_coex_bluetooth(struct iwn_softc *sc) 4133 { 4134 struct iwn_bluetooth bluetooth; 4135 4136 memset(&bluetooth, 0, sizeof bluetooth); 4137 bluetooth.flags = IWN_BT_COEX_ENABLE; 4138 bluetooth.lead_time = IWN_BT_LEAD_TIME_DEF; 4139 bluetooth.max_kill = IWN_BT_MAX_KILL_DEF; 4140 4141 DPRINTF(("configuring bluetooth coexistence\n")); 4142 return iwn_cmd(sc, IWN_CMD_BT_COEX, &bluetooth, sizeof bluetooth, 0); 4143 } 4144 4145 static int 4146 iwn_config_bt_coex_prio_table(struct iwn_softc *sc) 4147 { 4148 uint8_t prio_table[16]; 4149 4150 memset(&prio_table, 0, sizeof prio_table); 4151 prio_table[ 0] = 6; /* init calibration 1 */ 4152 prio_table[ 1] = 7; /* init calibration 2 */ 4153 prio_table[ 2] = 2; /* periodic calib low 1 */ 4154 prio_table[ 3] = 3; /* periodic calib low 2 */ 4155 prio_table[ 4] = 4; /* periodic calib high 1 */ 4156 prio_table[ 5] = 5; /* periodic calib high 2 */ 4157 prio_table[ 6] = 6; /* dtim */ 4158 prio_table[ 7] = 8; /* scan52 */ 4159 prio_table[ 8] = 10; /* scan24 */ 4160 4161 DPRINTF(("sending priority lookup table\n")); 4162 return iwn_cmd(sc, IWN_CMD_BT_COEX_PRIO_TABLE, 4163 &prio_table, sizeof prio_table, 0); 4164 } 4165 4166 static int 4167 iwn_config_bt_coex_adv1(struct iwn_softc *sc) 4168 { 4169 int error; 4170 struct iwn_bt_adv1 d; 4171 4172 memset(&d, 0, sizeof d); 4173 d.basic.bt.flags = IWN_BT_COEX_ENABLE; 4174 d.basic.bt.lead_time = IWN_BT_LEAD_TIME_DEF; 4175 d.basic.bt.max_kill = IWN_BT_MAX_KILL_DEF; 4176 d.basic.bt.bt3_timer_t7_value = IWN_BT_BT3_T7_DEF; 4177 d.basic.bt.kill_ack_mask = IWN_BT_KILL_ACK_MASK_DEF; 4178 d.basic.bt.kill_cts_mask = IWN_BT_KILL_CTS_MASK_DEF; 4179 d.basic.bt3_prio_sample_time = IWN_BT_BT3_PRIO_SAMPLE_DEF; 4180 d.basic.bt3_timer_t2_value = IWN_BT_BT3_T2_DEF; 4181 d.basic.bt3_lookup_table[ 0] = htole32(0xaaaaaaaa); /* Normal */ 4182 
d.basic.bt3_lookup_table[ 1] = htole32(0xaaaaaaaa);
4183 	d.basic.bt3_lookup_table[ 2] = htole32(0xaeaaaaaa);
4184 	d.basic.bt3_lookup_table[ 3] = htole32(0xaaaaaaaa);
4185 	d.basic.bt3_lookup_table[ 4] = htole32(0xcc00ff28);
4186 	d.basic.bt3_lookup_table[ 5] = htole32(0x0000aaaa);
4187 	d.basic.bt3_lookup_table[ 6] = htole32(0xcc00aaaa);
4188 	d.basic.bt3_lookup_table[ 7] = htole32(0x0000aaaa);
4189 	d.basic.bt3_lookup_table[ 8] = htole32(0xc0004000);
4190 	d.basic.bt3_lookup_table[ 9] = htole32(0x00004000);
4191 	d.basic.bt3_lookup_table[10] = htole32(0xf0005000);
4192 	d.basic.bt3_lookup_table[11] = htole32(0xf0005000);
4193 	d.basic.reduce_txpower = 0;	/* not implemented yet */
4194 	d.basic.valid = IWN_BT_ALL_VALID_MASK;
4195 	d.prio_boost = IWN_BT_PRIO_BOOST_DEF;
4196 	d.tx_prio_boost = 0;
4197 	d.rx_prio_boost = 0;
4198 
4199 	DPRINTF(("configuring advanced bluetooth coexistence v1\n"));
4200 	error = iwn_cmd(sc, IWN_CMD_BT_COEX, &d, sizeof d, 0);
4201 	if (error != 0) {
4202 		aprint_error_dev(sc->sc_dev,
4203 		    "could not configure advanced bluetooth coexistence\n");
4204 		return error;
4205 	}
4206 
4207 	error = iwn_config_bt_coex_prio_table(sc);
4208 	if (error != 0) {
4209 		aprint_error_dev(sc->sc_dev,
4210 		    "could not send BT priority table\n");
4211 		return error;
4212 	}
4213 
4214 	return error;
4215 }
4216 
4217 static int
4218 iwn_config(struct iwn_softc *sc)
4219 {
4220 	struct iwn_ops *ops = &sc->ops;
4221 	struct ieee80211com *ic = &sc->sc_ic;
4222 	struct ifnet *ifp = ic->ic_ifp;
4223 	uint32_t txmask;
4224 	uint16_t rxchain;
4225 	int error;
4226 
4227 	error = ops->config_bt_coex(sc);
4228 	if (error != 0) {
4229 		aprint_error_dev(sc->sc_dev,
4230 		    "could not configure bluetooth coexistence\n");
4231 		return error;
4232 	}
4233 
4234 	if (sc->hw_type == IWN_HW_REV_TYPE_6050 ||
4235 	    sc->hw_type == IWN_HW_REV_TYPE_6005) {
4236 		/* Configure runtime DC calibration. */
4237 		error = iwn5000_runtime_calib(sc);
4238 		if (error != 0) {
4239 			aprint_error_dev(sc->sc_dev,
4240 			    "could not configure runtime calibration\n");
4241 			return error;
4242 		}
4243 	}
4244 
4245 	/* Configure valid TX chains for 5000 Series. */
4246 	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4247 		txmask = htole32(sc->txchainmask);
4248 		DPRINTF(("configuring valid TX chains 0x%x\n", txmask));
4249 		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
4250 		    sizeof txmask, 0);
4251 		if (error != 0) {
4252 			aprint_error_dev(sc->sc_dev,
4253 			    "could not configure valid TX chains\n");
4254 			return error;
4255 		}
4256 	}
4257 
4258 	/* Set mode, channel, RX filter and enable RX. */
4259 	memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
4260 	IEEE80211_ADDR_COPY(ic->ic_myaddr, CLLADDR(ifp->if_sadl));
4261 	IEEE80211_ADDR_COPY(sc->rxon.myaddr, ic->ic_myaddr);
4262 	IEEE80211_ADDR_COPY(sc->rxon.wlap, ic->ic_myaddr);
4263 	sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4264 	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4265 	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_ibss_chan))
4266 		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4267 	switch (ic->ic_opmode) {
4268 	case IEEE80211_M_STA:
4269 		sc->rxon.mode = IWN_MODE_STA;
4270 		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
4271 		break;
4272 	case IEEE80211_M_MONITOR:
4273 		sc->rxon.mode = IWN_MODE_MONITOR;
4274 		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
4275 		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
4276 		break;
4277 	default:
4278 		/* Should not get here.
*/ 4279 break; 4280 } 4281 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 4282 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 4283 sc->rxon.ht_single_mask = 0xff; 4284 sc->rxon.ht_dual_mask = 0xff; 4285 sc->rxon.ht_triple_mask = 0xff; 4286 rxchain = 4287 IWN_RXCHAIN_VALID(sc->rxchainmask) | 4288 IWN_RXCHAIN_MIMO_COUNT(2) | 4289 IWN_RXCHAIN_IDLE_COUNT(2); 4290 sc->rxon.rxchain = htole16(rxchain); 4291 DPRINTF(("setting configuration\n")); 4292 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0); 4293 if (error != 0) { 4294 aprint_error_dev(sc->sc_dev, 4295 "RXON command failed\n"); 4296 return error; 4297 } 4298 4299 if ((error = iwn_add_broadcast_node(sc, 0)) != 0) { 4300 aprint_error_dev(sc->sc_dev, 4301 "could not add broadcast node\n"); 4302 return error; 4303 } 4304 4305 /* Configuration has changed, set TX power accordingly. */ 4306 if ((error = ops->set_txpower(sc, 0)) != 0) { 4307 aprint_error_dev(sc->sc_dev, 4308 "could not set TX power\n"); 4309 return error; 4310 } 4311 4312 if ((error = iwn_set_critical_temp(sc)) != 0) { 4313 aprint_error_dev(sc->sc_dev, 4314 "could not set critical temperature\n"); 4315 return error; 4316 } 4317 4318 /* Set power saving level to CAM during initialization. */ 4319 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) { 4320 aprint_error_dev(sc->sc_dev, 4321 "could not set power saving level\n"); 4322 return error; 4323 } 4324 return 0; 4325 } 4326 4327 static int 4328 iwn_scan(struct iwn_softc *sc, uint16_t flags) 4329 { 4330 struct ieee80211com *ic = &sc->sc_ic; 4331 struct iwn_scan_hdr *hdr; 4332 struct iwn_cmd_data *tx; 4333 struct iwn_scan_essid *essid; 4334 struct iwn_scan_chan *chan; 4335 struct ieee80211_frame *wh; 4336 struct ieee80211_rateset *rs; 4337 struct ieee80211_channel *c; 4338 uint8_t *buf, *frm; 4339 uint16_t rxchain; 4340 uint8_t txant; 4341 int buflen, error; 4342 4343 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4344 if (buf == NULL) { 4345 aprint_error_dev(sc->sc_dev, 4346 "could not allocate buffer for scan command\n"); 4347 return ENOMEM; 4348 } 4349 hdr = (struct iwn_scan_hdr *)buf; 4350 /* 4351 * Move to the next channel if no frames are received within 10ms 4352 * after sending the probe request. 4353 */ 4354 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 4355 hdr->quiet_threshold = htole16(1); /* min # of packets */ 4356 4357 /* Select antennas for scanning. */ 4358 rxchain = 4359 IWN_RXCHAIN_VALID(sc->rxchainmask) | 4360 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) | 4361 IWN_RXCHAIN_DRIVER_FORCE; 4362 if ((flags & IEEE80211_CHAN_5GHZ) && 4363 sc->hw_type == IWN_HW_REV_TYPE_4965) { 4364 /* Ant A must be avoided in 5GHz because of an HW bug. */ 4365 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC); 4366 } else /* Use all available RX antennas. */ 4367 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); 4368 hdr->rxchain = htole16(rxchain); 4369 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); 4370 4371 tx = (struct iwn_cmd_data *)(hdr + 1); 4372 tx->flags = htole32(IWN_TX_AUTO_SEQ); 4373 tx->id = sc->broadcast_id; 4374 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4375 4376 if (flags & IEEE80211_CHAN_5GHZ) { 4377 hdr->crc_threshold = 0xffff; 4378 /* Send probe requests at 6Mbps. */ 4379 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp; 4380 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4381 } else { 4382 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); 4383 /* Send probe requests at 1Mbps. 
*/ 4384 tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp; 4385 tx->rflags = IWN_RFLAG_CCK; 4386 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4387 } 4388 /* Use the first valid TX antenna. */ 4389 txant = IWN_LSB(sc->txchainmask); 4390 tx->rflags |= IWN_RFLAG_ANT(txant); 4391 4392 essid = (struct iwn_scan_essid *)(tx + 1); 4393 if (ic->ic_des_esslen != 0) { 4394 essid[0].id = IEEE80211_ELEMID_SSID; 4395 essid[0].len = ic->ic_des_esslen; 4396 memcpy(essid[0].data, ic->ic_des_essid, ic->ic_des_esslen); 4397 } 4398 /* 4399 * Build a probe request frame. Most of the following code is a 4400 * copy & paste of what is done in net80211. 4401 */ 4402 wh = (struct ieee80211_frame *)(essid + 20); 4403 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4404 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4405 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4406 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr); 4407 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr); 4408 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr); 4409 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 4410 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 4411 4412 frm = (uint8_t *)(wh + 1); 4413 frm = ieee80211_add_ssid(frm, NULL, 0); 4414 frm = ieee80211_add_rates(frm, rs); 4415 #ifndef IEEE80211_NO_HT 4416 if (ic->ic_flags & IEEE80211_F_HTON) 4417 frm = ieee80211_add_htcaps(frm, ic); 4418 #endif 4419 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4420 frm = ieee80211_add_xrates(frm, rs); 4421 4422 /* Set length of probe request. */ 4423 tx->len = htole16(frm - (uint8_t *)wh); 4424 4425 chan = (struct iwn_scan_chan *)frm; 4426 for (c = &ic->ic_channels[1]; 4427 c <= &ic->ic_channels[IEEE80211_CHAN_MAX]; c++) { 4428 if ((c->ic_flags & flags) != flags) 4429 continue; 4430 4431 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 4432 DPRINTFN(2, ("adding channel %d\n", chan->chan)); 4433 chan->flags = 0; 4434 if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) 4435 chan->flags |= htole32(IWN_CHAN_ACTIVE); 4436 if (ic->ic_des_esslen != 0) 4437 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 4438 chan->dsp_gain = 0x6e; 4439 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4440 chan->rf_gain = 0x3b; 4441 chan->active = htole16(24); 4442 chan->passive = htole16(110); 4443 } else { 4444 chan->rf_gain = 0x28; 4445 chan->active = htole16(36); 4446 chan->passive = htole16(120); 4447 } 4448 hdr->nchan++; 4449 chan++; 4450 } 4451 4452 buflen = (uint8_t *)chan - buf; 4453 hdr->len = htole16(buflen); 4454 4455 DPRINTF(("sending scan command nchan=%d\n", hdr->nchan)); 4456 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 4457 free(buf, M_DEVBUF); 4458 return error; 4459 } 4460 4461 static int 4462 iwn_auth(struct iwn_softc *sc) 4463 { 4464 struct iwn_ops *ops = &sc->ops; 4465 struct ieee80211com *ic = &sc->sc_ic; 4466 struct ieee80211_node *ni = ic->ic_bss; 4467 int error; 4468 4469 /* Update adapter configuration. 
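	 * The RXON structure built in iwn_config() is refined here with the
	 * BSS channel, BSSID and current slot/preamble flags before
	 * authentication.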
*/ 4470 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4471 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan); 4472 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4473 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 4474 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4475 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4476 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4477 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4478 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4479 switch (ic->ic_curmode) { 4480 case IEEE80211_MODE_11A: 4481 sc->rxon.cck_mask = 0; 4482 sc->rxon.ofdm_mask = 0x15; 4483 break; 4484 case IEEE80211_MODE_11B: 4485 sc->rxon.cck_mask = 0x03; 4486 sc->rxon.ofdm_mask = 0; 4487 break; 4488 default: /* Assume 802.11b/g. */ 4489 sc->rxon.cck_mask = 0x0f; 4490 sc->rxon.ofdm_mask = 0x15; 4491 } 4492 DPRINTF(("rxon chan %d flags %x cck %x ofdm %x\n", sc->rxon.chan, 4493 sc->rxon.flags, sc->rxon.cck_mask, sc->rxon.ofdm_mask)); 4494 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 4495 if (error != 0) { 4496 aprint_error_dev(sc->sc_dev, 4497 "RXON command failed\n"); 4498 return error; 4499 } 4500 4501 /* Configuration has changed, set TX power accordingly. */ 4502 if ((error = ops->set_txpower(sc, 1)) != 0) { 4503 aprint_error_dev(sc->sc_dev, 4504 "could not set TX power\n"); 4505 return error; 4506 } 4507 /* 4508 * Reconfiguring RXON clears the firmware nodes table so we must 4509 * add the broadcast node again. 4510 */ 4511 if ((error = iwn_add_broadcast_node(sc, 1)) != 0) { 4512 aprint_error_dev(sc->sc_dev, 4513 "could not add broadcast node\n"); 4514 return error; 4515 } 4516 return 0; 4517 } 4518 4519 static int 4520 iwn_run(struct iwn_softc *sc) 4521 { 4522 struct iwn_ops *ops = &sc->ops; 4523 struct ieee80211com *ic = &sc->sc_ic; 4524 struct ieee80211_node *ni = ic->ic_bss; 4525 struct iwn_node_info node; 4526 int error; 4527 4528 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 4529 /* Link LED blinks while monitoring. */ 4530 iwn_set_led(sc, IWN_LED_LINK, 5, 5); 4531 return 0; 4532 } 4533 if ((error = iwn_set_timing(sc, ni)) != 0) { 4534 aprint_error_dev(sc->sc_dev, 4535 "could not set timing\n"); 4536 return error; 4537 } 4538 4539 /* Update adapter configuration. */ 4540 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd)); 4541 /* Short preamble and slot time are negotiated when associating. */ 4542 sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT); 4543 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4544 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4545 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4546 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4547 sc->rxon.filter |= htole32(IWN_FILTER_BSS); 4548 DPRINTF(("rxon chan %d flags %x\n", sc->rxon.chan, sc->rxon.flags)); 4549 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 4550 if (error != 0) { 4551 aprint_error_dev(sc->sc_dev, 4552 "could not update configuration\n"); 4553 return error; 4554 } 4555 4556 /* Configuration has changed, set TX power accordingly. */ 4557 if ((error = ops->set_txpower(sc, 1)) != 0) { 4558 aprint_error_dev(sc->sc_dev, 4559 "could not set TX power\n"); 4560 return error; 4561 } 4562 4563 /* Fake a join to initialize the TX rate. */ 4564 ((struct iwn_node *)ni)->id = IWN_ID_BSS; 4565 iwn_newassoc(ni, 1); 4566 4567 /* Add BSS node. 
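	 * The firmware keeps its own station table; the BSS peer is added to
	 * it here and link-quality parameters for that entry are set just
	 * below.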
*/ 4568 memset(&node, 0, sizeof node); 4569 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 4570 node.id = IWN_ID_BSS; 4571 #ifdef notyet 4572 node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(3) | 4573 IWN_AMDPU_DENSITY(5)); /* 2us */ 4574 #endif 4575 DPRINTF(("adding BSS node\n")); 4576 error = ops->add_node(sc, &node, 1); 4577 if (error != 0) { 4578 aprint_error_dev(sc->sc_dev, 4579 "could not add BSS node\n"); 4580 return error; 4581 } 4582 DPRINTF(("setting link quality for node %d\n", node.id)); 4583 if ((error = iwn_set_link_quality(sc, ni)) != 0) { 4584 aprint_error_dev(sc->sc_dev, 4585 "could not setup link quality for node %d\n", node.id); 4586 return error; 4587 } 4588 4589 if ((error = iwn_init_sensitivity(sc)) != 0) { 4590 aprint_error_dev(sc->sc_dev, 4591 "could not set sensitivity\n"); 4592 return error; 4593 } 4594 /* Start periodic calibration timer. */ 4595 sc->calib.state = IWN_CALIB_STATE_ASSOC; 4596 sc->calib_cnt = 0; 4597 callout_schedule(&sc->calib_to, hz/2); 4598 4599 /* Link LED always on while associated. */ 4600 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 4601 return 0; 4602 } 4603 4604 #ifdef IWN_HWCRYPTO 4605 /* 4606 * We support CCMP hardware encryption/decryption of unicast frames only. 4607 * HW support for TKIP really sucks. We should let TKIP die anyway. 4608 */ 4609 static int 4610 iwn_set_key(struct ieee80211com *ic, struct ieee80211_node *ni, 4611 struct ieee80211_key *k) 4612 { 4613 struct iwn_softc *sc = ic->ic_softc; 4614 struct iwn_ops *ops = &sc->ops; 4615 struct iwn_node *wn = (void *)ni; 4616 struct iwn_node_info node; 4617 uint16_t kflags; 4618 4619 if ((k->k_flags & IEEE80211_KEY_GROUP) || 4620 k->k_cipher != IEEE80211_CIPHER_CCMP) 4621 return ieee80211_set_key(ic, ni, k); 4622 4623 kflags = IWN_KFLAG_CCMP | IWN_KFLAG_MAP | IWN_KFLAG_KID(k->k_id); 4624 if (k->k_flags & IEEE80211_KEY_GROUP) 4625 kflags |= IWN_KFLAG_GROUP; 4626 4627 memset(&node, 0, sizeof node); 4628 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ? 4629 sc->broadcast_id : wn->id; 4630 node.control = IWN_NODE_UPDATE; 4631 node.flags = IWN_FLAG_SET_KEY; 4632 node.kflags = htole16(kflags); 4633 node.kid = k->k_id; 4634 memcpy(node.key, k->k_key, k->k_len); 4635 DPRINTF(("set key id=%d for node %d\n", k->k_id, node.id)); 4636 return ops->add_node(sc, &node, 1); 4637 } 4638 4639 static void 4640 iwn_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni, 4641 struct ieee80211_key *k) 4642 { 4643 struct iwn_softc *sc = ic->ic_softc; 4644 struct iwn_ops *ops = &sc->ops; 4645 struct iwn_node *wn = (void *)ni; 4646 struct iwn_node_info node; 4647 4648 if ((k->k_flags & IEEE80211_KEY_GROUP) || 4649 k->k_cipher != IEEE80211_CIPHER_CCMP) { 4650 /* See comment about other ciphers above. */ 4651 ieee80211_delete_key(ic, ni, k); 4652 return; 4653 } 4654 if (ic->ic_state != IEEE80211_S_RUN) 4655 return; /* Nothing to do. */ 4656 memset(&node, 0, sizeof node); 4657 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ? 4658 sc->broadcast_id : wn->id; 4659 node.control = IWN_NODE_UPDATE; 4660 node.flags = IWN_FLAG_SET_KEY; 4661 node.kflags = htole16(IWN_KFLAG_INVALID); 4662 node.kid = 0xff; 4663 DPRINTF(("delete keys for node %d\n", node.id)); 4664 (void)ops->add_node(sc, &node, 1); 4665 } 4666 #endif 4667 4668 /* XXX Added for NetBSD (copied from rev 1.39). 
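 * iwn_wme_update() pushes the current EDCA (WME) parameters to the firmware;
 * it is a no-op unless WME is actually enabled.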
*/ 4669 4670 static int 4671 iwn_wme_update(struct ieee80211com *ic) 4672 { 4673 #define IWN_EXP2(v) htole16((1 << (v)) - 1) 4674 #define IWN_USEC(v) htole16(IEEE80211_TXOP_TO_US(v)) 4675 struct iwn_softc *sc = ic->ic_ifp->if_softc; 4676 const struct wmeParams *wmep; 4677 struct iwn_edca_params cmd; 4678 int ac; 4679 4680 /* don't override default WME values if WME is not actually enabled */ 4681 if (!(ic->ic_flags & IEEE80211_F_WME)) 4682 return 0; 4683 cmd.flags = 0; 4684 for (ac = 0; ac < WME_NUM_AC; ac++) { 4685 wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 4686 cmd.ac[ac].aifsn = wmep->wmep_aifsn; 4687 cmd.ac[ac].cwmin = IWN_EXP2(wmep->wmep_logcwmin); 4688 cmd.ac[ac].cwmax = IWN_EXP2(wmep->wmep_logcwmax); 4689 cmd.ac[ac].txoplimit = IWN_USEC(wmep->wmep_txopLimit); 4690 4691 DPRINTF(("setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 4692 "txop=%d\n", ac, cmd.ac[ac].aifsn, 4693 cmd.ac[ac].cwmin, 4694 cmd.ac[ac].cwmax, cmd.ac[ac].txoplimit)); 4695 } 4696 return iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 4697 #undef IWN_USEC 4698 #undef IWN_EXP2 4699 } 4700 4701 #ifndef IEEE80211_NO_HT 4702 /* 4703 * This function is called by upper layer when an ADDBA request is received 4704 * from another STA and before the ADDBA response is sent. 4705 */ 4706 static int 4707 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 4708 uint8_t tid) 4709 { 4710 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid]; 4711 struct iwn_softc *sc = ic->ic_softc; 4712 struct iwn_ops *ops = &sc->ops; 4713 struct iwn_node *wn = (void *)ni; 4714 struct iwn_node_info node; 4715 4716 memset(&node, 0, sizeof node); 4717 node.id = wn->id; 4718 node.control = IWN_NODE_UPDATE; 4719 node.flags = IWN_FLAG_SET_ADDBA; 4720 node.addba_tid = tid; 4721 node.addba_ssn = htole16(ba->ba_winstart); 4722 DPRINTFN(2, ("ADDBA RA=%d TID=%d SSN=%d\n", wn->id, tid, 4723 ba->ba_winstart)); 4724 return ops->add_node(sc, &node, 1); 4725 } 4726 4727 /* 4728 * This function is called by upper layer on teardown of an HT-immediate 4729 * Block Ack agreement (eg. uppon receipt of a DELBA frame). 4730 */ 4731 static void 4732 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 4733 uint8_t tid) 4734 { 4735 struct iwn_softc *sc = ic->ic_softc; 4736 struct iwn_ops *ops = &sc->ops; 4737 struct iwn_node *wn = (void *)ni; 4738 struct iwn_node_info node; 4739 4740 memset(&node, 0, sizeof node); 4741 node.id = wn->id; 4742 node.control = IWN_NODE_UPDATE; 4743 node.flags = IWN_FLAG_SET_DELBA; 4744 node.delba_tid = tid; 4745 DPRINTFN(2, ("DELBA RA=%d TID=%d\n", wn->id, tid)); 4746 (void)ops->add_node(sc, &node, 1); 4747 } 4748 4749 /* 4750 * This function is called by upper layer when an ADDBA response is received 4751 * from another STA. 4752 */ 4753 static int 4754 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 4755 uint8_t tid) 4756 { 4757 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 4758 struct iwn_softc *sc = ic->ic_softc; 4759 struct iwn_ops *ops = &sc->ops; 4760 struct iwn_node *wn = (void *)ni; 4761 struct iwn_node_info node; 4762 int error; 4763 4764 /* Enable TX for the specified RA/TID. 
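	 * Clearing the TID bit in disable_tid and pushing the updated mask to
	 * the firmware permits aggregated transmission on this TID; the TX
	 * scheduler queue itself is set up by ops->ampdu_tx_start() below.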
*/ 4765 wn->disable_tid &= ~(1 << tid); 4766 memset(&node, 0, sizeof node); 4767 node.id = wn->id; 4768 node.control = IWN_NODE_UPDATE; 4769 node.flags = IWN_FLAG_SET_DISABLE_TID; 4770 node.disable_tid = htole16(wn->disable_tid); 4771 error = ops->add_node(sc, &node, 1); 4772 if (error != 0) 4773 return error; 4774 4775 if ((error = iwn_nic_lock(sc)) != 0) 4776 return error; 4777 ops->ampdu_tx_start(sc, ni, tid, ba->ba_winstart); 4778 iwn_nic_unlock(sc); 4779 return 0; 4780 } 4781 4782 static void 4783 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 4784 uint8_t tid) 4785 { 4786 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 4787 struct iwn_softc *sc = ic->ic_softc; 4788 struct iwn_ops *ops = &sc->ops; 4789 4790 if (iwn_nic_lock(sc) != 0) 4791 return; 4792 ops->ampdu_tx_stop(sc, tid, ba->ba_winstart); 4793 iwn_nic_unlock(sc); 4794 } 4795 4796 static void 4797 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 4798 uint8_t tid, uint16_t ssn) 4799 { 4800 struct iwn_node *wn = (void *)ni; 4801 int qid = 7 + tid; 4802 4803 /* Stop TX scheduler while we're changing its configuration. */ 4804 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 4805 IWN4965_TXQ_STATUS_CHGACT); 4806 4807 /* Assign RA/TID translation to the queue. */ 4808 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), 4809 wn->id << 4 | tid); 4810 4811 /* Enable chain-building mode for the queue. */ 4812 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); 4813 4814 /* Set starting sequence number from the ADDBA request. */ 4815 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 4816 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 4817 4818 /* Set scheduler window size. */ 4819 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), 4820 IWN_SCHED_WINSZ); 4821 /* Set scheduler frame limit. */ 4822 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 4823 IWN_SCHED_LIMIT << 16); 4824 4825 /* Enable interrupts for the queue. */ 4826 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 4827 4828 /* Mark the queue as active. */ 4829 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 4830 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | 4831 iwn_tid2fifo[tid] << 1); 4832 } 4833 4834 static void 4835 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 4836 { 4837 int qid = 7 + tid; 4838 4839 /* Stop TX scheduler while we're changing its configuration. */ 4840 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 4841 IWN4965_TXQ_STATUS_CHGACT); 4842 4843 /* Set starting sequence number from the ADDBA request. */ 4844 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 4845 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 4846 4847 /* Disable interrupts for the queue. */ 4848 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 4849 4850 /* Mark the queue as inactive. */ 4851 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 4852 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 4853 } 4854 4855 static void 4856 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 4857 uint8_t tid, uint16_t ssn) 4858 { 4859 struct iwn_node *wn = (void *)ni; 4860 int qid = 10 + tid; 4861 4862 /* Stop TX scheduler while we're changing its configuration. */ 4863 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 4864 IWN5000_TXQ_STATUS_CHGACT); 4865 4866 /* Assign RA/TID translation to the queue. 
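	 * The translation entry packs the station id and the TID
	 * (wn->id << 4 | tid), as written just below.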
*/ 4867 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 4868 wn->id << 4 | tid); 4869 4870 /* Enable chain-building mode for the queue. */ 4871 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 4872 4873 /* Enable aggregation for the queue. */ 4874 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 4875 4876 /* Set starting sequence number from the ADDBA request. */ 4877 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 4878 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 4879 4880 /* Set scheduler window size and frame limit. */ 4881 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 4882 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 4883 4884 /* Enable interrupts for the queue. */ 4885 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 4886 4887 /* Mark the queue as active. */ 4888 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 4889 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 4890 } 4891 4892 static void 4893 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 4894 { 4895 int qid = 10 + tid; 4896 4897 /* Stop TX scheduler while we're changing its configuration. */ 4898 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 4899 IWN5000_TXQ_STATUS_CHGACT); 4900 4901 /* Disable aggregation for the queue. */ 4902 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 4903 4904 /* Set starting sequence number from the ADDBA request. */ 4905 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 4906 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 4907 4908 /* Disable interrupts for the queue. */ 4909 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 4910 4911 /* Mark the queue as inactive. */ 4912 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 4913 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); 4914 } 4915 #endif /* !IEEE80211_NO_HT */ 4916 4917 /* 4918 * Query calibration tables from the initialization firmware. We do this 4919 * only once at first boot. Called from a process context. 4920 */ 4921 static int 4922 iwn5000_query_calibration(struct iwn_softc *sc) 4923 { 4924 struct iwn5000_calib_config cmd; 4925 int error; 4926 4927 memset(&cmd, 0, sizeof cmd); 4928 cmd.ucode.once.enable = 0xffffffff; 4929 cmd.ucode.once.start = 0xffffffff; 4930 cmd.ucode.once.send = 0xffffffff; 4931 cmd.ucode.flags = 0xffffffff; 4932 DPRINTF(("sending calibration query\n")); 4933 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); 4934 if (error != 0) 4935 return error; 4936 4937 /* Wait at most two seconds for calibration to complete. */ 4938 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) 4939 error = tsleep(sc, PCATCH, "iwncal", 2 * hz); 4940 return error; 4941 } 4942 4943 /* 4944 * Send calibration results to the runtime firmware. These results were 4945 * obtained on first boot from the initialization firmware. 4946 */ 4947 static int 4948 iwn5000_send_calibration(struct iwn_softc *sc) 4949 { 4950 int idx, error; 4951 4952 for (idx = 0; idx < 5; idx++) { 4953 if (sc->calibcmd[idx].buf == NULL) 4954 continue; /* No results available. 
*/ 4955 DPRINTF(("send calibration result idx=%d len=%d\n", 4956 idx, sc->calibcmd[idx].len)); 4957 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf, 4958 sc->calibcmd[idx].len, 0); 4959 if (error != 0) { 4960 aprint_error_dev(sc->sc_dev, 4961 "could not send calibration result\n"); 4962 return error; 4963 } 4964 } 4965 return 0; 4966 } 4967 4968 static int 4969 iwn5000_send_wimax_coex(struct iwn_softc *sc) 4970 { 4971 struct iwn5000_wimax_coex wimax; 4972 4973 #ifdef notyet 4974 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 4975 /* Enable WiMAX coexistence for combo adapters. */ 4976 wimax.flags = 4977 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 4978 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 4979 IWN_WIMAX_COEX_STA_TABLE_VALID | 4980 IWN_WIMAX_COEX_ENABLE; 4981 memcpy(wimax.events, iwn6050_wimax_events, 4982 sizeof iwn6050_wimax_events); 4983 } else 4984 #endif 4985 { 4986 /* Disable WiMAX coexistence. */ 4987 wimax.flags = 0; 4988 memset(wimax.events, 0, sizeof wimax.events); 4989 } 4990 DPRINTF(("Configuring WiMAX coexistence\n")); 4991 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 4992 } 4993 4994 /* 4995 * This function is called after the runtime firmware notifies us of its 4996 * readiness (called in a process context). 4997 */ 4998 static int 4999 iwn4965_post_alive(struct iwn_softc *sc) 5000 { 5001 int error, qid; 5002 5003 if ((error = iwn_nic_lock(sc)) != 0) 5004 return error; 5005 5006 /* Clear TX scheduler state in SRAM. */ 5007 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5008 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, 5009 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); 5010 5011 /* Set physical address of TX scheduler rings (1KB aligned). */ 5012 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5013 5014 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5015 5016 /* Disable chain mode for all our 16 queues. */ 5017 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); 5018 5019 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { 5020 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); 5021 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5022 5023 /* Set scheduler window size. */ 5024 iwn_mem_write(sc, sc->sched_base + 5025 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); 5026 /* Set scheduler frame limit. */ 5027 iwn_mem_write(sc, sc->sched_base + 5028 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5029 IWN_SCHED_LIMIT << 16); 5030 } 5031 5032 /* Enable interrupts for all our 16 queues. */ 5033 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); 5034 /* Identify TX FIFO rings (0-7). */ 5035 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); 5036 5037 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 5038 for (qid = 0; qid < 7; qid++) { 5039 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 5040 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5041 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 5042 } 5043 iwn_nic_unlock(sc); 5044 return 0; 5045 } 5046 5047 /* 5048 * This function is called after the initialization or runtime firmware 5049 * notifies us of its readiness (called in a process context). 5050 */ 5051 static int 5052 iwn5000_post_alive(struct iwn_softc *sc) 5053 { 5054 int error, qid; 5055 5056 /* Switch to using ICT interrupt mode. */ 5057 iwn5000_ict_reset(sc); 5058 5059 if ((error = iwn_nic_lock(sc)) != 0) 5060 return error; 5061 5062 /* Clear TX scheduler state in SRAM. 
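	 * sched_base is the SRAM offset of the scheduler context, read back
	 * from a periphery register; the whole context area is zeroed before
	 * the queues are programmed below.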
*/ 5063 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5064 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 5065 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); 5066 5067 /* Set physical address of TX scheduler rings (1KB aligned). */ 5068 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5069 5070 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5071 5072 /* Enable chain mode for all queues, except command queue. */ 5073 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); 5074 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 5075 5076 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 5077 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 5078 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5079 5080 iwn_mem_write(sc, sc->sched_base + 5081 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 5082 /* Set scheduler window size and frame limit. */ 5083 iwn_mem_write(sc, sc->sched_base + 5084 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5085 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5086 } 5087 5088 /* Enable interrupts for all our 20 queues. */ 5089 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 5090 /* Identify TX FIFO rings (0-7). */ 5091 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 5092 5093 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 5094 for (qid = 0; qid < 7; qid++) { 5095 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 5096 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5097 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 5098 } 5099 iwn_nic_unlock(sc); 5100 5101 /* Configure WiMAX coexistence for combo adapters. */ 5102 error = iwn5000_send_wimax_coex(sc); 5103 if (error != 0) { 5104 aprint_error_dev(sc->sc_dev, 5105 "could not configure WiMAX coexistence\n"); 5106 return error; 5107 } 5108 if (sc->hw_type != IWN_HW_REV_TYPE_5150) { 5109 struct iwn5000_phy_calib_crystal cmd; 5110 5111 /* Perform crystal calibration. */ 5112 memset(&cmd, 0, sizeof cmd); 5113 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 5114 cmd.ngroups = 1; 5115 cmd.isvalid = 1; 5116 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff; 5117 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff; 5118 DPRINTF(("sending crystal calibration %d, %d\n", 5119 cmd.cap_pin[0], cmd.cap_pin[1])); 5120 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 5121 if (error != 0) { 5122 aprint_error_dev(sc->sc_dev, 5123 "crystal calibration failed\n"); 5124 return error; 5125 } 5126 } 5127 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { 5128 /* Query calibration from the initialization firmware. */ 5129 if ((error = iwn5000_query_calibration(sc)) != 0) { 5130 aprint_error_dev(sc->sc_dev, 5131 "could not query calibration\n"); 5132 return error; 5133 } 5134 /* 5135 * We have the calibration results now, reboot with the 5136 * runtime firmware (call ourselves recursively!) 5137 */ 5138 iwn_hw_stop(sc); 5139 error = iwn_hw_init(sc); 5140 } else { 5141 /* Send calibration results to runtime firmware. */ 5142 error = iwn5000_send_calibration(sc); 5143 } 5144 return error; 5145 } 5146 5147 /* 5148 * The firmware boot code is small and is intended to be copied directly into 5149 * the NIC internal memory (no DMA transfer). 5150 */ 5151 static int 5152 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) 5153 { 5154 int error, ntries; 5155 5156 size /= sizeof (uint32_t); 5157 5158 if ((error = iwn_nic_lock(sc)) != 0) 5159 return error; 5160 5161 /* Copy microcode image into NIC memory. 
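	 * The boot code is written into BSM SRAM through periphery register
	 * accesses (no DMA); the BSM is then told to copy it to instruction
	 * memory at IWN_FW_TEXT_BASE and started below.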
*/ 5162 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, 5163 (const uint32_t *)ucode, size); 5164 5165 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); 5166 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); 5167 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); 5168 5169 /* Start boot load now. */ 5170 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); 5171 5172 /* Wait for transfer to complete. */ 5173 for (ntries = 0; ntries < 1000; ntries++) { 5174 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & 5175 IWN_BSM_WR_CTRL_START)) 5176 break; 5177 DELAY(10); 5178 } 5179 if (ntries == 1000) { 5180 aprint_error_dev(sc->sc_dev, 5181 "could not load boot firmware\n"); 5182 iwn_nic_unlock(sc); 5183 return ETIMEDOUT; 5184 } 5185 5186 /* Enable boot after power up. */ 5187 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); 5188 5189 iwn_nic_unlock(sc); 5190 return 0; 5191 } 5192 5193 static int 5194 iwn4965_load_firmware(struct iwn_softc *sc) 5195 { 5196 struct iwn_fw_info *fw = &sc->fw; 5197 struct iwn_dma_info *dma = &sc->fw_dma; 5198 int error; 5199 5200 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 5201 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 5202 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->init.datasz, 5203 BUS_DMASYNC_PREWRITE); 5204 memcpy((char *)dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5205 fw->init.text, fw->init.textsz); 5206 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ, 5207 fw->init.textsz, BUS_DMASYNC_PREWRITE); 5208 5209 /* Tell adapter where to find initialization sections. */ 5210 if ((error = iwn_nic_lock(sc)) != 0) 5211 return error; 5212 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5213 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); 5214 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5215 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5216 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 5217 iwn_nic_unlock(sc); 5218 5219 /* Load firmware boot code. */ 5220 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 5221 if (error != 0) { 5222 aprint_error_dev(sc->sc_dev, 5223 "could not load boot firmware\n"); 5224 return error; 5225 } 5226 /* Now press "execute". */ 5227 IWN_WRITE(sc, IWN_RESET, 0); 5228 5229 /* Wait at most one second for first alive notification. */ 5230 if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) { 5231 aprint_error_dev(sc->sc_dev, 5232 "timeout waiting for adapter to initialize\n"); 5233 return error; 5234 } 5235 5236 /* Retrieve current temperature for initial TX power calibration. */ 5237 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; 5238 sc->temp = iwn4965_get_temperature(sc); 5239 5240 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 5241 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 5242 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->main.datasz, 5243 BUS_DMASYNC_PREWRITE); 5244 memcpy((char *)dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5245 fw->main.text, fw->main.textsz); 5246 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ, 5247 fw->main.textsz, BUS_DMASYNC_PREWRITE); 5248 5249 /* Tell adapter where to find runtime sections. 
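	 * Same BSM DRAM pointers as for the initialization image, except that
	 * the .text size is or'ed with IWN_FW_UPDATED to flag that a new
	 * (runtime) image has been staged.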
*/ 5250 if ((error = iwn_nic_lock(sc)) != 0) 5251 return error; 5252 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5253 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); 5254 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5255 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5256 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, 5257 IWN_FW_UPDATED | fw->main.textsz); 5258 iwn_nic_unlock(sc); 5259 5260 return 0; 5261 } 5262 5263 static int 5264 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, 5265 const uint8_t *section, int size) 5266 { 5267 struct iwn_dma_info *dma = &sc->fw_dma; 5268 int error; 5269 5270 /* Copy firmware section into pre-allocated DMA-safe memory. */ 5271 memcpy(dma->vaddr, section, size); 5272 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, size, BUS_DMASYNC_PREWRITE); 5273 5274 if ((error = iwn_nic_lock(sc)) != 0) 5275 return error; 5276 5277 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5278 IWN_FH_TX_CONFIG_DMA_PAUSE); 5279 5280 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); 5281 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), 5282 IWN_LOADDR(dma->paddr)); 5283 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), 5284 IWN_HIADDR(dma->paddr) << 28 | size); 5285 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), 5286 IWN_FH_TXBUF_STATUS_TBNUM(1) | 5287 IWN_FH_TXBUF_STATUS_TBIDX(1) | 5288 IWN_FH_TXBUF_STATUS_TFBD_VALID); 5289 5290 /* Kick Flow Handler to start DMA transfer. */ 5291 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5292 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); 5293 5294 iwn_nic_unlock(sc); 5295 5296 /* Wait at most five seconds for FH DMA transfer to complete. */ 5297 return tsleep(sc, PCATCH, "iwninit", 5 * hz); 5298 } 5299 5300 static int 5301 iwn5000_load_firmware(struct iwn_softc *sc) 5302 { 5303 struct iwn_fw_part *fw; 5304 int error; 5305 5306 /* Load the initialization firmware on first boot only. */ 5307 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ? 5308 &sc->fw.main : &sc->fw.init; 5309 5310 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, 5311 fw->text, fw->textsz); 5312 if (error != 0) { 5313 aprint_error_dev(sc->sc_dev, 5314 "could not load firmware %s section\n", ".text"); 5315 return error; 5316 } 5317 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, 5318 fw->data, fw->datasz); 5319 if (error != 0) { 5320 aprint_error_dev(sc->sc_dev, 5321 "could not load firmware %s section\n", ".data"); 5322 return error; 5323 } 5324 5325 /* Now press "execute". */ 5326 IWN_WRITE(sc, IWN_RESET, 0); 5327 return 0; 5328 } 5329 5330 /* 5331 * Extract text and data sections from a legacy firmware image. 5332 */ 5333 static int 5334 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) 5335 { 5336 const uint32_t *ptr; 5337 size_t hdrlen = 24; 5338 uint32_t rev; 5339 5340 ptr = (const uint32_t *)fw->data; 5341 rev = le32toh(*ptr++); 5342 5343 /* Check firmware API version. */ 5344 if (IWN_FW_API(rev) <= 1) { 5345 aprint_error_dev(sc->sc_dev, 5346 "bad firmware, need API version >=2\n"); 5347 return EINVAL; 5348 } 5349 if (IWN_FW_API(rev) >= 3) { 5350 /* Skip build number (version 2 header). 
*/ 5351 hdrlen += 4; 5352 ptr++; 5353 } 5354 if (fw->size < hdrlen) { 5355 aprint_error_dev(sc->sc_dev, 5356 "firmware too short: %zd bytes\n", fw->size); 5357 return EINVAL; 5358 } 5359 fw->main.textsz = le32toh(*ptr++); 5360 fw->main.datasz = le32toh(*ptr++); 5361 fw->init.textsz = le32toh(*ptr++); 5362 fw->init.datasz = le32toh(*ptr++); 5363 fw->boot.textsz = le32toh(*ptr++); 5364 5365 /* Check that all firmware sections fit. */ 5366 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + 5367 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5368 aprint_error_dev(sc->sc_dev, 5369 "firmware too short: %zd bytes\n", fw->size); 5370 return EINVAL; 5371 } 5372 5373 /* Get pointers to firmware sections. */ 5374 fw->main.text = (const uint8_t *)ptr; 5375 fw->main.data = fw->main.text + fw->main.textsz; 5376 fw->init.text = fw->main.data + fw->main.datasz; 5377 fw->init.data = fw->init.text + fw->init.textsz; 5378 fw->boot.text = fw->init.data + fw->init.datasz; 5379 return 0; 5380 } 5381 5382 /* 5383 * Extract text and data sections from a TLV firmware image. 5384 */ 5385 static int 5386 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 5387 uint16_t alt) 5388 { 5389 const struct iwn_fw_tlv_hdr *hdr; 5390 const struct iwn_fw_tlv *tlv; 5391 const uint8_t *ptr, *end; 5392 uint64_t altmask; 5393 uint32_t len; 5394 5395 if (fw->size < sizeof (*hdr)) { 5396 aprint_error_dev(sc->sc_dev, 5397 "firmware too short: %zd bytes\n", fw->size); 5398 return EINVAL; 5399 } 5400 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 5401 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 5402 aprint_error_dev(sc->sc_dev, 5403 "bad firmware signature 0x%08x\n", le32toh(hdr->signature)); 5404 return EINVAL; 5405 } 5406 DPRINTF(("FW: \"%.64s\", build 0x%x\n", hdr->descr, 5407 le32toh(hdr->build))); 5408 5409 /* 5410 * Select the closest supported alternative that is less than 5411 * or equal to the specified one. 5412 */ 5413 altmask = le64toh(hdr->altmask); 5414 while (alt > 0 && !(altmask & (1ULL << alt))) 5415 alt--; /* Downgrade. */ 5416 DPRINTF(("using alternative %d\n", alt)); 5417 5418 ptr = (const uint8_t *)(hdr + 1); 5419 end = (const uint8_t *)(fw->data + fw->size); 5420 5421 /* Parse type-length-value fields. */ 5422 while (ptr + sizeof (*tlv) <= end) { 5423 tlv = (const struct iwn_fw_tlv *)ptr; 5424 len = le32toh(tlv->len); 5425 5426 ptr += sizeof (*tlv); 5427 if (ptr + len > end) { 5428 aprint_error_dev(sc->sc_dev, 5429 "firmware too short: %zd bytes\n", fw->size); 5430 return EINVAL; 5431 } 5432 /* Skip other alternatives. */ 5433 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 5434 goto next; 5435 5436 switch (le16toh(tlv->type)) { 5437 case IWN_FW_TLV_MAIN_TEXT: 5438 fw->main.text = ptr; 5439 fw->main.textsz = len; 5440 break; 5441 case IWN_FW_TLV_MAIN_DATA: 5442 fw->main.data = ptr; 5443 fw->main.datasz = len; 5444 break; 5445 case IWN_FW_TLV_INIT_TEXT: 5446 fw->init.text = ptr; 5447 fw->init.textsz = len; 5448 break; 5449 case IWN_FW_TLV_INIT_DATA: 5450 fw->init.data = ptr; 5451 fw->init.datasz = len; 5452 break; 5453 case IWN_FW_TLV_BOOT_TEXT: 5454 fw->boot.text = ptr; 5455 fw->boot.textsz = len; 5456 break; 5457 default: 5458 DPRINTF(("TLV type %d not handled\n", 5459 le16toh(tlv->type))); 5460 break; 5461 } 5462 next: /* TLV fields are 32-bit aligned. 
*/ 5463 ptr += (len + 3) & ~3; 5464 } 5465 return 0; 5466 } 5467 5468 static int 5469 iwn_read_firmware(struct iwn_softc *sc) 5470 { 5471 struct iwn_fw_info *fw = &sc->fw; 5472 firmware_handle_t fwh; 5473 int error; 5474 5475 /* Initialize for error returns */ 5476 fw->data = NULL; 5477 fw->size = 0; 5478 5479 /* Open firmware image. */ 5480 if ((error = firmware_open("if_iwn", sc->fwname, &fwh)) != 0) { 5481 aprint_error_dev(sc->sc_dev, 5482 "could not get firmware handle %s\n", sc->fwname); 5483 return error; 5484 } 5485 fw->size = firmware_get_size(fwh); 5486 if (fw->size < sizeof (uint32_t)) { 5487 aprint_error_dev(sc->sc_dev, 5488 "firmware too short: %zd bytes\n", fw->size); 5489 firmware_close(fwh); 5490 return EINVAL; 5491 } 5492 5493 /* Read the firmware. */ 5494 fw->data = firmware_malloc(fw->size); 5495 if (fw->data == NULL) { 5496 aprint_error_dev(sc->sc_dev, 5497 "not enough memory to stock firmware %s\n", sc->fwname); 5498 firmware_close(fwh); 5499 return ENOMEM; 5500 } 5501 error = firmware_read(fwh, 0, fw->data, fw->size); 5502 firmware_close(fwh); 5503 if (error != 0) { 5504 aprint_error_dev(sc->sc_dev, 5505 "could not read firmware %s\n", sc->fwname); 5506 goto out; 5507 } 5508 5509 /* Retrieve text and data sections. */ 5510 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ 5511 error = iwn_read_firmware_leg(sc, fw); 5512 else 5513 error = iwn_read_firmware_tlv(sc, fw, 1); 5514 if (error != 0) { 5515 aprint_error_dev(sc->sc_dev, 5516 "could not read firmware sections\n"); 5517 goto out; 5518 } 5519 5520 /* Make sure text and data sections fit in hardware memory. */ 5521 if (fw->main.textsz > sc->fw_text_maxsz || 5522 fw->main.datasz > sc->fw_data_maxsz || 5523 fw->init.textsz > sc->fw_text_maxsz || 5524 fw->init.datasz > sc->fw_data_maxsz || 5525 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 5526 (fw->boot.textsz & 3) != 0) { 5527 aprint_error_dev(sc->sc_dev, 5528 "firmware sections too large\n"); 5529 goto out; 5530 } 5531 5532 /* We can proceed with loading the firmware. */ 5533 return 0; 5534 out: 5535 firmware_free(fw->data, fw->size); 5536 fw->data = NULL; 5537 fw->size = 0; 5538 return error ? error : EINVAL; 5539 } 5540 5541 static int 5542 iwn_clock_wait(struct iwn_softc *sc) 5543 { 5544 int ntries; 5545 5546 /* Set "initialization complete" bit. */ 5547 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 5548 5549 /* Wait for clock stabilization. */ 5550 for (ntries = 0; ntries < 2500; ntries++) { 5551 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) 5552 return 0; 5553 DELAY(10); 5554 } 5555 aprint_error_dev(sc->sc_dev, 5556 "timeout waiting for clock stabilization\n"); 5557 return ETIMEDOUT; 5558 } 5559 5560 static int 5561 iwn_apm_init(struct iwn_softc *sc) 5562 { 5563 pcireg_t reg; 5564 int error; 5565 5566 /* Disable L0s exit timer (NMI bug workaround). */ 5567 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 5568 /* Don't wait for ICH L0s (ICH bug workaround). */ 5569 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 5570 5571 /* Set FH wait threshold to max (HW bug under stress workaround). */ 5572 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); 5573 5574 /* Enable HAP INTA to move adapter from L1a to L0s. */ 5575 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); 5576 5577 /* Retrieve PCIe Active State Power Management (ASPM). */ 5578 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 5579 sc->sc_cap_off + PCIE_LCSR); 5580 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. 
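	 * If PCIe L1 Entry is enabled in the Link Control register, the
	 * L0S enable bit in IWN_GIO is set; otherwise it is cleared (see
	 * the test below).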
*/ 5581 if (reg & PCIE_LCSR_ASPM_L1) /* L1 Entry enabled. */ 5582 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 5583 else 5584 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 5585 5586 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 5587 sc->hw_type <= IWN_HW_REV_TYPE_1000) 5588 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT); 5589 5590 /* Wait for clock stabilization before accessing prph. */ 5591 if ((error = iwn_clock_wait(sc)) != 0) 5592 return error; 5593 5594 if ((error = iwn_nic_lock(sc)) != 0) 5595 return error; 5596 if (sc->hw_type == IWN_HW_REV_TYPE_4965) { 5597 /* Enable DMA and BSM (Bootstrap State Machine). */ 5598 iwn_prph_write(sc, IWN_APMG_CLK_EN, 5599 IWN_APMG_CLK_CTRL_DMA_CLK_RQT | 5600 IWN_APMG_CLK_CTRL_BSM_CLK_RQT); 5601 } else { 5602 /* Enable DMA. */ 5603 iwn_prph_write(sc, IWN_APMG_CLK_EN, 5604 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 5605 } 5606 DELAY(20); 5607 /* Disable L1-Active. */ 5608 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 5609 iwn_nic_unlock(sc); 5610 5611 return 0; 5612 } 5613 5614 static void 5615 iwn_apm_stop_master(struct iwn_softc *sc) 5616 { 5617 int ntries; 5618 5619 /* Stop busmaster DMA activity. */ 5620 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); 5621 for (ntries = 0; ntries < 100; ntries++) { 5622 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) 5623 return; 5624 DELAY(10); 5625 } 5626 aprint_error_dev(sc->sc_dev, 5627 "timeout waiting for master\n"); 5628 } 5629 5630 static void 5631 iwn_apm_stop(struct iwn_softc *sc) 5632 { 5633 iwn_apm_stop_master(sc); 5634 5635 /* Reset the entire device. */ 5636 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); 5637 DELAY(10); 5638 /* Clear "initialization complete" bit. */ 5639 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 5640 } 5641 5642 static int 5643 iwn4965_nic_config(struct iwn_softc *sc) 5644 { 5645 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { 5646 /* 5647 * I don't believe this to be correct but this is what the 5648 * vendor driver is doing. Probably the bits should not be 5649 * shifted in IWN_RFCFG_*. 5650 */ 5651 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 5652 IWN_RFCFG_TYPE(sc->rfcfg) | 5653 IWN_RFCFG_STEP(sc->rfcfg) | 5654 IWN_RFCFG_DASH(sc->rfcfg)); 5655 } 5656 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 5657 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 5658 return 0; 5659 } 5660 5661 static int 5662 iwn5000_nic_config(struct iwn_softc *sc) 5663 { 5664 uint32_t tmp; 5665 int error; 5666 5667 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { 5668 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 5669 IWN_RFCFG_TYPE(sc->rfcfg) | 5670 IWN_RFCFG_STEP(sc->rfcfg) | 5671 IWN_RFCFG_DASH(sc->rfcfg)); 5672 } 5673 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 5674 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 5675 5676 if ((error = iwn_nic_lock(sc)) != 0) 5677 return error; 5678 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); 5679 5680 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 5681 /* 5682 * Select first Switching Voltage Regulator (1.32V) to 5683 * solve a stability issue related to noisy DC2DC line 5684 * in the silicon of 1000 Series. 5685 */ 5686 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR); 5687 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK; 5688 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32; 5689 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp); 5690 } 5691 iwn_nic_unlock(sc); 5692 5693 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) { 5694 /* Use internal power amplifier only. 
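	 * Adapters flagged IWN_FLAG_INTERNAL_PA get the 2x2 IPA radio
	 * configuration written to IWN_GP_DRIVER below.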
*/ 5695 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA); 5696 } 5697 if ((sc->hw_type == IWN_HW_REV_TYPE_6050 || 5698 sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) { 5699 /* Indicate that ROM calibration version is >=6. */ 5700 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6); 5701 } 5702 if (sc->hw_type == IWN_HW_REV_TYPE_6005) 5703 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2); 5704 return 0; 5705 } 5706 5707 /* 5708 * Take NIC ownership over Intel Active Management Technology (AMT). 5709 */ 5710 static int 5711 iwn_hw_prepare(struct iwn_softc *sc) 5712 { 5713 int ntries; 5714 5715 /* Check if hardware is ready. */ 5716 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 5717 for (ntries = 0; ntries < 5; ntries++) { 5718 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 5719 IWN_HW_IF_CONFIG_NIC_READY) 5720 return 0; 5721 DELAY(10); 5722 } 5723 5724 /* Hardware not ready, force into ready state. */ 5725 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE); 5726 for (ntries = 0; ntries < 15000; ntries++) { 5727 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) & 5728 IWN_HW_IF_CONFIG_PREPARE_DONE)) 5729 break; 5730 DELAY(10); 5731 } 5732 if (ntries == 15000) 5733 return ETIMEDOUT; 5734 5735 /* Hardware should be ready now. */ 5736 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 5737 for (ntries = 0; ntries < 5; ntries++) { 5738 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 5739 IWN_HW_IF_CONFIG_NIC_READY) 5740 return 0; 5741 DELAY(10); 5742 } 5743 return ETIMEDOUT; 5744 } 5745 5746 static int 5747 iwn_hw_init(struct iwn_softc *sc) 5748 { 5749 struct iwn_ops *ops = &sc->ops; 5750 int error, chnl, qid; 5751 5752 /* Clear pending interrupts. */ 5753 IWN_WRITE(sc, IWN_INT, 0xffffffff); 5754 5755 if ((error = iwn_apm_init(sc)) != 0) { 5756 aprint_error_dev(sc->sc_dev, 5757 "could not power ON adapter\n"); 5758 return error; 5759 } 5760 5761 /* Select VMAIN power source. */ 5762 if ((error = iwn_nic_lock(sc)) != 0) 5763 return error; 5764 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK); 5765 iwn_nic_unlock(sc); 5766 5767 /* Perform adapter-specific initialization. */ 5768 if ((error = ops->nic_config(sc)) != 0) 5769 return error; 5770 5771 /* Initialize RX ring. */ 5772 if ((error = iwn_nic_lock(sc)) != 0) 5773 return error; 5774 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 5775 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0); 5776 /* Set physical address of RX ring (256-byte aligned). */ 5777 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8); 5778 /* Set physical address of RX status (16-byte aligned). */ 5779 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4); 5780 /* Enable RX. */ 5781 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 5782 IWN_FH_RX_CONFIG_ENA | 5783 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */ 5784 IWN_FH_RX_CONFIG_IRQ_DST_HOST | 5785 IWN_FH_RX_CONFIG_SINGLE_FRAME | 5786 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) | 5787 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG)); 5788 iwn_nic_unlock(sc); 5789 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7); 5790 5791 if ((error = iwn_nic_lock(sc)) != 0) 5792 return error; 5793 5794 /* Initialize TX scheduler. */ 5795 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 5796 5797 /* Set physical address of "keep warm" page (16-byte aligned). */ 5798 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4); 5799 5800 /* Initialize TX rings. */ 5801 for (qid = 0; qid < sc->ntxqs; qid++) { 5802 struct iwn_tx_ring *txq = &sc->txq[qid]; 5803 5804 /* Set physical address of TX ring (256-byte aligned). 
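	 * IWN_FH_CBBC_QUEUE stores the descriptor base shifted right by
	 * eight bits, hence the 256-byte alignment requirement on each
	 * TX ring's DMA memory.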
*/ 5805 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid), 5806 txq->desc_dma.paddr >> 8); 5807 } 5808 iwn_nic_unlock(sc); 5809 5810 /* Enable DMA channels. */ 5811 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 5812 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 5813 IWN_FH_TX_CONFIG_DMA_ENA | 5814 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA); 5815 } 5816 5817 /* Clear "radio off" and "commands blocked" bits. */ 5818 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 5819 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED); 5820 5821 /* Clear pending interrupts. */ 5822 IWN_WRITE(sc, IWN_INT, 0xffffffff); 5823 /* Enable interrupt coalescing. */ 5824 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8); 5825 /* Enable interrupts. */ 5826 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 5827 5828 /* _Really_ make sure "radio off" bit is cleared! */ 5829 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 5830 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 5831 5832 /* Enable shadow registers. */ 5833 if (sc->hw_type >= IWN_HW_REV_TYPE_6000) 5834 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff); 5835 5836 if ((error = ops->load_firmware(sc)) != 0) { 5837 aprint_error_dev(sc->sc_dev, 5838 "could not load firmware\n"); 5839 return error; 5840 } 5841 /* Wait at most one second for firmware alive notification. */ 5842 if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) { 5843 aprint_error_dev(sc->sc_dev, 5844 "timeout waiting for adapter to initialize\n"); 5845 return error; 5846 } 5847 /* Do post-firmware initialization. */ 5848 return ops->post_alive(sc); 5849 } 5850 5851 static void 5852 iwn_hw_stop(struct iwn_softc *sc) 5853 { 5854 int chnl, qid, ntries; 5855 5856 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); 5857 5858 /* Disable interrupts. */ 5859 IWN_WRITE(sc, IWN_INT_MASK, 0); 5860 IWN_WRITE(sc, IWN_INT, 0xffffffff); 5861 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff); 5862 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 5863 5864 /* Make sure we no longer hold the NIC lock. */ 5865 iwn_nic_unlock(sc); 5866 5867 /* Stop TX scheduler. */ 5868 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 5869 5870 /* Stop all DMA channels. */ 5871 if (iwn_nic_lock(sc) == 0) { 5872 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 5873 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); 5874 for (ntries = 0; ntries < 200; ntries++) { 5875 if (IWN_READ(sc, IWN_FH_TX_STATUS) & 5876 IWN_FH_TX_STATUS_IDLE(chnl)) 5877 break; 5878 DELAY(10); 5879 } 5880 } 5881 iwn_nic_unlock(sc); 5882 } 5883 5884 /* Stop RX ring. */ 5885 iwn_reset_rx_ring(sc, &sc->rxq); 5886 5887 /* Reset all TX rings. */ 5888 for (qid = 0; qid < sc->ntxqs; qid++) 5889 iwn_reset_tx_ring(sc, &sc->txq[qid]); 5890 5891 if (iwn_nic_lock(sc) == 0) { 5892 iwn_prph_write(sc, IWN_APMG_CLK_DIS, 5893 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 5894 iwn_nic_unlock(sc); 5895 } 5896 DELAY(5); 5897 /* Power OFF adapter. */ 5898 iwn_apm_stop(sc); 5899 } 5900 5901 static int 5902 iwn_init(struct ifnet *ifp) 5903 { 5904 struct iwn_softc *sc = ifp->if_softc; 5905 struct ieee80211com *ic = &sc->sc_ic; 5906 int error; 5907 5908 mutex_enter(&sc->sc_mtx); 5909 if (sc->sc_flags & IWN_FLAG_HW_INITED) 5910 goto out; 5911 if ((error = iwn_hw_prepare(sc)) != 0) { 5912 aprint_error_dev(sc->sc_dev, 5913 "hardware not ready\n"); 5914 goto fail; 5915 } 5916 5917 /* Check that the radio is not disabled by hardware switch. 
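	 * The IWN_GP_CNTRL_RFKILL bit reads as zero when the switch has
	 * killed the radio, in which case initialization is aborted with
	 * EPERM below.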
	 */
5918 	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
5919 		aprint_error_dev(sc->sc_dev,
5920 		    "radio is disabled by hardware switch\n");
5921 		error = EPERM;	/* :-) */
5922 		goto fail;
5923 	}
5924 
5925 	/* Read firmware images from the filesystem. */
5926 	if ((error = iwn_read_firmware(sc)) != 0) {
5927 		aprint_error_dev(sc->sc_dev,
5928 		    "could not read firmware\n");
5929 		goto fail;
5930 	}
5931 
5932 	/* Initialize interrupt mask to default value. */
5933 	sc->int_mask = IWN_INT_MASK_DEF;
5934 	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
5935 
5936 	/* Initialize hardware and upload firmware. */
5937 	KASSERT(sc->fw.data != NULL && sc->fw.size > 0);
5938 	error = iwn_hw_init(sc);
5939 	firmware_free(sc->fw.data, sc->fw.size);
5940 	sc->fw.data = NULL;
5941 	sc->fw.size = 0;
5942 	if (error != 0) {
5943 		aprint_error_dev(sc->sc_dev,
5944 		    "could not initialize hardware\n");
5945 		goto fail;
5946 	}
5947 
5948 	/* Configure adapter now that it is ready. */
5949 	if ((error = iwn_config(sc)) != 0) {
5950 		aprint_error_dev(sc->sc_dev,
5951 		    "could not configure device\n");
5952 		goto fail;
5953 	}
5954 
5955 	ifp->if_flags &= ~IFF_OACTIVE;
5956 	ifp->if_flags |= IFF_RUNNING;
5957 
5958 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5959 		ieee80211_begin_scan(ic, 0);
5960 	else
5961 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
5962 
5963 	sc->sc_flags |= IWN_FLAG_HW_INITED;
5964 out:
5965 	mutex_exit(&sc->sc_mtx);
5966 	return 0;
5967 
5968 fail:	mutex_exit(&sc->sc_mtx);
5969 	iwn_stop(ifp, 1);
5970 	return error;
5971 }
5972 
5973 static void
5974 iwn_stop(struct ifnet *ifp, int disable)
5975 {
5976 	struct iwn_softc *sc = ifp->if_softc;
5977 	struct ieee80211com *ic = &sc->sc_ic;
5978 
5979 	if (!disable)
5980 		mutex_enter(&sc->sc_mtx);
5981 	sc->sc_flags &= ~IWN_FLAG_HW_INITED;
5982 	ifp->if_timer = sc->sc_tx_timer = 0;
5983 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5984 
5985 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
5986 
5987 	/* Power OFF hardware. */
5988 	iwn_hw_stop(sc);
5989 
5990 	if (!disable)
5991 		mutex_exit(&sc->sc_mtx);
5992 }
5993 
5994 /*
5995  * XXX MCLGETI alternative
5996  *
5997  * With IWN_USE_RBUF defined it uses the rbuf cache for receive buffers
5998  * as long as free buffers are available, falling back to MEXTMALLOC.
5999  * Without IWN_USE_RBUF defined it uses MEXTMALLOC exclusively.
6000  * The MCLGET4K code is used for testing an alternative mbuf cache.
6001  */
6002 
6003 static struct mbuf *
6004 MCLGETIalt(struct iwn_softc *sc, int how,
6005     struct ifnet *ifp __unused, u_int size)
6006 {
6007 	struct mbuf *m;
6008 #ifdef IWN_USE_RBUF
6009 	struct iwn_rbuf *rbuf;
6010 #endif
6011 
6012 	MGETHDR(m, how, MT_DATA);
6013 	if (m == NULL)
6014 		return NULL;
6015 
6016 #ifdef IWN_USE_RBUF
6017 	if (sc->rxq.nb_free_entries > 0 &&
6018 	    (rbuf = iwn_alloc_rbuf(sc)) != NULL) {
6019 		/* Attach buffer to mbuf header.
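		 * MEXTADD attaches the rbuf's pre-allocated DMA-safe
		 * storage as external mbuf storage and registers
		 * iwn_free_rbuf as the free callback, so the buffer goes
		 * back on the RX freelist when the mbuf is released.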
*/ 6020 MEXTADD(m, rbuf->vaddr, size, 0, iwn_free_rbuf, rbuf); 6021 m->m_flags |= M_EXT_RW; 6022 } 6023 else { 6024 MEXTMALLOC(m, size, how); 6025 if ((m->m_flags & M_EXT) == 0) { 6026 m_freem(m); 6027 return NULL; 6028 } 6029 } 6030 6031 #else 6032 #ifdef MCLGET4K 6033 if (size == 4096) 6034 MCLGET4K(m, how); 6035 else 6036 panic("size must be 4k"); 6037 #else 6038 MEXTMALLOC(m, size, how); 6039 #endif 6040 if ((m->m_flags & M_EXT) == 0) { 6041 m_freem(m); 6042 return NULL; 6043 } 6044 #endif 6045 6046 return m; 6047 } 6048 6049 #ifdef IWN_USE_RBUF 6050 static struct iwn_rbuf * 6051 iwn_alloc_rbuf(struct iwn_softc *sc) 6052 { 6053 struct iwn_rbuf *rbuf; 6054 mutex_enter(&sc->rxq.freelist_mtx); 6055 6056 rbuf = SLIST_FIRST(&sc->rxq.freelist); 6057 if (rbuf != NULL) { 6058 SLIST_REMOVE_HEAD(&sc->rxq.freelist, next); 6059 sc->rxq.nb_free_entries --; 6060 } 6061 mutex_exit(&sc->rxq.freelist_mtx); 6062 return rbuf; 6063 } 6064 6065 /* 6066 * This is called automatically by the network stack when the mbuf to which 6067 * our RX buffer is attached is freed. 6068 */ 6069 static void 6070 iwn_free_rbuf(struct mbuf* m, void *buf, size_t size, void *arg) 6071 { 6072 struct iwn_rbuf *rbuf = arg; 6073 struct iwn_softc *sc = rbuf->sc; 6074 6075 /* Put the RX buffer back in the free list. */ 6076 mutex_enter(&sc->rxq.freelist_mtx); 6077 SLIST_INSERT_HEAD(&sc->rxq.freelist, rbuf, next); 6078 mutex_exit(&sc->rxq.freelist_mtx); 6079 6080 sc->rxq.nb_free_entries ++; 6081 if (__predict_true(m != NULL)) 6082 pool_cache_put(mb_cache, m); 6083 } 6084 6085 static int 6086 iwn_alloc_rpool(struct iwn_softc *sc) 6087 { 6088 struct iwn_rx_ring *ring = &sc->rxq; 6089 struct iwn_rbuf *rbuf; 6090 int i, error; 6091 6092 mutex_init(&ring->freelist_mtx, MUTEX_DEFAULT, IPL_NET); 6093 6094 /* Allocate a big chunk of DMA'able memory... */ 6095 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->buf_dma, NULL, 6096 IWN_RBUF_COUNT * IWN_RBUF_SIZE, PAGE_SIZE); 6097 if (error != 0) { 6098 aprint_error_dev(sc->sc_dev, 6099 "could not allocate RX buffers DMA memory\n"); 6100 return error; 6101 } 6102 /* ...and split it into chunks of IWN_RBUF_SIZE bytes. */ 6103 SLIST_INIT(&ring->freelist); 6104 for (i = 0; i < IWN_RBUF_COUNT; i++) { 6105 rbuf = &ring->rbuf[i]; 6106 6107 rbuf->sc = sc; /* Backpointer for callbacks. */ 6108 rbuf->vaddr = (void *)((vaddr_t)ring->buf_dma.vaddr + i * IWN_RBUF_SIZE); 6109 rbuf->paddr = ring->buf_dma.paddr + i * IWN_RBUF_SIZE; 6110 6111 SLIST_INSERT_HEAD(&ring->freelist, rbuf, next); 6112 } 6113 ring->nb_free_entries = IWN_RBUF_COUNT; 6114 return 0; 6115 } 6116 6117 static void 6118 iwn_free_rpool(struct iwn_softc *sc) 6119 { 6120 iwn_dma_contig_free(&sc->rxq.buf_dma); 6121 } 6122 #endif 6123 6124 /* 6125 * XXX code from OpenBSD src/sys/net80211/ieee80211_output.c 6126 * Copyright (c) 2001 Atsushi Onoe 6127 * Copyright (c) 2002, 2003 Sam Leffler, Errno Consulting 6128 * Copyright (c) 2007-2009 Damien Bergamini 6129 * All rights reserved. 6130 */ 6131 6132 /* 6133 * Add an SSID element to a frame (see 7.3.2.1). 6134 */ 6135 static u_int8_t * 6136 ieee80211_add_ssid(u_int8_t *frm, const u_int8_t *ssid, u_int len) 6137 { 6138 *frm++ = IEEE80211_ELEMID_SSID; 6139 *frm++ = len; 6140 memcpy(frm, ssid, len); 6141 return frm + len; 6142 } 6143 6144 /* 6145 * Add a supported rates element to a frame (see 7.3.2.2). 
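 * At most IEEE80211_RATE_SIZE rates are placed in this element; any
 * additional rates go into the extended supported rates element built
 * by ieee80211_add_xrates.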
6146 */ 6147 static u_int8_t * 6148 ieee80211_add_rates(u_int8_t *frm, const struct ieee80211_rateset *rs) 6149 { 6150 int nrates; 6151 6152 *frm++ = IEEE80211_ELEMID_RATES; 6153 nrates = min(rs->rs_nrates, IEEE80211_RATE_SIZE); 6154 *frm++ = nrates; 6155 memcpy(frm, rs->rs_rates, nrates); 6156 return frm + nrates; 6157 } 6158 6159 /* 6160 * Add an extended supported rates element to a frame (see 7.3.2.14). 6161 */ 6162 static u_int8_t * 6163 ieee80211_add_xrates(u_int8_t *frm, const struct ieee80211_rateset *rs) 6164 { 6165 int nrates; 6166 6167 KASSERT(rs->rs_nrates > IEEE80211_RATE_SIZE); 6168 6169 *frm++ = IEEE80211_ELEMID_XRATES; 6170 nrates = rs->rs_nrates - IEEE80211_RATE_SIZE; 6171 *frm++ = nrates; 6172 memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates); 6173 return frm + nrates; 6174 } 6175 6176 /* 6177 * XXX: Hack to set the current channel to the value advertised in beacons or 6178 * probe responses. Only used during AP detection. 6179 * XXX: Duplicated from if_iwi.c 6180 */ 6181 static void 6182 iwn_fix_channel(struct ieee80211com *ic, struct mbuf *m) 6183 { 6184 struct ieee80211_frame *wh; 6185 uint8_t subtype; 6186 uint8_t *frm, *efrm; 6187 6188 wh = mtod(m, struct ieee80211_frame *); 6189 6190 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT) 6191 return; 6192 6193 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 6194 6195 if (subtype != IEEE80211_FC0_SUBTYPE_BEACON && 6196 subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP) 6197 return; 6198 6199 frm = (uint8_t *)(wh + 1); 6200 efrm = mtod(m, uint8_t *) + m->m_len; 6201 6202 frm += 12; /* skip tstamp, bintval and capinfo fields */ 6203 while (frm < efrm) { 6204 if (*frm == IEEE80211_ELEMID_DSPARMS) 6205 #if IEEE80211_CHAN_MAX < 255 6206 if (frm[2] <= IEEE80211_CHAN_MAX) 6207 #endif 6208 ic->ic_curchan = &ic->ic_channels[frm[2]]; 6209 6210 frm += frm[1] + 2; 6211 } 6212 } 6213 6214 #ifdef notyetMODULE 6215 6216 MODULE(MODULE_CLASS_DRIVER, if_iwn, "pci"); 6217 6218 #ifdef _MODULE 6219 #include "ioconf.c" 6220 #endif 6221 6222 static int 6223 if_iwn_modcmd(modcmd_t cmd, void *data) 6224 { 6225 int error = 0; 6226 6227 switch (cmd) { 6228 case MODULE_CMD_INIT: 6229 #ifdef _MODULE 6230 error = config_init_component(cfdriver_ioconf_if_iwn, 6231 cfattach_ioconf_if_iwn, cfdata_ioconf_if_iwn); 6232 #endif 6233 return error; 6234 case MODULE_CMD_FINI: 6235 #ifdef _MODULE 6236 error = config_fini_component(cfdriver_ioconf_if_iwn, 6237 cfattach_ioconf_if_iwn, cfdata_ioconf_if_iwn); 6238 #endif 6239 return error; 6240 case MODULE_CMD_AUTOUNLOAD: 6241 #ifdef _MODULE 6242 /* XXX This is not optional! */ 6243 #endif 6244 return error; 6245 default: 6246 return ENOTTY; 6247 } 6248 } 6249 #endif 6250