/*	$NetBSD: if_iwn.c,v 1.94 2020/03/20 16:35:41 sevan Exp $	*/
/*	$OpenBSD: if_iwn.c,v 1.135 2014/09/10 07:22:09 dcoppa Exp $	*/

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
 * adapters.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_iwn.c,v 1.94 2020/03/20 16:35:41 sevan Exp $");

#define IWN_USE_RBUF	/* Use local storage for RX */
#undef IWN_HWCRYPTO	/* XXX does not even compile yet */

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#ifdef notyetMODULE
#include <sys/module.h>
#endif
#include <sys/mutex.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/callout.h>

#include <dev/sysmon/sysmonvar.h>

#include <sys/bus.h>
#include <machine/endian.h>
#include <sys/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <net/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/firmload.h>

#include <dev/pci/if_iwnreg.h>
#include <dev/pci/if_iwnvar.h>

static const pci_product_id_t iwn_devices[] = {
	PCI_PRODUCT_INTEL_WIFI_LINK_1030_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_1030_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_4965_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_4965_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_4965_3,
	PCI_PRODUCT_INTEL_WIFI_LINK_4965_4,
	PCI_PRODUCT_INTEL_WIFI_LINK_5100_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_5100_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_5150_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_5150_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_5300_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_5300_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_5350_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_5350_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_1000_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_1000_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_6000_3X3_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_6000_3X3_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_6050_2X2_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_6050_2X2_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_6005_2X2_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_6005_2X2_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_6230_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_6230_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_6235,
	PCI_PRODUCT_INTEL_WIFI_LINK_6235_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_100_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_100_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_130_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_130_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_2230_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_2230_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_2200_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_2200_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_135_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_135_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_105_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_105_2,
};

static int	iwn_match(device_t , struct cfdata *, void *);
static void	iwn_attach(device_t , device_t , void *);
static int	iwn4965_attach(struct iwn_softc *, pci_product_id_t);
static int	iwn5000_attach(struct iwn_softc *, pci_product_id_t);
static void	iwn_radiotap_attach(struct iwn_softc *);
static int	iwn_detach(device_t , int);
#if 0
static void	iwn_power(int, void *);
#endif
static bool	iwn_resume(device_t, const pmf_qual_t *);
static int	iwn_nic_lock(struct iwn_softc *);
static int	iwn_eeprom_lock(struct iwn_softc *);
static int	iwn_init_otprom(struct iwn_softc *);
static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
static int	iwn_dma_contig_alloc(bus_dma_tag_t, struct iwn_dma_info *,
		    void **, bus_size_t, bus_size_t);
static void	iwn_dma_contig_free(struct iwn_dma_info *);
static int	iwn_alloc_sched(struct iwn_softc *);
static void	iwn_free_sched(struct iwn_softc *);
static int	iwn_alloc_kw(struct iwn_softc *);
static void	iwn_free_kw(struct iwn_softc *);
static int	iwn_alloc_ict(struct iwn_softc *);
static void	iwn_free_ict(struct iwn_softc *);
static int	iwn_alloc_fwmem(struct iwn_softc *);
static void	iwn_free_fwmem(struct iwn_softc *);
static int	iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void	iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void	iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static int	iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
		    int);
static void	iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void	iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void	iwn5000_ict_reset(struct iwn_softc *);
static int	iwn_read_eeprom(struct iwn_softc *);
static void	iwn4965_read_eeprom(struct iwn_softc *);

#ifdef IWN_DEBUG
static void	iwn4965_print_power_group(struct iwn_softc *, int);
#endif
static void	iwn5000_read_eeprom(struct iwn_softc *);
static void	iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
static void	iwn_read_eeprom_enhinfo(struct iwn_softc *);
static struct	ieee80211_node *iwn_node_alloc(struct ieee80211_node_table *);
static void	iwn_newassoc(struct ieee80211_node *, int);
static int	iwn_media_change(struct ifnet *);
static int	iwn_newstate(struct ieee80211com *, enum ieee80211_state, int);
static void	iwn_iter_func(void *, struct ieee80211_node *);
static void	iwn_calib_timeout(void *);
static void	iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
#ifndef IEEE80211_NO_HT
static void	iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
#endif
static void	iwn5000_rx_calib_results(struct iwn_softc *,
		    struct iwn_rx_desc *, struct iwn_rx_data *);
static void	iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
		    uint8_t);
static void	iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
static void	iwn_notif_intr(struct iwn_softc *);
static void	iwn_wakeup_intr(struct iwn_softc *);
static void	iwn_fatal_intr(struct iwn_softc *);
static int	iwn_intr(void *);
static void	iwn_softintr(void *);
static void	iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
static void	iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
#ifdef notyet
static void	iwn5000_reset_sched(struct iwn_softc *, int, int);
#endif
static int	iwn_tx(struct iwn_softc *, struct mbuf *,
		    struct ieee80211_node *, int);
static void	iwn_start(struct ifnet *);
static void	iwn_watchdog(struct ifnet *);
static int	iwn_ioctl(struct ifnet *, u_long, void *);
static int	iwn_cmd(struct iwn_softc *, int, const void *, int, int);
static int	iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
static int	iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
static int	iwn_set_link_quality(struct iwn_softc *,
		    struct ieee80211_node *);
static int	iwn_add_broadcast_node(struct iwn_softc *, int);
static void	iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
static int	iwn_set_critical_temp(struct iwn_softc *);
static int	iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
static void	iwn4965_power_calibration(struct iwn_softc *, int);
static int	iwn4965_set_txpower(struct iwn_softc *, int);
static int	iwn5000_set_txpower(struct iwn_softc *, int);
static int	iwn4965_get_rssi(const struct iwn_rx_stat *);
static int	iwn5000_get_rssi(const struct iwn_rx_stat *);
static int	iwn_get_noise(const struct iwn_rx_general_stats *);
static int	iwn4965_get_temperature(struct iwn_softc *);
static int	iwn5000_get_temperature(struct iwn_softc *);
static int	iwn_init_sensitivity(struct iwn_softc *);
static void	iwn_collect_noise(struct iwn_softc *,
		    const struct iwn_rx_general_stats *);
static int	iwn4965_init_gains(struct iwn_softc *);
static int	iwn5000_init_gains(struct iwn_softc *);
static int	iwn4965_set_gains(struct iwn_softc *);
static int	iwn5000_set_gains(struct iwn_softc *);
static void	iwn_tune_sensitivity(struct iwn_softc *,
		    const struct iwn_rx_stats *);
static int	iwn_send_sensitivity(struct iwn_softc *);
static int	iwn_set_pslevel(struct iwn_softc *, int, int, int);
static int	iwn5000_runtime_calib(struct iwn_softc *);

static int	iwn_config_bt_coex_bluetooth(struct iwn_softc *);
static int	iwn_config_bt_coex_prio_table(struct iwn_softc *);
static int	iwn_config_bt_coex_adv1(struct iwn_softc *);
static int	iwn_config_bt_coex_adv2(struct iwn_softc *);

static int	iwn_config(struct iwn_softc *);
static uint16_t	iwn_get_active_dwell_time(struct iwn_softc *, uint16_t,
		    uint8_t);
static uint16_t	iwn_limit_dwell(struct iwn_softc *, uint16_t);
static uint16_t	iwn_get_passive_dwell_time(struct iwn_softc *, uint16_t);
static int	iwn_scan(struct iwn_softc *, uint16_t);
static int	iwn_auth(struct iwn_softc *);
static int	iwn_run(struct iwn_softc *);
#ifdef IWN_HWCRYPTO
static int	iwn_set_key(struct ieee80211com *, struct ieee80211_node *,
		    struct ieee80211_key *);
static void	iwn_delete_key(struct ieee80211com *, struct ieee80211_node *,
		    struct ieee80211_key *);
#endif
static int	iwn_wme_update(struct ieee80211com *);
#ifndef IEEE80211_NO_HT
static int	iwn_ampdu_rx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwn_ampdu_rx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static int	iwn_ampdu_tx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwn_ampdu_tx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwn4965_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, uint8_t, uint16_t);
static void	iwn4965_ampdu_tx_stop(struct iwn_softc *,
		    uint8_t, uint16_t);
static void	iwn5000_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, uint8_t, uint16_t);
static void	iwn5000_ampdu_tx_stop(struct iwn_softc *,
		    uint8_t, uint16_t);
#endif
static int	iwn5000_query_calibration(struct iwn_softc *);
static int	iwn5000_send_calibration(struct iwn_softc *);
static int	iwn5000_send_wimax_coex(struct iwn_softc *);
static int	iwn6000_temp_offset_calib(struct iwn_softc *);
static int	iwn2000_temp_offset_calib(struct iwn_softc *);
static int	iwn4965_post_alive(struct iwn_softc *);
static int	iwn5000_post_alive(struct iwn_softc *);
static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
		    int);
static int	iwn4965_load_firmware(struct iwn_softc *);
static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
		    const uint8_t *, int);
static int	iwn5000_load_firmware(struct iwn_softc *);
static int	iwn_read_firmware_leg(struct iwn_softc *,
		    struct iwn_fw_info *);
static int	iwn_read_firmware_tlv(struct iwn_softc *,
		    struct iwn_fw_info *, uint16_t);
static int	iwn_read_firmware(struct iwn_softc *);
static int	iwn_clock_wait(struct iwn_softc *);
static int	iwn_apm_init(struct iwn_softc *);
static void	iwn_apm_stop_master(struct iwn_softc *);
static void	iwn_apm_stop(struct iwn_softc *);
static int	iwn4965_nic_config(struct iwn_softc *);
static int	iwn5000_nic_config(struct iwn_softc *);
static int	iwn_hw_prepare(struct iwn_softc *);
static int	iwn_hw_init(struct iwn_softc *);
static void	iwn_hw_stop(struct iwn_softc *);
static int	iwn_init(struct ifnet *);
static void	iwn_stop(struct ifnet *, int);

/* XXX MCLGETI alternative */
static struct	mbuf *MCLGETIalt(struct iwn_softc *, int,
		    struct ifnet *, u_int);
#ifdef IWN_USE_RBUF
static struct	iwn_rbuf *iwn_alloc_rbuf(struct iwn_softc *);
static void	iwn_free_rbuf(struct mbuf *, void *, size_t, void *);
static int	iwn_alloc_rpool(struct iwn_softc *);
static void	iwn_free_rpool(struct iwn_softc *);
#endif

static void	iwn_fix_channel(struct ieee80211com *, struct mbuf *,
		    struct iwn_rx_stat *);

#ifdef IWN_DEBUG
#define DPRINTF(x)	do { if (iwn_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwn_debug >= (n)) printf x; } while (0)
int iwn_debug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#endif

CFATTACH_DECL_NEW(iwn,
    sizeof(struct iwn_softc), iwn_match, iwn_attach,
    iwn_detach, NULL);

static int
iwn_match(device_t parent, cfdata_t match __unused, void *aux)
{
	struct pci_attach_args *pa = aux;
	size_t i;

	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
		return 0;

	for (i = 0; i < __arraycount(iwn_devices); i++)
		if (PCI_PRODUCT(pa->pa_id) == iwn_devices[i])
			return 1;

	return 0;
}

static void
iwn_attach(device_t parent __unused, device_t self, void *aux)
{
	struct iwn_softc *sc = device_private(self);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct pci_attach_args *pa = aux;
	const char *intrstr;
	pcireg_t memtype, reg;
	int i, error;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	callout_init(&sc->calib_to, 0);
	callout_setfunc(&sc->calib_to, iwn_calib_timeout, sc);

	pci_aprint_devinfo(pa, NULL);

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
	if (error == 0) {
		aprint_error_dev(self,
		    "PCIe capability structure not found!\n");
		return;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	if (reg & 0xff00)
		pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* Enable bus-mastering. */
	/* XXX verify the bus-mastering is really needed (not in OpenBSD) */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IWN_PCI_BAR0);
	error = pci_mapreg_map(pa, IWN_PCI_BAR0, memtype, 0, &sc->sc_st,
	    &sc->sc_sh, NULL, &sc->sc_sz);
	if (error != 0) {
		aprint_error_dev(self, "can't map mem space\n");
		return;
	}

	sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwn_softintr, sc);
	if (sc->sc_soft_ih == NULL) {
		aprint_error_dev(self, "can't establish soft interrupt\n");
		goto unmap;
	}

	/* Install interrupt handler. */
	error = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
	if (error) {
		aprint_error_dev(self, "can't allocate interrupt\n");
		goto failsi;
	}
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX)
		CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
	else
		SET(reg, PCI_COMMAND_INTERRUPT_DISABLE);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
	    IPL_NET, iwn_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto failia;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* Read hardware revision and attach. */
	sc->hw_type =
	    (IWN_READ(sc, IWN_HW_REV) & IWN_HW_REV_TYPE_MASK)
	    >> IWN_HW_REV_TYPE_SHIFT;
	if (sc->hw_type == IWN_HW_REV_TYPE_4965)
		error = iwn4965_attach(sc, PCI_PRODUCT(pa->pa_id));
	else
		error = iwn5000_attach(sc, PCI_PRODUCT(pa->pa_id));
	if (error != 0) {
		aprint_error_dev(self, "could not attach device\n");
		goto failih;
	}

	if ((error = iwn_hw_prepare(sc)) != 0) {
		aprint_error_dev(self, "hardware not ready\n");
		goto failih;
	}

	/* Read MAC address, channels, etc from EEPROM. */
	if ((error = iwn_read_eeprom(sc)) != 0) {
		aprint_error_dev(self, "could not read EEPROM\n");
		goto failih;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwn_alloc_fwmem(sc)) != 0) {
		aprint_error_dev(self,
		    "could not allocate memory for firmware\n");
		goto failih;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwn_alloc_kw(sc)) != 0) {
		aprint_error_dev(self, "could not allocate keep warm page\n");
		goto fail1;
	}

	/* Allocate ICT table for 5000 Series. */
	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
	    (error = iwn_alloc_ict(sc)) != 0) {
		aprint_error_dev(self, "could not allocate ICT table\n");
		goto fail2;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwn_alloc_sched(sc)) != 0) {
		aprint_error_dev(self,
		    "could not allocate TX scheduler rings\n");
		goto fail3;
	}

#ifdef IWN_USE_RBUF
	/* Allocate RX buffers. */
	if ((error = iwn_alloc_rpool(sc)) != 0) {
		aprint_error_dev(self, "could not allocate RX buffers\n");
		goto fail3;
	}
#endif

	/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
	for (i = 0; i < sc->ntxqs; i++) {
		if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
			aprint_error_dev(self,
			    "could not allocate TX ring %d\n", i);
			goto fail4;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		aprint_error_dev(self, "could not allocate RX ring\n");
		goto fail4;
	}

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);

	/* Count the number of available chains. */
	sc->ntxchains =
	    ((sc->txchainmask >> 2) & 1) +
	    ((sc->txchainmask >> 1) & 1) +
	    ((sc->txchainmask >> 0) & 1);
	sc->nrxchains =
	    ((sc->rxchainmask >> 2) & 1) +
	    ((sc->rxchainmask >> 1) & 1) +
	    ((sc->rxchainmask >> 0) & 1);
	aprint_normal_dev(self, "MIMO %dT%dR, %.4s, address %s\n",
	    sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
	    ether_sprintf(ic->ic_myaddr));

	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/*
	 * Set device capabilities.
	 * XXX OpenBSD has IEEE80211_C_WEP, IEEE80211_C_RSN, and
	 * IEEE80211_C_PMGT too.
	 */
	ic->ic_caps =
	    IEEE80211_C_IBSS |		/* IBSS mode support */
	    IEEE80211_C_WPA |		/* 802.11i */
	    IEEE80211_C_MONITOR |	/* monitor mode supported */
	    IEEE80211_C_TXPMGT |	/* tx power management */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
	    IEEE80211_C_WME;		/* 802.11e */

#ifndef IEEE80211_NO_HT
	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
		/* Set HT capabilities. */
		ic->ic_htcaps =
#if IWN_RBUF_SIZE == 8192
		    IEEE80211_HTCAP_AMSDU7935 |
#endif
		    IEEE80211_HTCAP_CBW20_40 |
		    IEEE80211_HTCAP_SGI20 |
		    IEEE80211_HTCAP_SGI40;
		if (sc->hw_type != IWN_HW_REV_TYPE_4965)
			ic->ic_htcaps |= IEEE80211_HTCAP_GF;
		if (sc->hw_type == IWN_HW_REV_TYPE_6050)
			ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
		else
			ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
	}
#endif	/* !IEEE80211_NO_HT */

	/* Set supported legacy rates. */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
	if (sc->sc_flags & IWN_FLAG_HAS_5GHZ) {
		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
	}
#ifndef IEEE80211_NO_HT
	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
		/* Set supported HT rates. */
		ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
		if (sc->nrxchains > 1)
			ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
		if (sc->nrxchains > 2)
			ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
	}
#endif

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[0];

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwn_init;
	ifp->if_ioctl = iwn_ioctl;
	ifp->if_start = iwn_start;
	ifp->if_stop = iwn_stop;
	ifp->if_watchdog = iwn_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	error = if_initialize(ifp);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
		    error);
		goto fail5;
	}
	ieee80211_ifattach(ic);
	/* Use common softint-based if_input */
	ifp->if_percpuq = if_percpuq_create(ifp);
	if_register(ifp);

	ic->ic_node_alloc = iwn_node_alloc;
	ic->ic_newassoc = iwn_newassoc;
#ifdef IWN_HWCRYPTO
	ic->ic_crypto.cs_key_set = iwn_set_key;
	ic->ic_crypto.cs_key_delete = iwn_delete_key;
#endif
	ic->ic_wme.wme_update = iwn_wme_update;
#ifndef IEEE80211_NO_HT
	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
	ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
	ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
#endif

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwn_newstate;

	/* XXX media locking needs revisiting */
	mutex_init(&sc->sc_media_mtx, MUTEX_DEFAULT, IPL_SOFTNET);
	ieee80211_media_init_with_lock(ic,
	    iwn_media_change, ieee80211_media_status, &sc->sc_media_mtx);

	sc->amrr.amrr_min_success_threshold = 1;
	sc->amrr.amrr_max_success_threshold = 15;

	iwn_radiotap_attach(sc);

	/*
	 * XXX for NetBSD, OpenBSD timeout_set replaced by
	 * callout_init and callout_setfunc, above.
	 */

	if (pmf_device_register(self, NULL, iwn_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* XXX NetBSD add call to ieee80211_announce for dmesg. */
	ieee80211_announce(ic);

	sc->sc_flags |= IWN_FLAG_ATTACHED;
	return;

	/* Free allocated memory if something failed during attachment. */
fail5:	iwn_free_rx_ring(sc, &sc->rxq);
fail4:	while (--i >= 0)
		iwn_free_tx_ring(sc, &sc->txq[i]);
#ifdef IWN_USE_RBUF
	iwn_free_rpool(sc);
#endif
	iwn_free_sched(sc);
fail3:	if (sc->ict != NULL)
		iwn_free_ict(sc);
fail2:	iwn_free_kw(sc);
fail1:	iwn_free_fwmem(sc);
failih:	pci_intr_disestablish(sc->sc_pct, sc->sc_ih);
	sc->sc_ih = NULL;
failia:	pci_intr_release(sc->sc_pct, sc->sc_pihp, 1);
	sc->sc_pihp = NULL;
failsi:	softint_disestablish(sc->sc_soft_ih);
	sc->sc_soft_ih = NULL;
unmap:	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
}

int
iwn4965_attach(struct iwn_softc *sc, pci_product_id_t pid)
{
	struct iwn_ops *ops = &sc->ops;

	ops->load_firmware = iwn4965_load_firmware;
	ops->read_eeprom = iwn4965_read_eeprom;
	ops->post_alive = iwn4965_post_alive;
	ops->nic_config = iwn4965_nic_config;
	ops->config_bt_coex = iwn_config_bt_coex_bluetooth;
	ops->update_sched = iwn4965_update_sched;
	ops->get_temperature = iwn4965_get_temperature;
	ops->get_rssi = iwn4965_get_rssi;
	ops->set_txpower = iwn4965_set_txpower;
	ops->init_gains = iwn4965_init_gains;
	ops->set_gains = iwn4965_set_gains;
	ops->add_node = iwn4965_add_node;
	ops->tx_done = iwn4965_tx_done;
#ifndef IEEE80211_NO_HT
	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
#endif
	sc->ntxqs = IWN4965_NTXQUEUES;
	sc->ndmachnls = IWN4965_NDMACHNLS;
	sc->broadcast_id = IWN4965_ID_BROADCAST;
	sc->rxonsz = IWN4965_RXONSZ;
	sc->schedsz = IWN4965_SCHEDSZ;
	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
	sc->fwsz = IWN4965_FWSZ;
	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
	sc->limits = &iwn4965_sensitivity_limits;
	sc->fwname = "iwlwifi-4965-2.ucode";
	/* Override chains masks, ROM is known to be broken. */
	sc->txchainmask = IWN_ANT_AB;
	sc->rxchainmask = IWN_ANT_ABC;

	return 0;
}

int
iwn5000_attach(struct iwn_softc *sc, pci_product_id_t pid)
{
	struct iwn_ops *ops = &sc->ops;

	ops->load_firmware = iwn5000_load_firmware;
	ops->read_eeprom = iwn5000_read_eeprom;
	ops->post_alive = iwn5000_post_alive;
	ops->nic_config = iwn5000_nic_config;
	ops->config_bt_coex = iwn_config_bt_coex_bluetooth;
	ops->update_sched = iwn5000_update_sched;
	ops->get_temperature = iwn5000_get_temperature;
	ops->get_rssi = iwn5000_get_rssi;
	ops->set_txpower = iwn5000_set_txpower;
	ops->init_gains = iwn5000_init_gains;
	ops->set_gains = iwn5000_set_gains;
	ops->add_node = iwn5000_add_node;
	ops->tx_done = iwn5000_tx_done;
#ifndef IEEE80211_NO_HT
	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
#endif
	sc->ntxqs = IWN5000_NTXQUEUES;
	sc->ndmachnls = IWN5000_NDMACHNLS;
	sc->broadcast_id = IWN5000_ID_BROADCAST;
	sc->rxonsz = IWN5000_RXONSZ;
	sc->schedsz = IWN5000_SCHEDSZ;
	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
	sc->fwsz = IWN5000_FWSZ;
	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;

	switch (sc->hw_type) {
	case IWN_HW_REV_TYPE_5100:
		sc->limits = &iwn5000_sensitivity_limits;
		sc->fwname = "iwlwifi-5000-2.ucode";
		/* Override chains masks, ROM is known to be broken. */
		sc->txchainmask = IWN_ANT_B;
		sc->rxchainmask = IWN_ANT_AB;
		break;
	case IWN_HW_REV_TYPE_5150:
		sc->limits = &iwn5150_sensitivity_limits;
		sc->fwname = "iwlwifi-5150-2.ucode";
		break;
	case IWN_HW_REV_TYPE_5300:
	case IWN_HW_REV_TYPE_5350:
		sc->limits = &iwn5000_sensitivity_limits;
		sc->fwname = "iwlwifi-5000-2.ucode";
		break;
	case IWN_HW_REV_TYPE_1000:
		sc->limits = &iwn1000_sensitivity_limits;
		if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_100_1 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_100_2)
			sc->fwname = "iwlwifi-100-5.ucode";
		else
			sc->fwname = "iwlwifi-1000-3.ucode";
		break;
	case IWN_HW_REV_TYPE_6000:
		sc->limits = &iwn6000_sensitivity_limits;
		sc->fwname = "iwlwifi-6000-4.ucode";
		if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_1 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_2) {
			sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
			/* Override chains masks, ROM is known to be broken. */
			sc->txchainmask = IWN_ANT_BC;
			sc->rxchainmask = IWN_ANT_BC;
		}
		break;
	case IWN_HW_REV_TYPE_6050:
		sc->limits = &iwn6000_sensitivity_limits;
		sc->fwname = "iwlwifi-6050-5.ucode";
		break;
	case IWN_HW_REV_TYPE_6005:
		sc->limits = &iwn6000_sensitivity_limits;
		/* Type 6030 cards return IWN_HW_REV_TYPE_6005 */
		if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_1030_1 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_1030_2 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6230_1 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6230_2 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6235 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6235_2) {
			sc->fwname = "iwlwifi-6000g2b-6.ucode";
			ops->config_bt_coex = iwn_config_bt_coex_adv1;
		}
		/*
		 * This covers:
		 * PCI_PRODUCT_INTEL_WIFI_LINK_6005_2X2_1
		 * PCI_PRODUCT_INTEL_WIFI_LINK_6005_2X2_2
		 */
		else
			sc->fwname = "iwlwifi-6000g2a-5.ucode";
		break;
	case IWN_HW_REV_TYPE_2030:
		sc->limits = &iwn2030_sensitivity_limits;
		sc->fwname = "iwlwifi-2030-6.ucode";
		ops->config_bt_coex = iwn_config_bt_coex_adv2;
		break;
	case IWN_HW_REV_TYPE_2000:
		sc->limits = &iwn2000_sensitivity_limits;
		sc->fwname = "iwlwifi-2000-6.ucode";
		break;
	case IWN_HW_REV_TYPE_135:
		sc->limits = &iwn2000_sensitivity_limits;
		sc->fwname = "iwlwifi-135-6.ucode";
		ops->config_bt_coex = iwn_config_bt_coex_adv2;
		break;
	case IWN_HW_REV_TYPE_105:
		sc->limits = &iwn2000_sensitivity_limits;
		sc->fwname = "iwlwifi-105-6.ucode";
		break;
	default:
		aprint_normal(": adapter type %d not supported\n", sc->hw_type);
		return ENOTSUP;
	}
	return 0;
}

/*
 * Attach the interface to 802.11 radiotap.
 */
static void
iwn_radiotap_attach(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ic.ic_ifp;

	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
	    &sc->sc_drvbpf);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWN_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWN_TX_RADIOTAP_PRESENT);
}

static int
iwn_detach(device_t self, int flags __unused)
{
	struct iwn_softc *sc = device_private(self);
	struct ifnet *ifp = sc->sc_ic.ic_ifp;
	int qid;

	if (!(sc->sc_flags & IWN_FLAG_ATTACHED))
		return 0;

	callout_stop(&sc->calib_to);

	/* Uninstall interrupt handler. */
	if (sc->sc_ih != NULL)
		pci_intr_disestablish(sc->sc_pct, sc->sc_ih);
	if (sc->sc_pihp != NULL)
		pci_intr_release(sc->sc_pct, sc->sc_pihp, 1);
	if (sc->sc_soft_ih != NULL)
		softint_disestablish(sc->sc_soft_ih);

	/* Free DMA resources. */
	iwn_free_rx_ring(sc, &sc->rxq);
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_free_tx_ring(sc, &sc->txq[qid]);
#ifdef IWN_USE_RBUF
	iwn_free_rpool(sc);
#endif
	iwn_free_sched(sc);
	iwn_free_kw(sc);
	if (sc->ict != NULL)
		iwn_free_ict(sc);
	iwn_free_fwmem(sc);

	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);

	ieee80211_ifdetach(&sc->sc_ic);
	if_detach(ifp);

	return 0;
}

#if 0
/*
 * XXX Investigate if clearing the PCI retry timeout could eliminate
 * the repeated scan calls. Also the calls to if_init and if_start
 * are similar to the effect of adding the call to ifioctl_common .
 */
static void
iwn_power(int why, void *arg)
{
	struct iwn_softc *sc = arg;
	struct ifnet *ifp;
	pcireg_t reg;
	int s;

	if (why != PWR_RESUME)
		return;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	if (reg & 0xff00)
		pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	s = splnet();
	ifp = &sc->sc_ic.ic_if;
	if (ifp->if_flags & IFF_UP) {
		ifp->if_init(ifp);
		if (ifp->if_flags & IFF_RUNNING)
			ifp->if_start(ifp);
	}
	splx(s);
}
#endif

static bool
iwn_resume(device_t dv, const pmf_qual_t *qual)
{
	return true;
}

static int
iwn_nic_lock(struct iwn_softc *sc)
{
	int ntries;

	/* Request exclusive access to NIC. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);

	/* Spin until we actually get the lock. */
	for (ntries = 0; ntries < 1000; ntries++) {
		if ((IWN_READ(sc, IWN_GP_CNTRL) &
		    (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}

static __inline void
iwn_nic_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
}

static __inline uint32_t
iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_PRPH_RDATA);
}

static __inline void
iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
}

static __inline void
iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
}

static __inline void
iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
}

static __inline void
iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
    const uint32_t *data, int count)
{
	for (; count > 0; count--, data++, addr += 4)
		iwn_prph_write(sc, addr, *data);
}

static __inline uint32_t
iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_MEM_RDATA);
}

static __inline void
iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_MEM_WDATA, data);
}

#ifndef IEEE80211_NO_HT
static __inline void
iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
{
	uint32_t tmp;

	tmp = iwn_mem_read(sc, addr & ~3);
	if (addr & 3)
		tmp = (tmp & 0x0000ffff) | data << 16;
	else
		tmp = (tmp & 0xffff0000) | data;
	iwn_mem_write(sc, addr & ~3, tmp);
}
#endif

static __inline void
iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
    int count)
{
	for (; count > 0; count--, addr += 4)
		*data++ = iwn_mem_read(sc, addr);
}

static __inline void
iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
    int count)
{
	for (; count > 0; count--, addr += 4)
		iwn_mem_write(sc, addr, val);
}

static int
iwn_eeprom_lock(struct iwn_softc *sc)
{
	int i, ntries;

	for (i = 0; i < 100; i++) {
		/* Request exclusive access to EEPROM. */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);

		/* Spin until we actually get the lock. */
		for (ntries = 0; ntries < 100; ntries++) {
			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
				return 0;
			DELAY(10);
		}
	}
	return ETIMEDOUT;
}

static __inline void
iwn_eeprom_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
}

/*
 * Initialize access by host to One Time Programmable ROM.
 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
 */
static int
iwn_init_otprom(struct iwn_softc *sc)
{
	uint16_t prev = 0, base, next;
	int count, error;

	/* Wait for clock stabilization before accessing prph. */
	if ((error = iwn_clock_wait(sc)) != 0)
		return error;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	DELAY(5);
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	iwn_nic_unlock(sc);

	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
	if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
		    IWN_RESET_LINK_PWR_MGMT_DIS);
	}
	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
	/* Clear ECC status. */
	IWN_SETBITS(sc, IWN_OTP_GP,
	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);

	/*
	 * Find the block before last block (contains the EEPROM image)
	 * for HW without OTP shadow RAM.
	 */
	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
		/* Switch to absolute addressing mode. */
		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
		base = 0;
		for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
			error = iwn_read_prom_data(sc, base, &next, 2);
			if (error != 0)
				return error;
			if (next == 0)	/* End of linked-list. */
				break;
			prev = base;
			base = le16toh(next);
		}
		if (count == 0 || count == IWN1000_OTP_NBLOCKS)
			return EIO;
		/* Skip "next" word. */
		sc->prom_base = prev + 1;
	}
	return 0;
}

static int
iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
{
	uint8_t *out = data;
	uint32_t val, tmp;
	int ntries;

	addr += sc->prom_base;
	for (; count > 0; count -= 2, addr++) {
		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
		for (ntries = 0; ntries < 10; ntries++) {
			val = IWN_READ(sc, IWN_EEPROM);
			if (val & IWN_EEPROM_READ_VALID)
				break;
			DELAY(5);
		}
		if (ntries == 10) {
			aprint_error_dev(sc->sc_dev,
			    "timeout reading ROM at 0x%x\n", addr);
			return ETIMEDOUT;
		}
		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
			/* OTPROM, check for ECC errors. */
			tmp = IWN_READ(sc, IWN_OTP_GP);
			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
				aprint_error_dev(sc->sc_dev,
				    "OTPROM ECC error at 0x%x\n", addr);
				return EIO;
			}
			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
				/* Correctable ECC error, clear bit. */
				IWN_SETBITS(sc, IWN_OTP_GP,
				    IWN_OTP_GP_ECC_CORR_STTS);
			}
		}
		*out++ = val >> 16;
		if (count > 1)
			*out++ = val >> 24;
	}
	return 0;
}

static int
iwn_dma_contig_alloc(bus_dma_tag_t tag, struct iwn_dma_info *dma, void **kvap,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, error;

	dma->tag = tag;
	dma->size = size;

	error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */
	if (error != 0)
		goto fail;

	error = bus_dmamem_map(tag, &dma->seg, 1, size, &dma->vaddr,
	    BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_COHERENT */
	if (error != 0)
		goto fail;

	error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	/* XXX Presumably needed because of missing BUS_DMA_ZERO, above. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);

	dma->paddr = dma->map->dm_segs[0].ds_addr;
	if (kvap != NULL)
		*kvap = dma->vaddr;

	return 0;

fail:	iwn_dma_contig_free(dma);
	return error;
}

static void
iwn_dma_contig_free(struct iwn_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}

static int
iwn_alloc_sched(struct iwn_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    (void **)&sc->sched, sc->schedsz, 1024);
}

static void
iwn_free_sched(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->sched_dma);
}

static int
iwn_alloc_kw(struct iwn_softc *sc)
{
	/* "Keep Warm" page must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, NULL, 4096,
	    4096);
}

static void
iwn_free_kw(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->kw_dma);
}

static int
iwn_alloc_ict(struct iwn_softc *sc)
{
	/* ICT table must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    (void **)&sc->ict, IWN_ICT_SIZE, 4096);
}

static void
iwn_free_ict(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->ict_dma);
}

static int
iwn_alloc_fwmem(struct iwn_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, NULL,
	    sc->fwsz, 16);
}

static void
iwn_free_fwmem(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->fw_dma);
}

static int
iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
	    (void **)&ring->desc, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}

	/* Allocate RX status area (16-byte aligned). */
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    (void **)&ring->stat, sizeof (struct iwn_rx_status), 16);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];

		error = bus_dmamap_create(sc->sc_dmat, IWN_RBUF_SIZE, 1,
		    IWN_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		data->m = MCLGETIalt(sc, M_DONTWAIT, NULL, IWN_RBUF_SIZE);
		if (data->m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate RX mbuf\n");
			error = ENOBUFS;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), IWN_RBUF_SIZE, NULL,
		    BUS_DMA_NOWAIT | BUS_DMA_READ);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", error);
			goto fail;
		}

		/* Set physical address of RX buffer (256-byte aligned). */
		ring->desc[i] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	}

	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0, size,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	iwn_free_rx_ring(sc, ring);
	return error;
}

static void
iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	int ntries;

	if (iwn_nic_lock(sc) == 0) {
		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
			    IWN_FH_RX_STATUS_IDLE)
				break;
			DELAY(10);
		}
		iwn_nic_unlock(sc);
	}
	ring->cur = 0;
	sc->last_rx_valid = 0;
}

static void
iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	int i;

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

static int
iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
	    (void **)&ring->desc, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	/*
	 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 * XXX Do we really need to allocate descriptors for other rings?
	 */
	if (qid > 4)
		return 0;

	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma,
	    (void **)&ring->cmd, size, 4);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + 12;
		paddr += sizeof (struct iwn_tx_cmd);

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    IWN_MAX_SCATTER - 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	return 0;

fail:	iwn_free_tx_ring(sc, ring);
	return error;
}

static void
iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

static void
iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

static void
iwn5000_ict_reset(struct iwn_softc *sc)
{
	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);

	/* Reset ICT table. */
	memset(sc->ict, 0, IWN_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWN_ICT_SIZE,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	DPRINTF(("enabling ICT\n"));
	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);

	/* Enable periodic RX interrupt. */
	sc->int_mask |= IWN_INT_RX_PERIODIC;
	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWN_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
}

static int
iwn_read_eeprom(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t val;
	int error;

	/* Check whether adapter has an EEPROM or an OTPROM. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
	DPRINTF(("%s found\n", (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ?
	    "OTPROM" : "EEPROM"));

	/* Adapter has to be powered on for EEPROM access to work. */
	if ((error = iwn_apm_init(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not power ON adapter\n");
		return error;
	}

	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
		aprint_error_dev(sc->sc_dev,
		    "bad ROM signature\n");
		return EIO;
	}
	if ((error = iwn_eeprom_lock(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not lock ROM (error=%d)\n", error);
		return error;
	}
	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
		if ((error = iwn_init_otprom(sc)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not initialize OTPROM\n");
			return error;
		}
	}

	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
	DPRINTF(("SKU capabilities=0x%04x\n", le16toh(val)));
	/* Check if HT support is bonded out. */
	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
		sc->sc_flags |= IWN_FLAG_HAS_11N;

	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
	sc->rfcfg = le16toh(val);
	DPRINTF(("radio config=0x%04x\n", sc->rfcfg));
	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
	if (sc->txchainmask == 0)
		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
	if (sc->rxchainmask == 0)
		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);

	/* Read MAC address. */
	iwn_read_prom_data(sc, IWN_EEPROM_MAC, ic->ic_myaddr, ETHER_ADDR_LEN);

	/* Read adapter-specific information from EEPROM. */
	ops->read_eeprom(sc);

	iwn_apm_stop(sc);	/* Power OFF adapter. */

	iwn_eeprom_unlock(sc);
	return 0;
}

static void
iwn4965_read_eeprom(struct iwn_softc *sc)
{
	uint32_t addr;
	uint16_t val;
	int i;

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < 5; i++) {
		addr = iwn4965_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
	sc->maxpwr2GHz = val & 0xff;
	sc->maxpwr5GHz = val >> 8;
	/* Check that EEPROM values are within valid range. */
	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
		sc->maxpwr5GHz = 38;
	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
		sc->maxpwr2GHz = 38;
	DPRINTF(("maxpwr 2GHz=%d 5GHz=%d\n", sc->maxpwr2GHz, sc->maxpwr5GHz));

	/* Read samples for each TX power group. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
	    sizeof sc->bands);

	/* Read voltage at which samples were taken. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
	sc->eeprom_voltage = (int16_t)le16toh(val);
	DPRINTF(("voltage=%d (in 0.3V)\n", sc->eeprom_voltage));

#ifdef IWN_DEBUG
	/* Print samples. */
	if (iwn_debug > 0) {
		for (i = 0; i < IWN_NBANDS; i++)
			iwn4965_print_power_group(sc, i);
	}
#endif
}

#ifdef IWN_DEBUG
static void
iwn4965_print_power_group(struct iwn_softc *sc, int i)
{
	struct iwn4965_eeprom_band *band = &sc->bands[i];
	struct iwn4965_eeprom_chan_samples *chans = band->chans;
	int j, c;

	aprint_normal("===band %d===\n", i);
	aprint_normal("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
	aprint_normal("chan1 num=%d\n", chans[0].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			aprint_normal("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[0].samples[c][j].temp,
			    chans[0].samples[c][j].gain,
			    chans[0].samples[c][j].power,
			    chans[0].samples[c][j].pa_det);
		}
	}
	aprint_normal("chan2 num=%d\n", chans[1].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			aprint_normal("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[1].samples[c][j].temp,
			    chans[1].samples[c][j].gain,
			    chans[1].samples[c][j].power,
			    chans[1].samples[c][j].pa_det);
		}
	}
}
#endif

static void
iwn5000_read_eeprom(struct iwn_softc *sc)
{
	struct iwn5000_eeprom_calib_hdr hdr;
	int32_t volt;
	uint32_t base, addr;
	uint16_t val;
	int i;

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
	    sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < 5; i++) {
		addr = base + iwn5000_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read enhanced TX power information for 6000 Series. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
		iwn_read_eeprom_enhinfo(sc);

	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
	DPRINTF(("calib version=%u pa type=%u voltage=%u\n",
	    hdr.version, hdr.pa_type, le16toh(hdr.volt)));
	sc->calib_ver = hdr.version;

	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
	    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
	    sc->hw_type == IWN_HW_REV_TYPE_135 ||
	    sc->hw_type == IWN_HW_REV_TYPE_105) {
		sc->eeprom_voltage = le16toh(hdr.volt);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp = le16toh(val);
		iwn_read_prom_data(sc, base + IWN2000_EEPROM_RAWTEMP, &val, 2);
		sc->eeprom_rawtemp = le16toh(val);
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* Compute temperature offset. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp = le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		volt = le16toh(val);
		sc->temp_off = sc->eeprom_temp - (volt / -5);
		DPRINTF(("temp=%d volt=%d offset=%dK\n",
		    sc->eeprom_temp, volt, sc->temp_off));
	} else {
		/* Read crystal calibration. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
		    &sc->eeprom_crystal, sizeof (uint32_t));
		DPRINTF(("crystal calibration 0x%08x\n",
		    le32toh(sc->eeprom_crystal)));
	}
}

static void
iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
{
	struct ieee80211com *ic = &sc->sc_ic;
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND];
	uint8_t chan;
	int i;

	iwn_read_prom_data(sc, addr, channels,
	    band->nchan * sizeof (struct iwn_eeprom_chan));

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID))
			continue;

		chan = band->chan[i];

		if (n == 0) {	/* 2GHz band */
			ic->ic_channels[chan].ic_freq =
			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ);
			ic->ic_channels[chan].ic_flags =
			    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
			    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;

		} else {	/* 5GHz band */
			/*
			 * Some adapters support channels 7, 8, 11 and 12
			 * both in the 2GHz and 4.9GHz bands.
			 * Because of limitations in our net80211 layer,
			 * we don't support them in the 4.9GHz band.
			 */
			if (chan <= 14)
				continue;

			ic->ic_channels[chan].ic_freq =
			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ);
			ic->ic_channels[chan].ic_flags = IEEE80211_CHAN_A;
			/* We have at least one valid 5GHz channel. */
			sc->sc_flags |= IWN_FLAG_HAS_5GHZ;
		}

		/* Is active scan allowed on this channel? */
		if (!(channels[i].flags & IWN_EEPROM_CHAN_ACTIVE)) {
			ic->ic_channels[chan].ic_flags |=
			    IEEE80211_CHAN_PASSIVE;
		}

		/* Save maximum allowed TX power for this channel. */
		sc->maxpwr[chan] = channels[i].maxpwr;

		DPRINTF(("adding chan %d flags=0x%x maxpwr=%d\n",
		    chan, channels[i].flags, sc->maxpwr[chan]));
	}
}

static void
iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
{
	struct iwn_eeprom_enhinfo enhinfo[35];
	uint16_t val, base;
	int8_t maxpwr;
	uint8_t flags;
	int i;

	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
	    enhinfo, sizeof enhinfo);

	memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
	for (i = 0; i < __arraycount(enhinfo); i++) {
		flags = enhinfo[i].flags;
		if (!(flags & IWN_ENHINFO_VALID))
			continue;	/* Skip invalid entries. */

		maxpwr = 0;
		if (sc->txchainmask & IWN_ANT_A)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
		if (sc->txchainmask & IWN_ANT_B)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
		if (sc->txchainmask & IWN_ANT_C)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
		if (sc->ntxchains == 2)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
		else if (sc->ntxchains == 3)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
		maxpwr /= 2;	/* Convert half-dBm to dBm. */
*/ 1815 1816 DPRINTF(("enhinfo %d, maxpwr=%d\n", i, maxpwr)); 1817 sc->enh_maxpwr[i] = maxpwr; 1818 } 1819 } 1820 1821 static struct ieee80211_node * 1822 iwn_node_alloc(struct ieee80211_node_table *ic __unused) 1823 { 1824 return malloc(sizeof (struct iwn_node), M_80211_NODE, M_NOWAIT | M_ZERO); 1825 } 1826 1827 static void 1828 iwn_newassoc(struct ieee80211_node *ni, int isnew) 1829 { 1830 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 1831 struct iwn_node *wn = (void *)ni; 1832 uint8_t rate; 1833 int ridx, i; 1834 1835 ieee80211_amrr_node_init(&sc->amrr, &wn->amn); 1836 /* Start at lowest available bit-rate, AMRR will raise. */ 1837 ni->ni_txrate = 0; 1838 1839 for (i = 0; i < ni->ni_rates.rs_nrates; i++) { 1840 rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL; 1841 /* Map 802.11 rate to HW rate index. */ 1842 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) 1843 if (iwn_rates[ridx].rate == rate) 1844 break; 1845 wn->ridx[i] = ridx; 1846 } 1847 } 1848 1849 static int 1850 iwn_media_change(struct ifnet *ifp) 1851 { 1852 struct iwn_softc *sc = ifp->if_softc; 1853 struct ieee80211com *ic = &sc->sc_ic; 1854 uint8_t rate, ridx; 1855 int error; 1856 1857 error = ieee80211_media_change(ifp); 1858 if (error != ENETRESET) 1859 return error; 1860 1861 if (ic->ic_fixed_rate != -1) { 1862 rate = ic->ic_sup_rates[ic->ic_curmode]. 1863 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL; 1864 /* Map 802.11 rate to HW rate index. */ 1865 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) 1866 if (iwn_rates[ridx].rate == rate) 1867 break; 1868 sc->fixed_ridx = ridx; 1869 } 1870 1871 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 1872 (IFF_UP | IFF_RUNNING)) { 1873 iwn_stop(ifp, 0); 1874 error = iwn_init(ifp); 1875 } 1876 return error; 1877 } 1878 1879 static int 1880 iwn_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 1881 { 1882 struct ifnet *ifp = ic->ic_ifp; 1883 struct iwn_softc *sc = ifp->if_softc; 1884 int error; 1885 1886 callout_stop(&sc->calib_to); 1887 1888 switch (nstate) { 1889 case IEEE80211_S_SCAN: 1890 /* XXX Do not abort a running scan. */ 1891 if (sc->sc_flags & IWN_FLAG_SCANNING) { 1892 if (ic->ic_state != nstate) 1893 aprint_debug_dev(sc->sc_dev, "scan request(%d) " 1894 "while scanning(%d) ignored\n", nstate, 1895 ic->ic_state); 1896 break; 1897 } 1898 1899 /* XXX Not sure if call and flags are needed. */ 1900 ieee80211_node_table_reset(&ic->ic_scan); 1901 ic->ic_flags |= IEEE80211_F_SCAN | IEEE80211_F_ASCAN; 1902 sc->sc_flags |= IWN_FLAG_SCANNING_2GHZ; 1903 1904 /* Make the link LED blink while we're scanning. */ 1905 iwn_set_led(sc, IWN_LED_LINK, 10, 10); 1906 1907 if ((error = iwn_scan(sc, IEEE80211_CHAN_2GHZ)) != 0) { 1908 aprint_error_dev(sc->sc_dev, 1909 "could not initiate scan\n"); 1910 return error; 1911 } 1912 ic->ic_state = nstate; 1913 return 0; 1914 1915 case IEEE80211_S_ASSOC: 1916 if (ic->ic_state != IEEE80211_S_RUN) 1917 break; 1918 /* FALLTHROUGH */ 1919 case IEEE80211_S_AUTH: 1920 /* Reset state to handle reassociations correctly. */ 1921 sc->rxon.associd = 0; 1922 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS); 1923 sc->calib.state = IWN_CALIB_STATE_INIT; 1924 1925 /* Wait until we hear a beacon before we transmit */ 1926 if (IEEE80211_IS_CHAN_PASSIVE(ic->ic_curchan)) 1927 sc->sc_beacon_wait = 1; 1928 1929 if ((error = iwn_auth(sc)) != 0) { 1930 aprint_error_dev(sc->sc_dev, 1931 "could not move to auth state\n"); 1932 return error; 1933 } 1934 break; 1935 1936 case IEEE80211_S_RUN: 1937 /* 1938 * RUN -> RUN transition; Just restart timers. 
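 * (Only the TX power calibration tick counter is reset below;
 * iwn_calib_timeout() forces a statistics request every 120 of its
 * 500 ms ticks, i.e. roughly once a minute, so clearing calib_cnt
 * merely defers the next forced request.)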
1939 */ 1940 if (ic->ic_state == IEEE80211_S_RUN) { 1941 sc->calib_cnt = 0; 1942 break; 1943 } 1944 1945 /* Wait until we hear a beacon before we transmit */ 1946 if (IEEE80211_IS_CHAN_PASSIVE(ic->ic_curchan)) 1947 sc->sc_beacon_wait = 1; 1948 1949 if ((error = iwn_run(sc)) != 0) { 1950 aprint_error_dev(sc->sc_dev, 1951 "could not move to run state\n"); 1952 return error; 1953 } 1954 break; 1955 1956 case IEEE80211_S_INIT: 1957 sc->sc_flags &= ~IWN_FLAG_SCANNING; 1958 sc->calib.state = IWN_CALIB_STATE_INIT; 1959 /* 1960 * Purge the xmit queue so we don't have old frames 1961 * during a new association attempt. 1962 */ 1963 sc->sc_beacon_wait = 0; 1964 ifp->if_flags &= ~IFF_OACTIVE; 1965 iwn_start(ifp); 1966 break; 1967 } 1968 1969 return sc->sc_newstate(ic, nstate, arg); 1970 } 1971 1972 static void 1973 iwn_iter_func(void *arg, struct ieee80211_node *ni) 1974 { 1975 struct iwn_softc *sc = arg; 1976 struct iwn_node *wn = (struct iwn_node *)ni; 1977 1978 ieee80211_amrr_choose(&sc->amrr, ni, &wn->amn); 1979 } 1980 1981 static void 1982 iwn_calib_timeout(void *arg) 1983 { 1984 struct iwn_softc *sc = arg; 1985 struct ieee80211com *ic = &sc->sc_ic; 1986 int s; 1987 1988 s = splnet(); 1989 if (ic->ic_fixed_rate == -1) { 1990 if (ic->ic_opmode == IEEE80211_M_STA) 1991 iwn_iter_func(sc, ic->ic_bss); 1992 else 1993 ieee80211_iterate_nodes(&ic->ic_sta, iwn_iter_func, sc); 1994 } 1995 /* Force automatic TX power calibration every 60 secs. */ 1996 if (++sc->calib_cnt >= 120) { 1997 uint32_t flags = 0; 1998 1999 DPRINTF(("sending request for statistics\n")); 2000 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, 2001 sizeof flags, 1); 2002 sc->calib_cnt = 0; 2003 } 2004 splx(s); 2005 2006 /* Automatic rate control triggered every 500ms. */ 2007 callout_schedule(&sc->calib_to, mstohz(500)); 2008 } 2009 2010 /* 2011 * Process an RX_PHY firmware notification. This is usually immediately 2012 * followed by an MPDU_RX_DONE notification. 2013 */ 2014 static void 2015 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2016 struct iwn_rx_data *data) 2017 { 2018 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); 2019 2020 DPRINTFN(2, ("received PHY stats\n")); 2021 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2022 sizeof (*stat), BUS_DMASYNC_POSTREAD); 2023 2024 /* Save RX statistics, they will be used on MPDU_RX_DONE. */ 2025 memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); 2026 sc->last_rx_valid = 1; 2027 } 2028 2029 /* 2030 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 2031 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 2032 */ 2033 static void 2034 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2035 struct iwn_rx_data *data) 2036 { 2037 struct iwn_ops *ops = &sc->ops; 2038 struct ieee80211com *ic = &sc->sc_ic; 2039 struct ifnet *ifp = ic->ic_ifp; 2040 struct iwn_rx_ring *ring = &sc->rxq; 2041 struct ieee80211_frame *wh; 2042 struct ieee80211_node *ni; 2043 struct mbuf *m, *m1; 2044 struct iwn_rx_stat *stat; 2045 char *head; 2046 uint32_t flags; 2047 int error, len, rssi, s; 2048 2049 if (desc->type == IWN_MPDU_RX_DONE) { 2050 /* Check for prior RX_PHY notification. 
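   An MPDU_RX_DONE notification carries only the frame itself; the
   matching PHY statistics (signal, channel, timestamp) arrived in the
   preceding RX_PHY notification and were saved in sc->last_rx_stat by
   iwn_rx_phy(), so a frame without them cannot be handled and is
   dropped here.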
*/ 2051 if (!sc->last_rx_valid) { 2052 DPRINTF(("missing RX_PHY\n")); 2053 return; 2054 } 2055 sc->last_rx_valid = 0; 2056 stat = &sc->last_rx_stat; 2057 } else 2058 stat = (struct iwn_rx_stat *)(desc + 1); 2059 2060 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWN_RBUF_SIZE, 2061 BUS_DMASYNC_POSTREAD); 2062 2063 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 2064 aprint_error_dev(sc->sc_dev, 2065 "invalid RX statistic header\n"); 2066 return; 2067 } 2068 if (desc->type == IWN_MPDU_RX_DONE) { 2069 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 2070 head = (char *)(mpdu + 1); 2071 len = le16toh(mpdu->len); 2072 } else { 2073 head = (char *)(stat + 1) + stat->cfg_phy_len; 2074 len = le16toh(stat->len); 2075 } 2076 2077 flags = le32toh(*(uint32_t *)(head + len)); 2078 2079 /* Discard frames with a bad FCS early. */ 2080 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 2081 DPRINTFN(2, ("RX flags error %x\n", flags)); 2082 if_statinc(ifp, if_ierrors); 2083 return; 2084 } 2085 /* Discard frames that are too short. */ 2086 if (len < sizeof (*wh)) { 2087 DPRINTF(("frame too short: %d\n", len)); 2088 ic->ic_stats.is_rx_tooshort++; 2089 if_statinc(ifp, if_ierrors); 2090 return; 2091 } 2092 2093 m1 = MCLGETIalt(sc, M_DONTWAIT, NULL, IWN_RBUF_SIZE); 2094 if (m1 == NULL) { 2095 ic->ic_stats.is_rx_nobuf++; 2096 if_statinc(ifp, if_ierrors); 2097 return; 2098 } 2099 bus_dmamap_unload(sc->sc_dmat, data->map); 2100 2101 error = bus_dmamap_load(sc->sc_dmat, data->map, mtod(m1, void *), 2102 IWN_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_READ); 2103 if (error != 0) { 2104 m_freem(m1); 2105 2106 /* Try to reload the old mbuf. */ 2107 error = bus_dmamap_load(sc->sc_dmat, data->map, 2108 mtod(data->m, void *), IWN_RBUF_SIZE, NULL, 2109 BUS_DMA_NOWAIT | BUS_DMA_READ); 2110 if (error != 0) { 2111 panic("%s: could not load old RX mbuf", 2112 device_xname(sc->sc_dev)); 2113 } 2114 /* Physical address may have changed. */ 2115 ring->desc[ring->cur] = 2116 htole32(data->map->dm_segs[0].ds_addr >> 8); 2117 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 2118 ring->cur * sizeof (uint32_t), sizeof (uint32_t), 2119 BUS_DMASYNC_PREWRITE); 2120 if_statinc(ifp, if_ierrors); 2121 return; 2122 } 2123 2124 m = data->m; 2125 data->m = m1; 2126 /* Update RX descriptor. */ 2127 ring->desc[ring->cur] = htole32(data->map->dm_segs[0].ds_addr >> 8); 2128 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 2129 ring->cur * sizeof (uint32_t), sizeof (uint32_t), 2130 BUS_DMASYNC_PREWRITE); 2131 2132 /* Finalize mbuf. */ 2133 m_set_rcvif(m, ifp); 2134 m->m_data = head; 2135 m->m_pkthdr.len = m->m_len = len; 2136 2137 s = splnet(); 2138 2139 /* Grab a reference to the source node. */ 2140 wh = mtod(m, struct ieee80211_frame *); 2141 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2142 2143 /* XXX OpenBSD adds decryption here (see also comments in iwn_tx). */ 2144 /* NetBSD does decryption in ieee80211_input. 
*/ 2145 2146 rssi = ops->get_rssi(stat); 2147 2148 /* XXX Added for NetBSD: scans never stop without it */ 2149 if (ic->ic_state == IEEE80211_S_SCAN) 2150 iwn_fix_channel(ic, m, stat); 2151 2152 if (sc->sc_drvbpf != NULL) { 2153 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 2154 2155 tap->wr_flags = 0; 2156 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 2157 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2158 tap->wr_chan_freq = 2159 htole16(ic->ic_channels[stat->chan].ic_freq); 2160 tap->wr_chan_flags = 2161 htole16(ic->ic_channels[stat->chan].ic_flags); 2162 tap->wr_dbm_antsignal = (int8_t)rssi; 2163 tap->wr_dbm_antnoise = (int8_t)sc->noise; 2164 tap->wr_tsft = stat->tstamp; 2165 switch (stat->rate) { 2166 /* CCK rates. */ 2167 case 10: tap->wr_rate = 2; break; 2168 case 20: tap->wr_rate = 4; break; 2169 case 55: tap->wr_rate = 11; break; 2170 case 110: tap->wr_rate = 22; break; 2171 /* OFDM rates. */ 2172 case 0xd: tap->wr_rate = 12; break; 2173 case 0xf: tap->wr_rate = 18; break; 2174 case 0x5: tap->wr_rate = 24; break; 2175 case 0x7: tap->wr_rate = 36; break; 2176 case 0x9: tap->wr_rate = 48; break; 2177 case 0xb: tap->wr_rate = 72; break; 2178 case 0x1: tap->wr_rate = 96; break; 2179 case 0x3: tap->wr_rate = 108; break; 2180 /* Unknown rate: should not happen. */ 2181 default: tap->wr_rate = 0; 2182 } 2183 2184 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m, BPF_D_IN); 2185 } 2186 2187 /* 2188 * If it's a beacon and we're waiting, then do the wakeup. 2189 */ 2190 if (sc->sc_beacon_wait) { 2191 uint8_t type, subtype; 2192 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2193 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2194 /* 2195 * This assumes at this point we've received our own 2196 * beacon. 2197 */ 2198 if (type == IEEE80211_FC0_TYPE_MGT && 2199 subtype == IEEE80211_FC0_SUBTYPE_BEACON) { 2200 sc->sc_beacon_wait = 0; 2201 ifp->if_flags &= ~IFF_OACTIVE; 2202 iwn_start(ifp); 2203 } 2204 } 2205 2206 /* Send the frame to the 802.11 layer. */ 2207 ieee80211_input(ic, m, ni, rssi, 0); 2208 2209 /* Node is no longer needed. */ 2210 ieee80211_free_node(ni); 2211 2212 splx(s); 2213 } 2214 2215 #ifndef IEEE80211_NO_HT 2216 /* Process an incoming Compressed BlockAck. */ 2217 static void 2218 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2219 struct iwn_rx_data *data) 2220 { 2221 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 2222 struct iwn_tx_ring *txq; 2223 2224 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), sizeof (*ba), 2225 BUS_DMASYNC_POSTREAD); 2226 2227 txq = &sc->txq[le16toh(ba->qid)]; 2228 /* XXX TBD */ 2229 } 2230 #endif 2231 2232 /* 2233 * Process a CALIBRATION_RESULT notification sent by the initialization 2234 * firmware on response to a CMD_CALIB_CONFIG command (5000 only). 2235 */ 2236 static void 2237 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2238 struct iwn_rx_data *data) 2239 { 2240 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 2241 int len, idx = -1; 2242 2243 /* Runtime firmware should not send such a notification. 
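   Calibration results are collected only while the initialization
   firmware is running (before IWN_FLAG_CALIB_DONE is set); the copies
   kept in sc->calibcmd[] are meant to be replayed to the runtime
   firmware when the device is (re)initialized.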
*/ 2244 if (sc->sc_flags & IWN_FLAG_CALIB_DONE) 2245 return; 2246 2247 len = (le32toh(desc->len) & 0x3fff) - 4; 2248 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), len, 2249 BUS_DMASYNC_POSTREAD); 2250 2251 switch (calib->code) { 2252 case IWN5000_PHY_CALIB_DC: 2253 if (sc->hw_type == IWN_HW_REV_TYPE_5150 || 2254 sc->hw_type == IWN_HW_REV_TYPE_2030 || 2255 sc->hw_type == IWN_HW_REV_TYPE_2000 || 2256 sc->hw_type == IWN_HW_REV_TYPE_135 || 2257 sc->hw_type == IWN_HW_REV_TYPE_105) 2258 idx = 0; 2259 break; 2260 case IWN5000_PHY_CALIB_LO: 2261 idx = 1; 2262 break; 2263 case IWN5000_PHY_CALIB_TX_IQ: 2264 idx = 2; 2265 break; 2266 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 2267 if (sc->hw_type < IWN_HW_REV_TYPE_6000 && 2268 sc->hw_type != IWN_HW_REV_TYPE_5150) 2269 idx = 3; 2270 break; 2271 case IWN5000_PHY_CALIB_BASE_BAND: 2272 idx = 4; 2273 break; 2274 } 2275 if (idx == -1) /* Ignore other results. */ 2276 return; 2277 2278 /* Save calibration result. */ 2279 if (sc->calibcmd[idx].buf != NULL) 2280 free(sc->calibcmd[idx].buf, M_DEVBUF); 2281 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT); 2282 if (sc->calibcmd[idx].buf == NULL) { 2283 DPRINTF(("not enough memory for calibration result %d\n", 2284 calib->code)); 2285 return; 2286 } 2287 DPRINTF(("saving calibration result code=%d len=%d\n", 2288 calib->code, len)); 2289 sc->calibcmd[idx].len = len; 2290 memcpy(sc->calibcmd[idx].buf, calib, len); 2291 } 2292 2293 /* 2294 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 2295 * The latter is sent by the firmware after each received beacon. 2296 */ 2297 static void 2298 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2299 struct iwn_rx_data *data) 2300 { 2301 struct iwn_ops *ops = &sc->ops; 2302 struct ieee80211com *ic = &sc->sc_ic; 2303 struct iwn_calib_state *calib = &sc->calib; 2304 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 2305 int temp; 2306 2307 /* Ignore statistics received during a scan. */ 2308 if (ic->ic_state != IEEE80211_S_RUN) 2309 return; 2310 2311 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2312 sizeof (*stats), BUS_DMASYNC_POSTREAD); 2313 2314 DPRINTFN(3, ("received statistics (cmd=%d)\n", desc->type)); 2315 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */ 2316 2317 /* Test if temperature has changed. */ 2318 if (stats->general.temp != sc->rawtemp) { 2319 /* Convert "raw" temperature to degC. */ 2320 sc->rawtemp = stats->general.temp; 2321 temp = ops->get_temperature(sc); 2322 DPRINTFN(2, ("temperature=%dC\n", temp)); 2323 2324 /* Update TX power if need be (4965AGN only). */ 2325 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 2326 iwn4965_power_calibration(sc, temp); 2327 } 2328 2329 if (desc->type != IWN_BEACON_STATISTICS) 2330 return; /* Reply to a statistics request. */ 2331 2332 sc->noise = iwn_get_noise(&stats->rx.general); 2333 2334 /* Test that RSSI and noise are present in stats report. */ 2335 if (le32toh(stats->rx.general.flags) != 1) { 2336 DPRINTF(("received statistics without RSSI\n")); 2337 return; 2338 } 2339 2340 /* 2341 * XXX Differential gain calibration makes the 6005 firmware 2342 * crap out, so skip it for now. This effectively disables 2343 * sensitivity tuning as well. 
2344 */ 2345 if (sc->hw_type == IWN_HW_REV_TYPE_6005) 2346 return; 2347 2348 if (calib->state == IWN_CALIB_STATE_ASSOC) 2349 iwn_collect_noise(sc, &stats->rx.general); 2350 else if (calib->state == IWN_CALIB_STATE_RUN) 2351 iwn_tune_sensitivity(sc, &stats->rx); 2352 } 2353 2354 /* 2355 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 2356 * and 5000 adapters have different incompatible TX status formats. 2357 */ 2358 static void 2359 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2360 struct iwn_rx_data *data) 2361 { 2362 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 2363 2364 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2365 sizeof (*stat), BUS_DMASYNC_POSTREAD); 2366 iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff); 2367 } 2368 2369 static void 2370 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2371 struct iwn_rx_data *data) 2372 { 2373 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 2374 2375 #ifdef notyet 2376 /* Reset TX scheduler slot. */ 2377 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); 2378 #endif 2379 2380 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2381 sizeof (*stat), BUS_DMASYNC_POSTREAD); 2382 iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff); 2383 } 2384 2385 /* 2386 * Adapter-independent backend for TX_DONE firmware notifications. 2387 */ 2388 static void 2389 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt, 2390 uint8_t status) 2391 { 2392 struct ieee80211com *ic = &sc->sc_ic; 2393 struct ifnet *ifp = ic->ic_ifp; 2394 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2395 struct iwn_tx_data *data = &ring->data[desc->idx]; 2396 struct iwn_node *wn = (struct iwn_node *)data->ni; 2397 int s; 2398 2399 s = splnet(); 2400 2401 /* Update rate control statistics. */ 2402 wn->amn.amn_txcnt++; 2403 if (ackfailcnt > 0) 2404 wn->amn.amn_retrycnt++; 2405 2406 if (status != 1 && status != 2) 2407 if_statinc(ifp, if_oerrors); 2408 else 2409 if_statinc(ifp, if_opackets); 2410 2411 /* Unmap and free mbuf. */ 2412 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize, 2413 BUS_DMASYNC_POSTWRITE); 2414 bus_dmamap_unload(sc->sc_dmat, data->map); 2415 m_freem(data->m); 2416 data->m = NULL; 2417 ieee80211_free_node(data->ni); 2418 data->ni = NULL; 2419 2420 sc->sc_tx_timer = 0; 2421 if (--ring->queued < IWN_TX_RING_LOMARK) { 2422 sc->qfullmsk &= ~(1 << ring->qid); 2423 if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) { 2424 ifp->if_flags &= ~IFF_OACTIVE; 2425 iwn_start(ifp); 2426 } 2427 } 2428 2429 splx(s); 2430 } 2431 2432 /* 2433 * Process a "command done" firmware notification. This is where we wakeup 2434 * processes waiting for a synchronous command completion. 2435 */ 2436 static void 2437 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 2438 { 2439 struct iwn_tx_ring *ring = &sc->txq[4]; 2440 struct iwn_tx_data *data; 2441 2442 if ((desc->qid & 0xf) != 4) 2443 return; /* Not a command ack. */ 2444 2445 data = &ring->data[desc->idx]; 2446 2447 /* If the command was mapped in an mbuf, free it. */ 2448 if (data->m != NULL) { 2449 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 2450 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2451 bus_dmamap_unload(sc->sc_dmat, data->map); 2452 m_freem(data->m); 2453 data->m = NULL; 2454 } 2455 wakeup(&ring->desc[desc->idx]); 2456 } 2457 2458 /* 2459 * Process an INT_FH_RX or INT_SW_RX interrupt. 
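 * All firmware notifications share one RX ring: the status area's
 * closed_count field says how far the firmware has advanced, the loop
 * below consumes descriptors from rxq.cur up to that index and
 * dispatches on desc->type, and the read pointer written back to
 * IWN_FH_RX_WPTR at the end is kept aligned to a multiple of 8
 * (hw & ~7).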
2460 */ 2461 static void 2462 iwn_notif_intr(struct iwn_softc *sc) 2463 { 2464 struct iwn_ops *ops = &sc->ops; 2465 struct ieee80211com *ic = &sc->sc_ic; 2466 struct ifnet *ifp = ic->ic_ifp; 2467 uint16_t hw; 2468 int s; 2469 2470 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map, 2471 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD); 2472 2473 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff; 2474 while (sc->rxq.cur != hw) { 2475 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2476 struct iwn_rx_desc *desc; 2477 2478 bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof (*desc), 2479 BUS_DMASYNC_POSTREAD); 2480 desc = mtod(data->m, struct iwn_rx_desc *); 2481 2482 DPRINTFN(4, ("notification qid=%d idx=%d flags=%x type=%d\n", 2483 desc->qid & 0xf, desc->idx, desc->flags, desc->type)); 2484 2485 if (!(desc->qid & 0x80)) /* Reply to a command. */ 2486 iwn_cmd_done(sc, desc); 2487 2488 switch (desc->type) { 2489 case IWN_RX_PHY: 2490 iwn_rx_phy(sc, desc, data); 2491 break; 2492 2493 case IWN_RX_DONE: /* 4965AGN only. */ 2494 case IWN_MPDU_RX_DONE: 2495 /* An 802.11 frame has been received. */ 2496 iwn_rx_done(sc, desc, data); 2497 break; 2498 #ifndef IEEE80211_NO_HT 2499 case IWN_RX_COMPRESSED_BA: 2500 /* A Compressed BlockAck has been received. */ 2501 iwn_rx_compressed_ba(sc, desc, data); 2502 break; 2503 #endif 2504 case IWN_TX_DONE: 2505 /* An 802.11 frame has been transmitted. */ 2506 ops->tx_done(sc, desc, data); 2507 break; 2508 2509 case IWN_RX_STATISTICS: 2510 case IWN_BEACON_STATISTICS: 2511 iwn_rx_statistics(sc, desc, data); 2512 break; 2513 2514 case IWN_BEACON_MISSED: 2515 { 2516 struct iwn_beacon_missed *miss = 2517 (struct iwn_beacon_missed *)(desc + 1); 2518 2519 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2520 sizeof (*miss), BUS_DMASYNC_POSTREAD); 2521 /* 2522 * If more than 5 consecutive beacons are missed, 2523 * reinitialize the sensitivity state machine. 2524 */ 2525 DPRINTF(("beacons missed %d/%d\n", 2526 le32toh(miss->consecutive), le32toh(miss->total))); 2527 if (ic->ic_state == IEEE80211_S_RUN && 2528 le32toh(miss->consecutive) > 5) 2529 (void)iwn_init_sensitivity(sc); 2530 break; 2531 } 2532 case IWN_UC_READY: 2533 { 2534 struct iwn_ucode_info *uc = 2535 (struct iwn_ucode_info *)(desc + 1); 2536 2537 /* The microcontroller is ready. */ 2538 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2539 sizeof (*uc), BUS_DMASYNC_POSTREAD); 2540 DPRINTF(("microcode alive notification version=%d.%d " 2541 "subtype=%x alive=%x\n", uc->major, uc->minor, 2542 uc->subtype, le32toh(uc->valid))); 2543 2544 if (le32toh(uc->valid) != 1) { 2545 aprint_error_dev(sc->sc_dev, 2546 "microcontroller initialization " 2547 "failed\n"); 2548 break; 2549 } 2550 if (uc->subtype == IWN_UCODE_INIT) { 2551 /* Save microcontroller report. */ 2552 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 2553 } 2554 /* Save the address of the error log in SRAM. */ 2555 sc->errptr = le32toh(uc->errptr); 2556 break; 2557 } 2558 case IWN_STATE_CHANGED: 2559 { 2560 uint32_t *status = (uint32_t *)(desc + 1); 2561 2562 /* Enabled/disabled notification. */ 2563 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2564 sizeof (*status), BUS_DMASYNC_POSTREAD); 2565 DPRINTF(("state changed to %x\n", le32toh(*status))); 2566 2567 if (le32toh(*status) & 1) { 2568 /* The radio button has to be pushed. */ 2569 aprint_error_dev(sc->sc_dev, 2570 "Radio transmitter is off\n"); 2571 /* Turn the interface down. 
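   Clearing IFF_UP also keeps the interrupt mask closed, since
   iwn_softintr() only re-enables interrupts on exit while the
   interface is still marked up.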
*/ 2572 s = splnet(); 2573 ifp->if_flags &= ~IFF_UP; 2574 iwn_stop(ifp, 1); 2575 splx(s); 2576 return; /* No further processing. */ 2577 } 2578 break; 2579 } 2580 case IWN_START_SCAN: 2581 { 2582 struct iwn_start_scan *scan = 2583 (struct iwn_start_scan *)(desc + 1); 2584 2585 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2586 sizeof (*scan), BUS_DMASYNC_POSTREAD); 2587 DPRINTFN(2, ("scanning channel %d status %x\n", 2588 scan->chan, le32toh(scan->status))); 2589 2590 /* Fix current channel. */ 2591 ic->ic_bss->ni_chan = &ic->ic_channels[scan->chan]; 2592 break; 2593 } 2594 case IWN_STOP_SCAN: 2595 { 2596 struct iwn_stop_scan *scan = 2597 (struct iwn_stop_scan *)(desc + 1); 2598 2599 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2600 sizeof (*scan), BUS_DMASYNC_POSTREAD); 2601 DPRINTF(("scan finished nchan=%d status=%d chan=%d\n", 2602 scan->nchan, scan->status, scan->chan)); 2603 2604 if (scan->status == 1 && scan->chan <= 14 && 2605 (sc->sc_flags & IWN_FLAG_HAS_5GHZ)) { 2606 /* 2607 * We just finished scanning 2GHz channels, 2608 * start scanning 5GHz ones. 2609 */ 2610 sc->sc_flags &= ~IWN_FLAG_SCANNING_2GHZ; 2611 sc->sc_flags |= IWN_FLAG_SCANNING_5GHZ; 2612 if (iwn_scan(sc, IEEE80211_CHAN_5GHZ) == 0) 2613 break; 2614 } 2615 sc->sc_flags &= ~IWN_FLAG_SCANNING; 2616 ieee80211_end_scan(ic); 2617 break; 2618 } 2619 case IWN5000_CALIBRATION_RESULT: 2620 iwn5000_rx_calib_results(sc, desc, data); 2621 break; 2622 2623 case IWN5000_CALIBRATION_DONE: 2624 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 2625 wakeup(sc); 2626 break; 2627 } 2628 2629 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 2630 } 2631 2632 /* Tell the firmware what we have processed. */ 2633 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; 2634 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 2635 } 2636 2637 /* 2638 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2639 * from power-down sleep mode. 2640 */ 2641 static void 2642 iwn_wakeup_intr(struct iwn_softc *sc) 2643 { 2644 int qid; 2645 2646 DPRINTF(("ucode wakeup from power-down sleep\n")); 2647 2648 /* Wakeup RX and TX rings. */ 2649 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 2650 for (qid = 0; qid < sc->ntxqs; qid++) { 2651 struct iwn_tx_ring *ring = &sc->txq[qid]; 2652 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 2653 } 2654 } 2655 2656 /* 2657 * Dump the error log of the firmware when a firmware panic occurs. Although 2658 * we can't debug the firmware because it is neither open source nor free, it 2659 * can help us to identify certain classes of problems. 2660 */ 2661 static void 2662 iwn_fatal_intr(struct iwn_softc *sc) 2663 { 2664 struct iwn_fw_dump dump; 2665 int i; 2666 2667 /* Force a complete recalibration on next init. */ 2668 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 2669 2670 /* Check that the error log address is valid. */ 2671 if (sc->errptr < IWN_FW_DATA_BASE || 2672 sc->errptr + sizeof (dump) > 2673 IWN_FW_DATA_BASE + sc->fw_data_maxsz) { 2674 aprint_error_dev(sc->sc_dev, 2675 "bad firmware error log address 0x%08x\n", sc->errptr); 2676 return; 2677 } 2678 if (iwn_nic_lock(sc) != 0) { 2679 aprint_error_dev(sc->sc_dev, 2680 "could not read firmware error log\n"); 2681 return; 2682 } 2683 /* Read firmware error log from SRAM. 
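   The log sits at the SRAM address the firmware reported in its alive
   notification (sc->errptr, saved in iwn_notif_intr()); it is copied
   out as 32-bit words into a local struct iwn_fw_dump and decoded
   below.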
*/ 2684 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 2685 sizeof (dump) / sizeof (uint32_t)); 2686 iwn_nic_unlock(sc); 2687 2688 if (dump.valid == 0) { 2689 aprint_error_dev(sc->sc_dev, 2690 "firmware error log is empty\n"); 2691 return; 2692 } 2693 aprint_error("firmware error log:\n"); 2694 aprint_error(" error type = \"%s\" (0x%08X)\n", 2695 (dump.id < __arraycount(iwn_fw_errmsg)) ? 2696 iwn_fw_errmsg[dump.id] : "UNKNOWN", 2697 dump.id); 2698 aprint_error(" program counter = 0x%08X\n", dump.pc); 2699 aprint_error(" source line = 0x%08X\n", dump.src_line); 2700 aprint_error(" error data = 0x%08X%08X\n", 2701 dump.error_data[0], dump.error_data[1]); 2702 aprint_error(" branch link = 0x%08X%08X\n", 2703 dump.branch_link[0], dump.branch_link[1]); 2704 aprint_error(" interrupt link = 0x%08X%08X\n", 2705 dump.interrupt_link[0], dump.interrupt_link[1]); 2706 aprint_error(" time = %u\n", dump.time[0]); 2707 2708 /* Dump driver status (TX and RX rings) while we're here. */ 2709 aprint_error("driver status:\n"); 2710 for (i = 0; i < sc->ntxqs; i++) { 2711 struct iwn_tx_ring *ring = &sc->txq[i]; 2712 aprint_error(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2713 i, ring->qid, ring->cur, ring->queued); 2714 } 2715 aprint_error(" rx ring: cur=%d\n", sc->rxq.cur); 2716 aprint_error(" 802.11 state %d\n", sc->sc_ic.ic_state); 2717 } 2718 2719 static int 2720 iwn_intr(void *arg) 2721 { 2722 struct iwn_softc *sc = arg; 2723 2724 /* Disable interrupts. */ 2725 IWN_WRITE(sc, IWN_INT_MASK, 0); 2726 2727 softint_schedule(sc->sc_soft_ih); 2728 return 1; 2729 } 2730 2731 static void 2732 iwn_softintr(void *arg) 2733 { 2734 struct iwn_softc *sc = arg; 2735 struct ifnet *ifp = sc->sc_ic.ic_ifp; 2736 uint32_t r1, r2, tmp; 2737 int s; 2738 2739 /* Read interrupts from ICT (fast) or from registers (slow). */ 2740 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2741 bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, 2742 IWN_ICT_SIZE, BUS_DMASYNC_POSTREAD); 2743 tmp = 0; 2744 while (sc->ict[sc->ict_cur] != 0) { 2745 tmp |= sc->ict[sc->ict_cur]; 2746 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 2747 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 2748 } 2749 bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, 2750 IWN_ICT_SIZE, BUS_DMASYNC_PREWRITE); 2751 tmp = le32toh(tmp); 2752 if (tmp == 0xffffffff) /* Shouldn't happen. */ 2753 tmp = 0; 2754 else if (tmp & 0xc0000) /* Workaround a HW bug. */ 2755 tmp |= 0x8000; 2756 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 2757 r2 = 0; /* Unused. */ 2758 } else { 2759 r1 = IWN_READ(sc, IWN_INT); 2760 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 2761 return; /* Hardware gone! */ 2762 r2 = IWN_READ(sc, IWN_FH_INT); 2763 } 2764 if (r1 == 0 && r2 == 0) { 2765 goto out; /* Interrupt not for us. */ 2766 } 2767 2768 /* Acknowledge interrupts. */ 2769 IWN_WRITE(sc, IWN_INT, r1); 2770 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 2771 IWN_WRITE(sc, IWN_FH_INT, r2); 2772 2773 if (r1 & IWN_INT_RF_TOGGLED) { 2774 tmp = IWN_READ(sc, IWN_GP_CNTRL); 2775 aprint_error_dev(sc->sc_dev, 2776 "RF switch: radio %s\n", 2777 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); 2778 } 2779 if (r1 & IWN_INT_CT_REACHED) { 2780 aprint_error_dev(sc->sc_dev, 2781 "critical temperature reached!\n"); 2782 } 2783 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 2784 aprint_error_dev(sc->sc_dev, 2785 "fatal firmware error\n"); 2786 /* Dump firmware error log and stop. 
*/ 2787 iwn_fatal_intr(sc); 2788 s = splnet(); 2789 ifp->if_flags &= ~IFF_UP; 2790 iwn_stop(ifp, 1); 2791 splx(s); 2792 return; 2793 } 2794 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 2795 (r2 & IWN_FH_INT_RX)) { 2796 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2797 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 2798 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 2799 IWN_WRITE_1(sc, IWN_INT_PERIODIC, IWN_INT_PERIODIC_DIS); 2800 iwn_notif_intr(sc); 2801 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 2802 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 2803 IWN_INT_PERIODIC_ENA); 2804 } 2805 } else 2806 iwn_notif_intr(sc); 2807 } 2808 2809 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 2810 if (sc->sc_flags & IWN_FLAG_USE_ICT) 2811 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 2812 wakeup(sc); /* FH DMA transfer completed. */ 2813 } 2814 2815 if (r1 & IWN_INT_ALIVE) 2816 wakeup(sc); /* Firmware is alive. */ 2817 2818 if (r1 & IWN_INT_WAKEUP) 2819 iwn_wakeup_intr(sc); 2820 2821 out: 2822 /* Re-enable interrupts. */ 2823 if (ifp->if_flags & IFF_UP) 2824 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 2825 } 2826 2827 /* 2828 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 2829 * 5000 adapters use a slightly different format). 2830 */ 2831 static void 2832 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2833 uint16_t len) 2834 { 2835 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 2836 2837 *w = htole16(len + 8); 2838 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2839 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr, 2840 sizeof (uint16_t), 2841 BUS_DMASYNC_PREWRITE); 2842 if (idx < IWN_SCHED_WINSZ) { 2843 *(w + IWN_TX_RING_COUNT) = *w; 2844 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2845 (char *)(void *)(w + IWN_TX_RING_COUNT) - 2846 (char *)(void *)sc->sched_dma.vaddr, 2847 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2848 } 2849 } 2850 2851 static void 2852 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2853 uint16_t len) 2854 { 2855 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2856 2857 *w = htole16(id << 12 | (len + 8)); 2858 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2859 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr, 2860 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2861 if (idx < IWN_SCHED_WINSZ) { 2862 *(w + IWN_TX_RING_COUNT) = *w; 2863 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2864 (char *)(void *)(w + IWN_TX_RING_COUNT) - 2865 (char *)(void *)sc->sched_dma.vaddr, 2866 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2867 } 2868 } 2869 2870 #ifdef notyet 2871 static void 2872 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 2873 { 2874 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2875 2876 *w = (*w & htole16(0xf000)) | htole16(1); 2877 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2878 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr, 2879 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2880 if (idx < IWN_SCHED_WINSZ) { 2881 *(w + IWN_TX_RING_COUNT) = *w; 2882 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2883 (char *)(void *)(w + IWN_TX_RING_COUNT) - 2884 (char *)(void *)sc->sched_dma.vaddr, 2885 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2886 } 2887 } 2888 #endif 2889 2890 static int 2891 iwn_tx(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac) 2892 { 2893 struct ieee80211com *ic = &sc->sc_ic; 2894 struct iwn_node *wn = (void *)ni; 2895 struct iwn_tx_ring *ring; 2896 struct iwn_tx_desc *desc; 2897 struct iwn_tx_data 
*data; 2898 struct iwn_tx_cmd *cmd; 2899 struct iwn_cmd_data *tx; 2900 const struct iwn_rate *rinfo; 2901 struct ieee80211_frame *wh; 2902 struct ieee80211_key *k = NULL; 2903 struct mbuf *m1; 2904 uint32_t flags; 2905 u_int hdrlen; 2906 bus_dma_segment_t *seg; 2907 uint8_t tid, ridx, txant, type; 2908 int i, totlen, error, pad; 2909 2910 const struct chanAccParams *cap; 2911 int noack; 2912 int hdrlen2; 2913 2914 wh = mtod(m, struct ieee80211_frame *); 2915 hdrlen = ieee80211_anyhdrsize(wh); 2916 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2917 2918 hdrlen2 = (ieee80211_has_qos(wh)) ? 2919 sizeof (struct ieee80211_qosframe) : 2920 sizeof (struct ieee80211_frame); 2921 2922 if (hdrlen != hdrlen2) 2923 aprint_error_dev(sc->sc_dev, "hdrlen error (%d != %d)\n", 2924 hdrlen, hdrlen2); 2925 2926 /* XXX OpenBSD sets a different tid when using QOS */ 2927 tid = 0; 2928 if (ieee80211_has_qos(wh)) { 2929 cap = &ic->ic_wme.wme_chanParams; 2930 noack = cap->cap_wmeParams[ac].wmep_noackPolicy; 2931 } 2932 else 2933 noack = 0; 2934 2935 ring = &sc->txq[ac]; 2936 desc = &ring->desc[ring->cur]; 2937 data = &ring->data[ring->cur]; 2938 2939 /* Choose a TX rate index. */ 2940 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 2941 type != IEEE80211_FC0_TYPE_DATA) { 2942 ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ? 2943 IWN_RIDX_OFDM6 : IWN_RIDX_CCK1; 2944 } else if (ic->ic_fixed_rate != -1) { 2945 ridx = sc->fixed_ridx; 2946 } else 2947 ridx = wn->ridx[ni->ni_txrate]; 2948 rinfo = &iwn_rates[ridx]; 2949 2950 /* Encrypt the frame if need be. */ 2951 /* 2952 * XXX For now, NetBSD swaps the encryption and bpf sections 2953 * in order to match old code and other drivers. Tests with 2954 * tcpdump indicates that the order is irrelevant, however, 2955 * as bpf produces unencrypted data for both ordering choices. 2956 */ 2957 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 2958 k = ieee80211_crypto_encap(ic, ni, m); 2959 if (k == NULL) { 2960 m_freem(m); 2961 return ENOBUFS; 2962 } 2963 /* Packet header may have moved, reset our local pointer. */ 2964 wh = mtod(m, struct ieee80211_frame *); 2965 } 2966 totlen = m->m_pkthdr.len; 2967 2968 if (sc->sc_drvbpf != NULL) { 2969 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 2970 2971 tap->wt_flags = 0; 2972 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq); 2973 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags); 2974 tap->wt_rate = rinfo->rate; 2975 tap->wt_hwqueue = ac; 2976 if (wh->i_fc[1] & IEEE80211_FC1_WEP) 2977 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2978 2979 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m, BPF_D_OUT); 2980 } 2981 2982 /* Prepare TX firmware command. */ 2983 cmd = &ring->cmd[ring->cur]; 2984 cmd->code = IWN_CMD_TX_DATA; 2985 cmd->flags = 0; 2986 cmd->qid = ring->qid; 2987 cmd->idx = ring->cur; 2988 2989 tx = (struct iwn_cmd_data *)cmd->data; 2990 /* NB: No need to clear tx, all fields are reinitialized here. */ 2991 tx->scratch = 0; /* clear "scratch" area */ 2992 2993 flags = 0; 2994 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2995 /* Unicast frame, check if an ACK is expected. */ 2996 if (!noack) 2997 flags |= IWN_TX_NEED_ACK; 2998 } 2999 3000 #ifdef notyet 3001 /* XXX NetBSD does not define IEEE80211_FC0_SUBTYPE_BAR */ 3002 if ((wh->i_fc[0] & 3003 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 3004 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) 3005 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */ 3006 #endif 3007 3008 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 3009 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. 
*/ 3010 3011 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 3012 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3013 /* NB: Group frames are sent using CCK in 802.11b/g. */ 3014 if (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) { 3015 flags |= IWN_TX_NEED_RTS; 3016 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 3017 ridx >= IWN_RIDX_OFDM6) { 3018 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 3019 flags |= IWN_TX_NEED_CTS; 3020 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 3021 flags |= IWN_TX_NEED_RTS; 3022 } 3023 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { 3024 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 3025 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 3026 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); 3027 flags |= IWN_TX_NEED_PROTECTION; 3028 } else 3029 flags |= IWN_TX_FULL_TXOP; 3030 } 3031 } 3032 3033 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 3034 type != IEEE80211_FC0_TYPE_DATA) 3035 tx->id = sc->broadcast_id; 3036 else 3037 tx->id = wn->id; 3038 3039 if (type == IEEE80211_FC0_TYPE_MGT) { 3040 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3041 3042 #ifndef IEEE80211_STA_ONLY 3043 /* Tell HW to set timestamp in probe responses. */ 3044 /* XXX NetBSD rev 1.11 added probe requests here but */ 3045 /* probe requests do not take timestamps (from Bergamini). */ 3046 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3047 flags |= IWN_TX_INSERT_TSTAMP; 3048 #endif 3049 /* XXX NetBSD rev 1.11 and 1.20 added AUTH/DAUTH and RTS/CTS */ 3050 /* changes here. These are not needed (from Bergamini). */ 3051 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3052 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 3053 tx->timeout = htole16(3); 3054 else 3055 tx->timeout = htole16(2); 3056 } else 3057 tx->timeout = htole16(0); 3058 3059 if (hdrlen & 3) { 3060 /* First segment length must be a multiple of 4. */ 3061 flags |= IWN_TX_NEED_PADDING; 3062 pad = 4 - (hdrlen & 3); 3063 } else 3064 pad = 0; 3065 3066 tx->len = htole16(totlen); 3067 tx->tid = tid; 3068 tx->rts_ntries = 60; 3069 tx->data_ntries = 15; 3070 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 3071 tx->plcp = rinfo->plcp; 3072 tx->rflags = rinfo->flags; 3073 if (tx->id == sc->broadcast_id) { 3074 /* Group or management frame. */ 3075 tx->linkq = 0; 3076 /* XXX Alternate between antenna A and B? */ 3077 txant = IWN_LSB(sc->txchainmask); 3078 tx->rflags |= IWN_RFLAG_ANT(txant); 3079 } else { 3080 tx->linkq = ni->ni_rates.rs_nrates - ni->ni_txrate - 1; 3081 flags |= IWN_TX_LINKQ; /* enable MRR */ 3082 } 3083 /* Set physical address of "scratch area". */ 3084 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 3085 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 3086 3087 /* Copy 802.11 header in TX command. */ 3088 /* XXX NetBSD changed this in rev 1.20 */ 3089 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen); 3090 3091 /* Trim 802.11 header. */ 3092 m_adj(m, hdrlen); 3093 tx->security = 0; 3094 tx->flags = htole32(flags); 3095 3096 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 3097 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3098 if (error != 0) { 3099 if (error != EFBIG) { 3100 aprint_error_dev(sc->sc_dev, 3101 "can't map mbuf (error %d)\n", error); 3102 m_freem(m); 3103 return error; 3104 } 3105 /* Too many DMA segments, linearize mbuf. 
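   The mbuf chain has more scatter/gather segments than the TX
   descriptor can describe, so the payload is copied into a single
   fresh mbuf (backed by a cluster when it exceeds MHLEN) and the DMA
   map load is retried once.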
*/ 3106 MGETHDR(m1, M_DONTWAIT, MT_DATA); 3107 if (m1 == NULL) { 3108 m_freem(m); 3109 return ENOBUFS; 3110 } 3111 if (m->m_pkthdr.len > MHLEN) { 3112 MCLGET(m1, M_DONTWAIT); 3113 if (!(m1->m_flags & M_EXT)) { 3114 m_freem(m); 3115 m_freem(m1); 3116 return ENOBUFS; 3117 } 3118 } 3119 m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *)); 3120 m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len; 3121 m_freem(m); 3122 m = m1; 3123 3124 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 3125 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3126 if (error != 0) { 3127 aprint_error_dev(sc->sc_dev, 3128 "can't map mbuf (error %d)\n", error); 3129 m_freem(m); 3130 return error; 3131 } 3132 } 3133 3134 data->m = m; 3135 data->ni = ni; 3136 3137 DPRINTFN(4, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n", 3138 ring->qid, ring->cur, m->m_pkthdr.len, data->map->dm_nsegs)); 3139 3140 /* Fill TX descriptor. */ 3141 desc->nsegs = 1 + data->map->dm_nsegs; 3142 /* First DMA segment is used by the TX command. */ 3143 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 3144 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 3145 (4 + sizeof (*tx) + hdrlen + pad) << 4); 3146 /* Other DMA segments are for data payload. */ 3147 seg = data->map->dm_segs; 3148 for (i = 1; i <= data->map->dm_nsegs; i++) { 3149 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 3150 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 3151 seg->ds_len << 4); 3152 seg++; 3153 } 3154 3155 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize, 3156 BUS_DMASYNC_PREWRITE); 3157 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map, 3158 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr, 3159 sizeof (*cmd), BUS_DMASYNC_PREWRITE); 3160 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 3161 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr, 3162 sizeof (*desc), BUS_DMASYNC_PREWRITE); 3163 3164 #ifdef notyet 3165 /* Update TX scheduler. */ 3166 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 3167 #endif 3168 3169 /* Kick TX ring. */ 3170 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3171 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3172 3173 /* Mark TX ring as full if we reach a certain threshold. */ 3174 if (++ring->queued > IWN_TX_RING_HIMARK) 3175 sc->qfullmsk |= 1 << ring->qid; 3176 3177 return 0; 3178 } 3179 3180 static void 3181 iwn_start(struct ifnet *ifp) 3182 { 3183 struct iwn_softc *sc = ifp->if_softc; 3184 struct ieee80211com *ic = &sc->sc_ic; 3185 struct ieee80211_node *ni; 3186 struct ether_header *eh; 3187 struct mbuf *m; 3188 int ac; 3189 3190 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 3191 return; 3192 3193 for (;;) { 3194 if (sc->sc_beacon_wait == 1) { 3195 ifp->if_flags |= IFF_OACTIVE; 3196 break; 3197 } 3198 3199 if (sc->qfullmsk != 0) { 3200 ifp->if_flags |= IFF_OACTIVE; 3201 break; 3202 } 3203 /* Send pending management frames first. */ 3204 IF_DEQUEUE(&ic->ic_mgtq, m); 3205 if (m != NULL) { 3206 ni = M_GETCTX(m, struct ieee80211_node *); 3207 ac = 0; 3208 goto sendit; 3209 } 3210 if (ic->ic_state != IEEE80211_S_RUN) 3211 break; 3212 3213 /* Encapsulate and send data frames. 
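   Management frames queued on ic_mgtq were drained first above (always
   on ring 0); data frames are dequeued only once we are in RUN state,
   and ieee80211_classify() assigns the WME access category that picks
   the TX ring used by iwn_tx(), with EAPOL frames forced to best
   effort.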
*/ 3214 IFQ_DEQUEUE(&ifp->if_snd, m); 3215 if (m == NULL) 3216 break; 3217 if (m->m_len < sizeof (*eh) && 3218 (m = m_pullup(m, sizeof (*eh))) == NULL) { 3219 if_statinc(ifp, if_oerrors); 3220 continue; 3221 } 3222 eh = mtod(m, struct ether_header *); 3223 ni = ieee80211_find_txnode(ic, eh->ether_dhost); 3224 if (ni == NULL) { 3225 m_freem(m); 3226 if_statinc(ifp, if_oerrors); 3227 continue; 3228 } 3229 /* classify mbuf so we can find which tx ring to use */ 3230 if (ieee80211_classify(ic, m, ni) != 0) { 3231 m_freem(m); 3232 ieee80211_free_node(ni); 3233 if_statinc(ifp, if_oerrors); 3234 continue; 3235 } 3236 3237 /* No QoS encapsulation for EAPOL frames. */ 3238 ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ? 3239 M_WME_GETAC(m) : WME_AC_BE; 3240 3241 if (sc->sc_beacon_wait == 0) 3242 bpf_mtap(ifp, m, BPF_D_OUT); 3243 3244 if ((m = ieee80211_encap(ic, m, ni)) == NULL) { 3245 ieee80211_free_node(ni); 3246 if_statinc(ifp, if_oerrors); 3247 continue; 3248 } 3249 sendit: 3250 if (sc->sc_beacon_wait) 3251 continue; 3252 3253 bpf_mtap3(ic->ic_rawbpf, m, BPF_D_OUT); 3254 3255 if (iwn_tx(sc, m, ni, ac) != 0) { 3256 ieee80211_free_node(ni); 3257 if_statinc(ifp, if_oerrors); 3258 continue; 3259 } 3260 3261 sc->sc_tx_timer = 5; 3262 ifp->if_timer = 1; 3263 } 3264 3265 if (sc->sc_beacon_wait > 1) 3266 sc->sc_beacon_wait = 0; 3267 } 3268 3269 static void 3270 iwn_watchdog(struct ifnet *ifp) 3271 { 3272 struct iwn_softc *sc = ifp->if_softc; 3273 3274 ifp->if_timer = 0; 3275 3276 if (sc->sc_tx_timer > 0) { 3277 if (--sc->sc_tx_timer == 0) { 3278 aprint_error_dev(sc->sc_dev, 3279 "device timeout\n"); 3280 ifp->if_flags &= ~IFF_UP; 3281 iwn_stop(ifp, 1); 3282 if_statinc(ifp, if_oerrors); 3283 return; 3284 } 3285 ifp->if_timer = 1; 3286 } 3287 3288 ieee80211_watchdog(&sc->sc_ic); 3289 } 3290 3291 static int 3292 iwn_ioctl(struct ifnet *ifp, u_long cmd, void *data) 3293 { 3294 struct iwn_softc *sc = ifp->if_softc; 3295 struct ieee80211com *ic = &sc->sc_ic; 3296 const struct sockaddr *sa; 3297 int s, error = 0; 3298 3299 s = splnet(); 3300 3301 switch (cmd) { 3302 case SIOCSIFADDR: 3303 ifp->if_flags |= IFF_UP; 3304 /* FALLTHROUGH */ 3305 case SIOCSIFFLAGS: 3306 /* XXX Added as it is in every NetBSD driver */ 3307 if ((error = ifioctl_common(ifp, cmd, data)) != 0) 3308 break; 3309 if (ifp->if_flags & IFF_UP) { 3310 if (!(ifp->if_flags & IFF_RUNNING)) 3311 error = iwn_init(ifp); 3312 } else { 3313 if (ifp->if_flags & IFF_RUNNING) 3314 iwn_stop(ifp, 1); 3315 } 3316 break; 3317 3318 case SIOCADDMULTI: 3319 case SIOCDELMULTI: 3320 sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data); 3321 error = (cmd == SIOCADDMULTI) ? 3322 ether_addmulti(sa, &sc->sc_ec) : 3323 ether_delmulti(sa, &sc->sc_ec); 3324 3325 if (error == ENETRESET) 3326 error = 0; 3327 break; 3328 3329 default: 3330 error = ieee80211_ioctl(ic, cmd, data); 3331 } 3332 3333 if (error == ENETRESET) { 3334 error = 0; 3335 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 3336 (IFF_UP | IFF_RUNNING)) { 3337 iwn_stop(ifp, 0); 3338 error = iwn_init(ifp); 3339 } 3340 } 3341 3342 splx(s); 3343 return error; 3344 } 3345 3346 /* 3347 * Send a command to the firmware. 
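 * Commands go through TX ring 4. A payload that fits in the per-slot
 * command buffer is copied there directly; larger ones (up to
 * MCLBYTES) are staged in an mbuf and DMA-mapped. With async == 0 the
 * caller sleeps on the descriptor until iwn_cmd_done() issues the
 * matching wakeup, e.g.:
 *
 *	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
 *
 * as done in iwn_set_led() below (async, so no sleep).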
3348 */ 3349 static int 3350 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) 3351 { 3352 struct iwn_tx_ring *ring = &sc->txq[4]; 3353 struct iwn_tx_desc *desc; 3354 struct iwn_tx_data *data; 3355 struct iwn_tx_cmd *cmd; 3356 struct mbuf *m; 3357 bus_addr_t paddr; 3358 int totlen, error; 3359 3360 desc = &ring->desc[ring->cur]; 3361 data = &ring->data[ring->cur]; 3362 totlen = 4 + size; 3363 3364 if (size > sizeof cmd->data) { 3365 /* Command is too large to fit in a descriptor. */ 3366 if (totlen > MCLBYTES) 3367 return EINVAL; 3368 MGETHDR(m, M_DONTWAIT, MT_DATA); 3369 if (m == NULL) 3370 return ENOMEM; 3371 if (totlen > MHLEN) { 3372 MCLGET(m, M_DONTWAIT); 3373 if (!(m->m_flags & M_EXT)) { 3374 m_freem(m); 3375 return ENOMEM; 3376 } 3377 } 3378 cmd = mtod(m, struct iwn_tx_cmd *); 3379 error = bus_dmamap_load(sc->sc_dmat, data->map, cmd, totlen, 3380 NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3381 if (error != 0) { 3382 m_freem(m); 3383 return error; 3384 } 3385 data->m = m; 3386 paddr = data->map->dm_segs[0].ds_addr; 3387 } else { 3388 cmd = &ring->cmd[ring->cur]; 3389 paddr = data->cmd_paddr; 3390 } 3391 3392 cmd->code = code; 3393 cmd->flags = 0; 3394 cmd->qid = ring->qid; 3395 cmd->idx = ring->cur; 3396 /* 3397 * Coverity:[OUT_OF_BOUNDS] 3398 * false positive since, allocated in mbuf if it does not fit 3399 */ 3400 memcpy(cmd->data, buf, size); 3401 3402 desc->nsegs = 1; 3403 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 3404 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); 3405 3406 if (size > sizeof cmd->data) { 3407 bus_dmamap_sync(sc->sc_dmat, data->map, 0, totlen, 3408 BUS_DMASYNC_PREWRITE); 3409 } else { 3410 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map, 3411 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr, 3412 totlen, BUS_DMASYNC_PREWRITE); 3413 } 3414 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 3415 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr, 3416 sizeof (*desc), BUS_DMASYNC_PREWRITE); 3417 3418 #ifdef notyet 3419 /* Update TX scheduler. */ 3420 ops->update_sched(sc, ring->qid, ring->cur, 0, 0); 3421 #endif 3422 DPRINTFN(4, ("iwn_cmd %d size=%d %s\n", code, size, async ? " (async)" : "")); 3423 3424 /* Kick command ring. */ 3425 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3426 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3427 3428 return async ? 0 : tsleep(desc, PCATCH, "iwncmd", hz); 3429 } 3430 3431 static int 3432 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3433 { 3434 struct iwn4965_node_info hnode; 3435 char *src, *dst; 3436 3437 /* 3438 * We use the node structure for 5000 Series internally (it is 3439 * a superset of the one for 4965AGN). We thus copy the common 3440 * fields before sending the command. 3441 */ 3442 src = (char *)node; 3443 dst = (char *)&hnode; 3444 memcpy(dst, src, 48); 3445 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ 3446 memcpy(dst + 48, src + 72, 20); 3447 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); 3448 } 3449 3450 static int 3451 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3452 { 3453 /* Direct mapping. 
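   The 5000 firmware takes struct iwn_node_info verbatim, whereas the
   4965 variant above repacks it: 48 common bytes, then 20 more after
   skipping the 24-byte TSC/RX-MIC/TX-MIC block.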
*/ 3454 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); 3455 } 3456 3457 static int 3458 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni) 3459 { 3460 struct iwn_node *wn = (void *)ni; 3461 struct ieee80211_rateset *rs = &ni->ni_rates; 3462 struct iwn_cmd_link_quality linkq; 3463 const struct iwn_rate *rinfo; 3464 uint8_t txant; 3465 int i, txrate; 3466 3467 /* Use the first valid TX antenna. */ 3468 txant = IWN_LSB(sc->txchainmask); 3469 3470 memset(&linkq, 0, sizeof linkq); 3471 linkq.id = wn->id; 3472 linkq.antmsk_1stream = txant; 3473 linkq.antmsk_2stream = IWN_ANT_AB; 3474 linkq.ampdu_max = 31; 3475 linkq.ampdu_threshold = 3; 3476 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3477 3478 /* Start at highest available bit-rate. */ 3479 txrate = rs->rs_nrates - 1; 3480 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) { 3481 rinfo = &iwn_rates[wn->ridx[txrate]]; 3482 linkq.retry[i].plcp = rinfo->plcp; 3483 linkq.retry[i].rflags = rinfo->flags; 3484 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant); 3485 /* Next retry at immediate lower bit-rate. */ 3486 if (txrate > 0) 3487 txrate--; 3488 } 3489 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1); 3490 } 3491 3492 /* 3493 * Broadcast node is used to send group-addressed and management frames. 3494 */ 3495 static int 3496 iwn_add_broadcast_node(struct iwn_softc *sc, int async) 3497 { 3498 struct iwn_ops *ops = &sc->ops; 3499 struct iwn_node_info node; 3500 struct iwn_cmd_link_quality linkq; 3501 const struct iwn_rate *rinfo; 3502 uint8_t txant; 3503 int i, error; 3504 3505 memset(&node, 0, sizeof node); 3506 IEEE80211_ADDR_COPY(node.macaddr, etherbroadcastaddr); 3507 node.id = sc->broadcast_id; 3508 DPRINTF(("adding broadcast node\n")); 3509 if ((error = ops->add_node(sc, &node, async)) != 0) 3510 return error; 3511 3512 /* Use the first valid TX antenna. */ 3513 txant = IWN_LSB(sc->txchainmask); 3514 3515 memset(&linkq, 0, sizeof linkq); 3516 linkq.id = sc->broadcast_id; 3517 linkq.antmsk_1stream = txant; 3518 linkq.antmsk_2stream = IWN_ANT_AB; 3519 linkq.ampdu_max = 64; 3520 linkq.ampdu_threshold = 3; 3521 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3522 3523 /* Use lowest mandatory bit-rate. */ 3524 rinfo = (sc->sc_ic.ic_curmode != IEEE80211_MODE_11A) ? 3525 &iwn_rates[IWN_RIDX_CCK1] : &iwn_rates[IWN_RIDX_OFDM6]; 3526 linkq.retry[0].plcp = rinfo->plcp; 3527 linkq.retry[0].rflags = rinfo->flags; 3528 linkq.retry[0].rflags |= IWN_RFLAG_ANT(txant); 3529 /* Use same bit-rate for all TX retries. */ 3530 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) { 3531 linkq.retry[i].plcp = linkq.retry[0].plcp; 3532 linkq.retry[i].rflags = linkq.retry[0].rflags; 3533 } 3534 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); 3535 } 3536 3537 static void 3538 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3539 { 3540 struct iwn_cmd_led led; 3541 3542 /* Clear microcode LED ownership. */ 3543 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL); 3544 3545 led.which = which; 3546 led.unit = htole32(10000); /* on/off in unit of 100ms */ 3547 led.off = off; 3548 led.on = on; 3549 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); 3550 } 3551 3552 /* 3553 * Set the critical temperature at which the firmware will stop the radio 3554 * and notify us. 
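 * The 110 degC threshold is converted to whatever unit the firmware
 * expects: Kelvin on the 4965 (IWN_CTOK(110) = 383), raw sensor units
 * on the 5150 (inverting the temp = raw / -5 + temp_off relation used
 * in iwn5000_get_temperature()), and plain Celsius elsewhere.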
3555 */ 3556 static int 3557 iwn_set_critical_temp(struct iwn_softc *sc) 3558 { 3559 struct iwn_critical_temp crit; 3560 int32_t temp; 3561 3562 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); 3563 3564 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 3565 temp = (IWN_CTOK(110) - sc->temp_off) * -5; 3566 else if (sc->hw_type == IWN_HW_REV_TYPE_4965) 3567 temp = IWN_CTOK(110); 3568 else 3569 temp = 110; 3570 memset(&crit, 0, sizeof crit); 3571 crit.tempR = htole32(temp); 3572 DPRINTF(("setting critical temperature to %d\n", temp)); 3573 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); 3574 } 3575 3576 static int 3577 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) 3578 { 3579 struct iwn_cmd_timing cmd; 3580 uint64_t val, mod; 3581 3582 memset(&cmd, 0, sizeof cmd); 3583 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3584 cmd.bintval = htole16(ni->ni_intval); 3585 cmd.lintval = htole16(10); 3586 3587 /* Compute remaining time until next beacon. */ 3588 val = (uint64_t)ni->ni_intval * 1024; /* msecs -> usecs */ 3589 mod = le64toh(cmd.tstamp) % val; 3590 cmd.binitval = htole32((uint32_t)(val - mod)); 3591 3592 DPRINTF(("timing bintval=%u, tstamp=%" PRIu64 ", init=%" PRIu32 "\n", 3593 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod))); 3594 3595 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); 3596 } 3597 3598 static void 3599 iwn4965_power_calibration(struct iwn_softc *sc, int temp) 3600 { 3601 /* Adjust TX power if need be (delta >= 3 degC). */ 3602 DPRINTF(("temperature %d->%d\n", sc->temp, temp)); 3603 if (abs(temp - sc->temp) >= 3) { 3604 /* Record temperature of last calibration. */ 3605 sc->temp = temp; 3606 (void)iwn4965_set_txpower(sc, 1); 3607 } 3608 } 3609 3610 /* 3611 * Set TX power for current channel (each rate has its own power settings). 3612 * This function takes into account the regulatory information from EEPROM, 3613 * the current temperature and the current voltage. 3614 */ 3615 static int 3616 iwn4965_set_txpower(struct iwn_softc *sc, int async) 3617 { 3618 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3619 #define fdivround(a, b, n) \ 3620 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3621 /* Linear interpolation. */ 3622 #define interpolate(x, x1, y1, x2, y2, n) \ 3623 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3624 3625 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; 3626 struct ieee80211com *ic = &sc->sc_ic; 3627 struct iwn_ucode_info *uc = &sc->ucode_info; 3628 struct ieee80211_channel *ch; 3629 struct iwn4965_cmd_txpower cmd; 3630 struct iwn4965_eeprom_chan_samples *chans; 3631 const uint8_t *rf_gain, *dsp_gain; 3632 int32_t vdiff, tdiff; 3633 int i, c, grp, maxpwr; 3634 uint8_t chan; 3635 3636 /* Retrieve current channel from last RXON. */ 3637 chan = sc->rxon.chan; 3638 DPRINTF(("setting TX power for channel %d\n", chan)); 3639 ch = &ic->ic_channels[chan]; 3640 3641 memset(&cmd, 0, sizeof cmd); 3642 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1; 3643 cmd.chan = chan; 3644 3645 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 3646 maxpwr = sc->maxpwr5GHz; 3647 rf_gain = iwn4965_rf_gain_5ghz; 3648 dsp_gain = iwn4965_dsp_gain_5ghz; 3649 } else { 3650 maxpwr = sc->maxpwr2GHz; 3651 rf_gain = iwn4965_rf_gain_2ghz; 3652 dsp_gain = iwn4965_dsp_gain_2ghz; 3653 } 3654 3655 /* Compute voltage compensation. 
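   The microcode reports the supply voltage it was calibrated at; its
   difference from the EEPROM value, in steps of 7, nudges the power
   index computed below.  E.g. a difference of +7 gives vdiff = 1,
   doubled to 2, which is then subtracted from the gain index.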
*/ 3656 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7; 3657 if (vdiff > 0) 3658 vdiff *= 2; 3659 if (abs(vdiff) > 2) 3660 vdiff = 0; 3661 DPRINTF(("voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", 3662 vdiff, le32toh(uc->volt), sc->eeprom_voltage)); 3663 3664 /* Get channel attenuation group. */ 3665 if (chan <= 20) /* 1-20 */ 3666 grp = 4; 3667 else if (chan <= 43) /* 34-43 */ 3668 grp = 0; 3669 else if (chan <= 70) /* 44-70 */ 3670 grp = 1; 3671 else if (chan <= 124) /* 71-124 */ 3672 grp = 2; 3673 else /* 125-200 */ 3674 grp = 3; 3675 DPRINTF(("chan %d, attenuation group=%d\n", chan, grp)); 3676 3677 /* Get channel sub-band. */ 3678 for (i = 0; i < IWN_NBANDS; i++) 3679 if (sc->bands[i].lo != 0 && 3680 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) 3681 break; 3682 if (i == IWN_NBANDS) /* Can't happen in real-life. */ 3683 return EINVAL; 3684 chans = sc->bands[i].chans; 3685 DPRINTF(("chan %d sub-band=%d\n", chan, i)); 3686 3687 for (c = 0; c < 2; c++) { 3688 uint8_t power, gain, temp; 3689 int maxchpwr, pwr, ridx, idx; 3690 3691 power = interpolate(chan, 3692 chans[0].num, chans[0].samples[c][1].power, 3693 chans[1].num, chans[1].samples[c][1].power, 1); 3694 gain = interpolate(chan, 3695 chans[0].num, chans[0].samples[c][1].gain, 3696 chans[1].num, chans[1].samples[c][1].gain, 1); 3697 temp = interpolate(chan, 3698 chans[0].num, chans[0].samples[c][1].temp, 3699 chans[1].num, chans[1].samples[c][1].temp, 1); 3700 DPRINTF(("TX chain %d: power=%d gain=%d temp=%d\n", 3701 c, power, gain, temp)); 3702 3703 /* Compute temperature compensation. */ 3704 tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; 3705 DPRINTF(("temperature compensation=%d (current=%d, " 3706 "EEPROM=%d)\n", tdiff, sc->temp, temp)); 3707 3708 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { 3709 /* Convert dBm to half-dBm. */ 3710 maxchpwr = sc->maxpwr[chan] * 2; 3711 if ((ridx / 8) & 1) 3712 maxchpwr -= 6; /* MIMO 2T: -3dB */ 3713 3714 pwr = maxpwr; 3715 3716 /* Adjust TX power based on rate. */ 3717 if ((ridx % 8) == 5) 3718 pwr -= 15; /* OFDM48: -7.5dB */ 3719 else if ((ridx % 8) == 6) 3720 pwr -= 17; /* OFDM54: -8.5dB */ 3721 else if ((ridx % 8) == 7) 3722 pwr -= 20; /* OFDM60: -10dB */ 3723 else 3724 pwr -= 10; /* Others: -5dB */ 3725 3726 /* Do not exceed channel max TX power. */ 3727 if (pwr > maxchpwr) 3728 pwr = maxchpwr; 3729 3730 idx = gain - (pwr - power) - tdiff - vdiff; 3731 if ((ridx / 8) & 1) /* MIMO */ 3732 idx += (int32_t)le32toh(uc->atten[grp][c]); 3733 3734 if (cmd.band == 0) 3735 idx += 9; /* 5GHz */ 3736 if (ridx == IWN_RIDX_MAX) 3737 idx += 5; /* CCK */ 3738 3739 /* Make sure idx stays in a valid range. */ 3740 if (idx < 0) 3741 idx = 0; 3742 else if (idx > IWN4965_MAX_PWR_INDEX) 3743 idx = IWN4965_MAX_PWR_INDEX; 3744 3745 DPRINTF(("TX chain %d, rate idx %d: power=%d\n", 3746 c, ridx, idx)); 3747 cmd.power[ridx].rf_gain[c] = rf_gain[idx]; 3748 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; 3749 } 3750 } 3751 3752 DPRINTF(("setting TX power for chan %d\n", chan)); 3753 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); 3754 3755 #undef interpolate 3756 #undef fdivround 3757 } 3758 3759 static int 3760 iwn5000_set_txpower(struct iwn_softc *sc, int async) 3761 { 3762 struct iwn5000_cmd_txpower cmd; 3763 int cmdid; 3764 3765 /* 3766 * TX power calibration is handled automatically by the firmware 3767 * for 5000 Series. 
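 * The command below therefore only sets a global cap
 * (2 * IWN5000_TXPOWER_MAX_DBM half-dBm units, i.e. 16 dBm) and leaves
 * the per-rate and temperature adjustments to the firmware; microcode
 * with API version 1 expects the older command id.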
3768 */ 3769 memset(&cmd, 0, sizeof cmd); 3770 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */ 3771 cmd.flags = IWN5000_TXPOWER_NO_CLOSED; 3772 cmd.srv_limit = IWN5000_TXPOWER_AUTO; 3773 DPRINTF(("setting TX power\n")); 3774 if (IWN_UCODE_API(sc->ucode_rev) == 1) 3775 cmdid = IWN_CMD_TXPOWER_DBM_V1; 3776 else 3777 cmdid = IWN_CMD_TXPOWER_DBM; 3778 return iwn_cmd(sc, cmdid, &cmd, sizeof cmd, async); 3779 } 3780 3781 /* 3782 * Retrieve the maximum RSSI (in dBm) among receivers. 3783 */ 3784 static int 3785 iwn4965_get_rssi(const struct iwn_rx_stat *stat) 3786 { 3787 const struct iwn4965_rx_phystat *phy = (const void *)stat->phybuf; 3788 uint8_t mask, agc; 3789 int rssi; 3790 3791 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC; 3792 agc = (le16toh(phy->agc) >> 7) & 0x7f; 3793 3794 rssi = 0; 3795 if (mask & IWN_ANT_A) 3796 rssi = MAX(rssi, phy->rssi[0]); 3797 if (mask & IWN_ANT_B) 3798 rssi = MAX(rssi, phy->rssi[2]); 3799 if (mask & IWN_ANT_C) 3800 rssi = MAX(rssi, phy->rssi[4]); 3801 3802 return rssi - agc - IWN_RSSI_TO_DBM; 3803 } 3804 3805 static int 3806 iwn5000_get_rssi(const struct iwn_rx_stat *stat) 3807 { 3808 const struct iwn5000_rx_phystat *phy = (const void *)stat->phybuf; 3809 uint8_t agc; 3810 int rssi; 3811 3812 agc = (le32toh(phy->agc) >> 9) & 0x7f; 3813 3814 rssi = MAX(le16toh(phy->rssi[0]) & 0xff, 3815 le16toh(phy->rssi[1]) & 0xff); 3816 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi); 3817 3818 return rssi - agc - IWN_RSSI_TO_DBM; 3819 } 3820 3821 /* 3822 * Retrieve the average noise (in dBm) among receivers. 3823 */ 3824 static int 3825 iwn_get_noise(const struct iwn_rx_general_stats *stats) 3826 { 3827 int i, total, nbant, noise; 3828 3829 total = nbant = 0; 3830 for (i = 0; i < 3; i++) { 3831 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0) 3832 continue; 3833 total += noise; 3834 nbant++; 3835 } 3836 /* There should be at least one antenna but check anyway. */ 3837 return (nbant == 0) ? -127 : (total / nbant) - 107; 3838 } 3839 3840 /* 3841 * Compute temperature (in degC) from last received statistics. 3842 */ 3843 static int 3844 iwn4965_get_temperature(struct iwn_softc *sc) 3845 { 3846 struct iwn_ucode_info *uc = &sc->ucode_info; 3847 int32_t r1, r2, r3, r4, temp; 3848 3849 r1 = le32toh(uc->temp[0].chan20MHz); 3850 r2 = le32toh(uc->temp[1].chan20MHz); 3851 r3 = le32toh(uc->temp[2].chan20MHz); 3852 r4 = le32toh(sc->rawtemp); 3853 3854 if (r1 == r3) /* Prevents division by 0 (should not happen). */ 3855 return 0; 3856 3857 /* Sign-extend 23-bit R4 value to 32-bit. */ 3858 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000; 3859 /* Compute temperature in Kelvin. */ 3860 temp = (259 * (r4 - r2)) / (r3 - r1); 3861 temp = (temp * 97) / 100 + 8; 3862 3863 DPRINTF(("temperature %dK/%dC\n", temp, IWN_KTOC(temp))); 3864 return IWN_KTOC(temp); 3865 } 3866 3867 static int 3868 iwn5000_get_temperature(struct iwn_softc *sc) 3869 { 3870 int32_t temp; 3871 3872 /* 3873 * Temperature is not used by the driver for 5000 Series because 3874 * TX power calibration is handled by firmware. We export it to 3875 * users through the sensor framework though. 3876 */ 3877 temp = le32toh(sc->rawtemp); 3878 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 3879 temp = (temp / -5) + sc->temp_off; 3880 temp = IWN_KTOC(temp); 3881 } 3882 return temp; 3883 } 3884 3885 /* 3886 * Initialize sensitivity calibration state machine. 
3887 */ 3888 static int 3889 iwn_init_sensitivity(struct iwn_softc *sc) 3890 { 3891 struct iwn_ops *ops = &sc->ops; 3892 struct iwn_calib_state *calib = &sc->calib; 3893 uint32_t flags; 3894 int error; 3895 3896 /* Reset calibration state machine. */ 3897 memset(calib, 0, sizeof (*calib)); 3898 calib->state = IWN_CALIB_STATE_INIT; 3899 calib->cck_state = IWN_CCK_STATE_HIFA; 3900 /* Set initial correlation values. */ 3901 calib->ofdm_x1 = sc->limits->min_ofdm_x1; 3902 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; 3903 calib->ofdm_x4 = sc->limits->min_ofdm_x4; 3904 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; 3905 calib->cck_x4 = 125; 3906 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; 3907 calib->energy_cck = sc->limits->energy_cck; 3908 3909 /* Write initial sensitivity. */ 3910 if ((error = iwn_send_sensitivity(sc)) != 0) 3911 return error; 3912 3913 /* Write initial gains. */ 3914 if ((error = ops->init_gains(sc)) != 0) 3915 return error; 3916 3917 /* Request statistics at each beacon interval. */ 3918 flags = 0; 3919 DPRINTF(("sending request for statistics\n")); 3920 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 3921 } 3922 3923 /* 3924 * Collect noise and RSSI statistics for the first 20 beacons received 3925 * after association and use them to determine connected antennas and 3926 * to set differential gains. 3927 */ 3928 static void 3929 iwn_collect_noise(struct iwn_softc *sc, 3930 const struct iwn_rx_general_stats *stats) 3931 { 3932 struct iwn_ops *ops = &sc->ops; 3933 struct iwn_calib_state *calib = &sc->calib; 3934 uint32_t val; 3935 int i; 3936 3937 /* Accumulate RSSI and noise for all 3 antennas. */ 3938 for (i = 0; i < 3; i++) { 3939 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff; 3940 calib->noise[i] += le32toh(stats->noise[i]) & 0xff; 3941 } 3942 /* NB: We update differential gains only once after 20 beacons. */ 3943 if (++calib->nbeacons < 20) 3944 return; 3945 3946 /* Determine highest average RSSI. */ 3947 val = MAX(calib->rssi[0], calib->rssi[1]); 3948 val = MAX(calib->rssi[2], val); 3949 3950 /* Determine which antennas are connected. */ 3951 sc->chainmask = sc->rxchainmask; 3952 for (i = 0; i < 3; i++) 3953 if (val - calib->rssi[i] > 15 * 20) 3954 sc->chainmask &= ~(1 << i); 3955 DPRINTF(("RX chains mask: theoretical=0x%x, actual=0x%x\n", 3956 sc->rxchainmask, sc->chainmask)); 3957 3958 /* If none of the TX antennas are connected, keep at least one. */ 3959 if ((sc->chainmask & sc->txchainmask) == 0) 3960 sc->chainmask |= IWN_LSB(sc->txchainmask); 3961 3962 (void)ops->set_gains(sc); 3963 calib->state = IWN_CALIB_STATE_RUN; 3964 3965 #ifdef notyet 3966 /* XXX Disable RX chains with no antennas connected. */ 3967 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask)); 3968 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 3969 #endif 3970 3971 /* Enable power-saving mode if requested by user. */ 3972 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) 3973 (void)iwn_set_pslevel(sc, 0, 3, 1); 3974 } 3975 3976 static int 3977 iwn4965_init_gains(struct iwn_softc *sc) 3978 { 3979 struct iwn_phy_calib_gain cmd; 3980 3981 memset(&cmd, 0, sizeof cmd); 3982 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 3983 /* Differential gains initially set to 0 for all 3 antennas. 
*/ 3984 DPRINTF(("setting initial differential gains\n")); 3985 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 3986 } 3987 3988 static int 3989 iwn5000_init_gains(struct iwn_softc *sc) 3990 { 3991 struct iwn_phy_calib cmd; 3992 3993 memset(&cmd, 0, sizeof cmd); 3994 cmd.code = sc->reset_noise_gain; 3995 cmd.ngroups = 1; 3996 cmd.isvalid = 1; 3997 DPRINTF(("setting initial differential gains\n")); 3998 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 3999 } 4000 4001 static int 4002 iwn4965_set_gains(struct iwn_softc *sc) 4003 { 4004 struct iwn_calib_state *calib = &sc->calib; 4005 struct iwn_phy_calib_gain cmd; 4006 int i, delta, noise; 4007 4008 /* Get minimal noise among connected antennas. */ 4009 noise = INT_MAX; /* NB: There's at least one antenna. */ 4010 for (i = 0; i < 3; i++) 4011 if (sc->chainmask & (1 << i)) 4012 noise = MIN(calib->noise[i], noise); 4013 4014 memset(&cmd, 0, sizeof cmd); 4015 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 4016 /* Set differential gains for connected antennas. */ 4017 for (i = 0; i < 3; i++) { 4018 if (sc->chainmask & (1 << i)) { 4019 /* Compute attenuation (in unit of 1.5dB). */ 4020 delta = (noise - (int32_t)calib->noise[i]) / 30; 4021 /* NB: delta <= 0 */ 4022 /* Limit to [-4.5dB,0]. */ 4023 cmd.gain[i] = MIN(abs(delta), 3); 4024 if (delta < 0) 4025 cmd.gain[i] |= 1 << 2; /* sign bit */ 4026 } 4027 } 4028 DPRINTF(("setting differential gains Ant A/B/C: %x/%x/%x (%x)\n", 4029 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask)); 4030 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4031 } 4032 4033 static int 4034 iwn5000_set_gains(struct iwn_softc *sc) 4035 { 4036 struct iwn_calib_state *calib = &sc->calib; 4037 struct iwn_phy_calib_gain cmd; 4038 int i, ant, div, delta; 4039 4040 /* We collected 20 beacons and !=6050 need a 1.5 factor. */ 4041 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30; 4042 4043 memset(&cmd, 0, sizeof cmd); 4044 cmd.code = sc->noise_gain; 4045 cmd.ngroups = 1; 4046 cmd.isvalid = 1; 4047 /* Get first available RX antenna as referential. */ 4048 ant = IWN_LSB(sc->rxchainmask); 4049 /* Set differential gains for other antennas. */ 4050 for (i = ant + 1; i < 3; i++) { 4051 if (sc->chainmask & (1 << i)) { 4052 /* The delta is relative to antenna "ant". */ 4053 delta = ((int32_t)calib->noise[ant] - 4054 (int32_t)calib->noise[i]) / div; 4055 /* Limit to [-4.5dB,+4.5dB]. */ 4056 cmd.gain[i - 1] = MIN(abs(delta), 3); 4057 if (delta < 0) 4058 cmd.gain[i - 1] |= 1 << 2; /* sign bit */ 4059 } 4060 } 4061 DPRINTF(("setting differential gains: %x/%x (%x)\n", 4062 cmd.gain[0], cmd.gain[1], sc->chainmask)); 4063 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4064 } 4065 4066 /* 4067 * Tune RF RX sensitivity based on the number of false alarms detected 4068 * during the last beacon period. 
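 * A high false-alarm rate relative to the time the receiver was enabled
 * makes us raise the correlation thresholds (less sensitive); a low rate
 * lets us lower them again, always within the per-chip limits.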
4069 */ 4070 static void 4071 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) 4072 { 4073 #define inc(val, inc, max) \ 4074 if ((val) < (max)) { \ 4075 if ((val) < (max) - (inc)) \ 4076 (val) += (inc); \ 4077 else \ 4078 (val) = (max); \ 4079 needs_update = 1; \ 4080 } 4081 #define dec(val, dec, min) \ 4082 if ((val) > (min)) { \ 4083 if ((val) > (min) + (dec)) \ 4084 (val) -= (dec); \ 4085 else \ 4086 (val) = (min); \ 4087 needs_update = 1; \ 4088 } 4089 4090 const struct iwn_sensitivity_limits *limits = sc->limits; 4091 struct iwn_calib_state *calib = &sc->calib; 4092 uint32_t val, rxena, fa; 4093 uint32_t energy[3], energy_min; 4094 uint8_t noise[3], noise_ref; 4095 int i, needs_update = 0; 4096 4097 /* Check that we've been enabled long enough. */ 4098 if ((rxena = le32toh(stats->general.load)) == 0) 4099 return; 4100 4101 /* Compute number of false alarms since last call for OFDM. */ 4102 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; 4103 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm; 4104 fa *= 200 * 1024; /* 200TU */ 4105 4106 /* Save counters values for next call. */ 4107 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp); 4108 calib->fa_ofdm = le32toh(stats->ofdm.fa); 4109 4110 if (fa > 50 * rxena) { 4111 /* High false alarm count, decrease sensitivity. */ 4112 DPRINTFN(2, ("OFDM high false alarm count: %u\n", fa)); 4113 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); 4114 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); 4115 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); 4116 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); 4117 4118 } else if (fa < 5 * rxena) { 4119 /* Low false alarm count, increase sensitivity. */ 4120 DPRINTFN(2, ("OFDM low false alarm count: %u\n", fa)); 4121 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); 4122 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); 4123 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); 4124 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); 4125 } 4126 4127 /* Compute maximum noise among 3 receivers. */ 4128 for (i = 0; i < 3; i++) 4129 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff; 4130 val = MAX(noise[0], noise[1]); 4131 val = MAX(noise[2], val); 4132 /* Insert it into our samples table. */ 4133 calib->noise_samples[calib->cur_noise_sample] = val; 4134 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; 4135 4136 /* Compute maximum noise among last 20 samples. */ 4137 noise_ref = calib->noise_samples[0]; 4138 for (i = 1; i < 20; i++) 4139 noise_ref = MAX(noise_ref, calib->noise_samples[i]); 4140 4141 /* Compute maximum energy among 3 receivers. */ 4142 for (i = 0; i < 3; i++) 4143 energy[i] = le32toh(stats->general.energy[i]); 4144 val = MIN(energy[0], energy[1]); 4145 val = MIN(energy[2], val); 4146 /* Insert it into our samples table. */ 4147 calib->energy_samples[calib->cur_energy_sample] = val; 4148 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; 4149 4150 /* Compute minimum energy among last 10 samples. */ 4151 energy_min = calib->energy_samples[0]; 4152 for (i = 1; i < 10; i++) 4153 energy_min = MAX(energy_min, calib->energy_samples[i]); 4154 energy_min += 6; 4155 4156 /* Compute number of false alarms since last call for CCK. */ 4157 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck; 4158 fa += le32toh(stats->cck.fa) - calib->fa_cck; 4159 fa *= 200 * 1024; /* 200TU */ 4160 4161 /* Save counters values for next call. 
*/ 4162 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp); 4163 calib->fa_cck = le32toh(stats->cck.fa); 4164 4165 if (fa > 50 * rxena) { 4166 /* High false alarm count, decrease sensitivity. */ 4167 DPRINTFN(2, ("CCK high false alarm count: %u\n", fa)); 4168 calib->cck_state = IWN_CCK_STATE_HIFA; 4169 calib->low_fa = 0; 4170 4171 if (calib->cck_x4 > 160) { 4172 calib->noise_ref = noise_ref; 4173 if (calib->energy_cck > 2) 4174 dec(calib->energy_cck, 2, energy_min); 4175 } 4176 if (calib->cck_x4 < 160) { 4177 calib->cck_x4 = 161; 4178 needs_update = 1; 4179 } else 4180 inc(calib->cck_x4, 3, limits->max_cck_x4); 4181 4182 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); 4183 4184 } else if (fa < 5 * rxena) { 4185 /* Low false alarm count, increase sensitivity. */ 4186 DPRINTFN(2, ("CCK low false alarm count: %u\n", fa)); 4187 calib->cck_state = IWN_CCK_STATE_LOFA; 4188 calib->low_fa++; 4189 4190 if (calib->cck_state != IWN_CCK_STATE_INIT && 4191 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || 4192 calib->low_fa > 100)) { 4193 inc(calib->energy_cck, 2, limits->min_energy_cck); 4194 dec(calib->cck_x4, 3, limits->min_cck_x4); 4195 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); 4196 } 4197 } else { 4198 /* Not worth to increase or decrease sensitivity. */ 4199 DPRINTFN(2, ("CCK normal false alarm count: %u\n", fa)); 4200 calib->low_fa = 0; 4201 calib->noise_ref = noise_ref; 4202 4203 if (calib->cck_state == IWN_CCK_STATE_HIFA) { 4204 /* Previous interval had many false alarms. */ 4205 dec(calib->energy_cck, 8, energy_min); 4206 } 4207 calib->cck_state = IWN_CCK_STATE_INIT; 4208 } 4209 4210 if (needs_update) 4211 (void)iwn_send_sensitivity(sc); 4212 #undef dec 4213 #undef inc 4214 } 4215 4216 static int 4217 iwn_send_sensitivity(struct iwn_softc *sc) 4218 { 4219 struct iwn_calib_state *calib = &sc->calib; 4220 struct iwn_enhanced_sensitivity_cmd cmd; 4221 int len; 4222 4223 memset(&cmd, 0, sizeof cmd); 4224 len = sizeof (struct iwn_sensitivity_cmd); 4225 cmd.which = IWN_SENSITIVITY_WORKTBL; 4226 /* OFDM modulation. */ 4227 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); 4228 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); 4229 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); 4230 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); 4231 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm); 4232 cmd.energy_ofdm_th = htole16(62); 4233 /* CCK modulation. */ 4234 cmd.corr_cck_x4 = htole16(calib->cck_x4); 4235 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); 4236 cmd.energy_cck = htole16(calib->energy_cck); 4237 /* Barker modulation: use default values. */ 4238 cmd.corr_barker = htole16(190); 4239 cmd.corr_barker_mrc = htole16(sc->limits->barker_mrc); 4240 if (!(sc->sc_flags & IWN_FLAG_ENH_SENS)) 4241 goto send; 4242 /* Enhanced sensitivity settings. */ 4243 len = sizeof (struct iwn_enhanced_sensitivity_cmd); 4244 cmd.ofdm_det_slope_mrc = htole16(668); 4245 cmd.ofdm_det_icept_mrc = htole16(4); 4246 cmd.ofdm_det_slope = htole16(486); 4247 cmd.ofdm_det_icept = htole16(37); 4248 cmd.cck_det_slope_mrc = htole16(853); 4249 cmd.cck_det_icept_mrc = htole16(4); 4250 cmd.cck_det_slope = htole16(476); 4251 cmd.cck_det_icept = htole16(99); 4252 send: 4253 DPRINTFN(2, ("setting sensitivity %d/%d/%d/%d/%d/%d/%d\n", 4254 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4, 4255 calib->ofdm_mrc_x4, calib->cck_x4, calib->cck_mrc_x4, 4256 calib->energy_cck)); 4257 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1); 4258 } 4259 4260 /* 4261 * Set STA mode power saving level (between 0 and 5). 
4262 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 4263 */ 4264 static int 4265 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) 4266 { 4267 struct iwn_pmgt_cmd cmd; 4268 const struct iwn_pmgt *pmgt; 4269 uint32_t maxp, skip_dtim; 4270 pcireg_t reg; 4271 int i; 4272 4273 /* Select which PS parameters to use. */ 4274 if (dtim <= 2) 4275 pmgt = &iwn_pmgt[0][level]; 4276 else if (dtim <= 10) 4277 pmgt = &iwn_pmgt[1][level]; 4278 else 4279 pmgt = &iwn_pmgt[2][level]; 4280 4281 memset(&cmd, 0, sizeof cmd); 4282 if (level != 0) /* not CAM */ 4283 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); 4284 if (level == 5) 4285 cmd.flags |= htole16(IWN_PS_FAST_PD); 4286 /* Retrieve PCIe Active State Power Management (ASPM). */ 4287 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 4288 sc->sc_cap_off + PCIE_LCSR); 4289 if (!(reg & PCIE_LCSR_ASPM_L0S)) /* L0s Entry disabled. */ 4290 cmd.flags |= htole16(IWN_PS_PCI_PMGT); 4291 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); 4292 cmd.txtimeout = htole32(pmgt->txtimeout * 1024); 4293 4294 if (dtim == 0) { 4295 dtim = 1; 4296 skip_dtim = 0; 4297 } else 4298 skip_dtim = pmgt->skip_dtim; 4299 if (skip_dtim != 0) { 4300 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); 4301 maxp = pmgt->intval[4]; 4302 if (maxp == (uint32_t)-1) 4303 maxp = dtim * (skip_dtim + 1); 4304 else if (maxp > dtim) 4305 maxp = (maxp / dtim) * dtim; 4306 } else 4307 maxp = dtim; 4308 for (i = 0; i < 5; i++) 4309 cmd.intval[i] = htole32(MIN(maxp, pmgt->intval[i])); 4310 4311 DPRINTF(("setting power saving level to %d\n", level)); 4312 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 4313 } 4314 4315 int 4316 iwn5000_runtime_calib(struct iwn_softc *sc) 4317 { 4318 struct iwn5000_calib_config cmd; 4319 4320 memset(&cmd, 0, sizeof cmd); 4321 cmd.ucode.once.enable = 0xffffffff; 4322 cmd.ucode.once.start = IWN5000_CALIB_DC; 4323 DPRINTF(("configuring runtime calibration\n")); 4324 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0); 4325 } 4326 4327 static int 4328 iwn_config_bt_coex_bluetooth(struct iwn_softc *sc) 4329 { 4330 struct iwn_bluetooth bluetooth; 4331 4332 memset(&bluetooth, 0, sizeof bluetooth); 4333 bluetooth.flags = IWN_BT_COEX_ENABLE; 4334 bluetooth.lead_time = IWN_BT_LEAD_TIME_DEF; 4335 bluetooth.max_kill = IWN_BT_MAX_KILL_DEF; 4336 4337 DPRINTF(("configuring bluetooth coexistence\n")); 4338 return iwn_cmd(sc, IWN_CMD_BT_COEX, &bluetooth, sizeof bluetooth, 0); 4339 } 4340 4341 static int 4342 iwn_config_bt_coex_prio_table(struct iwn_softc *sc) 4343 { 4344 uint8_t prio_table[16]; 4345 4346 memset(&prio_table, 0, sizeof prio_table); 4347 prio_table[ 0] = 6; /* init calibration 1 */ 4348 prio_table[ 1] = 7; /* init calibration 2 */ 4349 prio_table[ 2] = 2; /* periodic calib low 1 */ 4350 prio_table[ 3] = 3; /* periodic calib low 2 */ 4351 prio_table[ 4] = 4; /* periodic calib high 1 */ 4352 prio_table[ 5] = 5; /* periodic calib high 2 */ 4353 prio_table[ 6] = 6; /* dtim */ 4354 prio_table[ 7] = 8; /* scan52 */ 4355 prio_table[ 8] = 10; /* scan24 */ 4356 4357 DPRINTF(("sending priority lookup table\n")); 4358 return iwn_cmd(sc, IWN_CMD_BT_COEX_PRIO_TABLE, 4359 &prio_table, sizeof prio_table, 0); 4360 } 4361 4362 static int 4363 iwn_config_bt_coex_adv_config(struct iwn_softc *sc, struct iwn_bt_basic *basic, 4364 size_t len) 4365 { 4366 struct iwn_btcoex_prot btprot; 4367 int error; 4368 4369 basic->bt.flags = IWN_BT_COEX_ENABLE; 4370 basic->bt.lead_time = IWN_BT_LEAD_TIME_DEF; 4371 basic->bt.max_kill = 
IWN_BT_MAX_KILL_DEF; 4372 basic->bt.bt3_timer_t7_value = IWN_BT_BT3_T7_DEF; 4373 basic->bt.kill_ack_mask = IWN_BT_KILL_ACK_MASK_DEF; 4374 basic->bt.kill_cts_mask = IWN_BT_KILL_CTS_MASK_DEF; 4375 basic->bt3_prio_sample_time = IWN_BT_BT3_PRIO_SAMPLE_DEF; 4376 basic->bt3_timer_t2_value = IWN_BT_BT3_T2_DEF; 4377 basic->bt3_lookup_table[ 0] = htole32(0xaaaaaaaa); /* Normal */ 4378 basic->bt3_lookup_table[ 1] = htole32(0xaaaaaaaa); 4379 basic->bt3_lookup_table[ 2] = htole32(0xaeaaaaaa); 4380 basic->bt3_lookup_table[ 3] = htole32(0xaaaaaaaa); 4381 basic->bt3_lookup_table[ 4] = htole32(0xcc00ff28); 4382 basic->bt3_lookup_table[ 5] = htole32(0x0000aaaa); 4383 basic->bt3_lookup_table[ 6] = htole32(0xcc00aaaa); 4384 basic->bt3_lookup_table[ 7] = htole32(0x0000aaaa); 4385 basic->bt3_lookup_table[ 8] = htole32(0xc0004000); 4386 basic->bt3_lookup_table[ 9] = htole32(0x00004000); 4387 basic->bt3_lookup_table[10] = htole32(0xf0005000); 4388 basic->bt3_lookup_table[11] = htole32(0xf0005000); 4389 basic->reduce_txpower = 0; /* not implemented */ 4390 basic->valid = IWN_BT_ALL_VALID_MASK; 4391 4392 DPRINTF(("configuring advanced bluetooth coexistence v1\n")); 4393 error = iwn_cmd(sc, IWN_CMD_BT_COEX, basic, len, 0); 4394 if (error != 0) { 4395 aprint_error_dev(sc->sc_dev, 4396 "could not configure advanced bluetooth coexistence\n"); 4397 return error; 4398 } 4399 4400 error = iwn_config_bt_coex_prio_table(sc); 4401 if (error != 0) { 4402 aprint_error_dev(sc->sc_dev, 4403 "could not send BT priority table\n"); 4404 return error; 4405 } 4406 4407 /* Force BT state machine change */ 4408 memset(&btprot, 0, sizeof btprot); 4409 btprot.open = 1; 4410 btprot.type = 1; 4411 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof btprot, 1); 4412 if (error != 0) { 4413 aprint_error_dev(sc->sc_dev, "could not open BT protocol\n"); 4414 return error; 4415 } 4416 4417 btprot.open = 0; 4418 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof btprot, 1); 4419 if (error != 0) { 4420 aprint_error_dev(sc->sc_dev, "could not close BT protocol\n"); 4421 return error; 4422 } 4423 return 0; 4424 } 4425 4426 static int 4427 iwn_config_bt_coex_adv1(struct iwn_softc *sc) 4428 { 4429 struct iwn_bt_adv1 d; 4430 4431 memset(&d, 0, sizeof d); 4432 d.prio_boost = IWN_BT_PRIO_BOOST_DEF; 4433 d.tx_prio_boost = 0; 4434 d.rx_prio_boost = 0; 4435 return iwn_config_bt_coex_adv_config(sc, &d.basic, sizeof d); 4436 } 4437 4438 static int 4439 iwn_config_bt_coex_adv2(struct iwn_softc *sc) 4440 { 4441 struct iwn_bt_adv2 d; 4442 4443 memset(&d, 0, sizeof d); 4444 d.prio_boost = IWN_BT_PRIO_BOOST_DEF; 4445 d.tx_prio_boost = 0; 4446 d.rx_prio_boost = 0; 4447 return iwn_config_bt_coex_adv_config(sc, &d.basic, sizeof d); 4448 } 4449 4450 static int 4451 iwn_config(struct iwn_softc *sc) 4452 { 4453 struct iwn_ops *ops = &sc->ops; 4454 struct ieee80211com *ic = &sc->sc_ic; 4455 struct ifnet *ifp = ic->ic_ifp; 4456 uint32_t txmask; 4457 uint16_t rxchain; 4458 int error; 4459 4460 error = ops->config_bt_coex(sc); 4461 if (error != 0) { 4462 aprint_error_dev(sc->sc_dev, 4463 "could not configure bluetooth coexistence\n"); 4464 return error; 4465 } 4466 4467 /* Set radio temperature sensor offset.
*/ 4468 if (sc->hw_type == IWN_HW_REV_TYPE_6005) { 4469 error = iwn6000_temp_offset_calib(sc); 4470 if (error != 0) { 4471 aprint_error_dev(sc->sc_dev, 4472 "could not set temperature offset\n"); 4473 return error; 4474 } 4475 } 4476 4477 if (sc->hw_type == IWN_HW_REV_TYPE_2030 || 4478 sc->hw_type == IWN_HW_REV_TYPE_2000 || 4479 sc->hw_type == IWN_HW_REV_TYPE_135 || 4480 sc->hw_type == IWN_HW_REV_TYPE_105) { 4481 error = iwn2000_temp_offset_calib(sc); 4482 if (error != 0) { 4483 aprint_error_dev(sc->sc_dev, 4484 "could not set temperature offset\n"); 4485 return error; 4486 } 4487 } 4488 4489 if (sc->hw_type == IWN_HW_REV_TYPE_6050 || 4490 sc->hw_type == IWN_HW_REV_TYPE_6005) { 4491 /* Configure runtime DC calibration. */ 4492 error = iwn5000_runtime_calib(sc); 4493 if (error != 0) { 4494 aprint_error_dev(sc->sc_dev, 4495 "could not configure runtime calibration\n"); 4496 return error; 4497 } 4498 } 4499 4500 /* Configure valid TX chains for 5000 Series. */ 4501 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4502 txmask = htole32(sc->txchainmask); 4503 DPRINTF(("configuring valid TX chains 0x%x\n", txmask)); 4504 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask, 4505 sizeof txmask, 0); 4506 if (error != 0) { 4507 aprint_error_dev(sc->sc_dev, 4508 "could not configure valid TX chains\n"); 4509 return error; 4510 } 4511 } 4512 4513 /* Set mode, channel, RX filter and enable RX. */ 4514 memset(&sc->rxon, 0, sizeof (struct iwn_rxon)); 4515 IEEE80211_ADDR_COPY(ic->ic_myaddr, CLLADDR(ifp->if_sadl)); 4516 IEEE80211_ADDR_COPY(sc->rxon.myaddr, ic->ic_myaddr); 4517 IEEE80211_ADDR_COPY(sc->rxon.wlap, ic->ic_myaddr); 4518 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_ibss_chan); 4519 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4520 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_ibss_chan)) 4521 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4522 switch (ic->ic_opmode) { 4523 case IEEE80211_M_STA: 4524 sc->rxon.mode = IWN_MODE_STA; 4525 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST); 4526 break; 4527 case IEEE80211_M_MONITOR: 4528 sc->rxon.mode = IWN_MODE_MONITOR; 4529 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST | 4530 IWN_FILTER_CTL | IWN_FILTER_PROMISC); 4531 break; 4532 default: 4533 /* Should not get there. */ 4534 break; 4535 } 4536 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 4537 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 4538 sc->rxon.ht_single_mask = 0xff; 4539 sc->rxon.ht_dual_mask = 0xff; 4540 sc->rxon.ht_triple_mask = 0xff; 4541 rxchain = 4542 IWN_RXCHAIN_VALID(sc->rxchainmask) | 4543 IWN_RXCHAIN_MIMO_COUNT(2) | 4544 IWN_RXCHAIN_IDLE_COUNT(2); 4545 sc->rxon.rxchain = htole16(rxchain); 4546 DPRINTF(("setting configuration\n")); 4547 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0); 4548 if (error != 0) { 4549 aprint_error_dev(sc->sc_dev, 4550 "RXON command failed\n"); 4551 return error; 4552 } 4553 4554 if ((error = iwn_add_broadcast_node(sc, 0)) != 0) { 4555 aprint_error_dev(sc->sc_dev, 4556 "could not add broadcast node\n"); 4557 return error; 4558 } 4559 4560 /* Configuration has changed, set TX power accordingly. */ 4561 if ((error = ops->set_txpower(sc, 0)) != 0) { 4562 aprint_error_dev(sc->sc_dev, 4563 "could not set TX power\n"); 4564 return error; 4565 } 4566 4567 if ((error = iwn_set_critical_temp(sc)) != 0) { 4568 aprint_error_dev(sc->sc_dev, 4569 "could not set critical temperature\n"); 4570 return error; 4571 } 4572 4573 /* Set power saving level to CAM during initialization. 
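Power saving, if requested by the user, is re-enabled later through iwn_set_pslevel() once calibration has settled (see iwn_collect_noise()).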
*/ 4574 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) { 4575 aprint_error_dev(sc->sc_dev, 4576 "could not set power saving level\n"); 4577 return error; 4578 } 4579 return 0; 4580 } 4581 4582 static uint16_t 4583 iwn_get_active_dwell_time(struct iwn_softc *sc, uint16_t flags, 4584 uint8_t n_probes) 4585 { 4586 /* No channel? Default to 2GHz settings */ 4587 if (flags & IEEE80211_CHAN_2GHZ) 4588 return IWN_ACTIVE_DWELL_TIME_2GHZ + 4589 IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1); 4590 4591 /* 5GHz dwell time */ 4592 return IWN_ACTIVE_DWELL_TIME_5GHZ + 4593 IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1); 4594 } 4595 4596 /* 4597 * Limit the total dwell time to 85% of the beacon interval. 4598 * 4599 * Returns the dwell time in milliseconds. 4600 */ 4601 static uint16_t 4602 iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time) 4603 { 4604 struct ieee80211com *ic = &sc->sc_ic; 4605 struct ieee80211_node *ni = ic->ic_bss; 4606 int bintval = 0; 4607 4608 /* bintval is in TU (1.024mS) */ 4609 if (ni != NULL) 4610 bintval = ni->ni_intval; 4611 4612 /* 4613 * If it's non-zero, we should calculate the minimum of 4614 * it and the DWELL_BASE. 4615 * 4616 * XXX Yes, the math should take into account that bintval 4617 * is 1.024mS, not 1mS.. 4618 */ 4619 if (bintval > 0) 4620 return MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100)); 4621 4622 /* No association context? Default */ 4623 return IWN_PASSIVE_DWELL_BASE; 4624 } 4625 4626 static uint16_t 4627 iwn_get_passive_dwell_time(struct iwn_softc *sc, uint16_t flags) 4628 { 4629 uint16_t passive; 4630 if (flags & IEEE80211_CHAN_2GHZ) 4631 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ; 4632 else 4633 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ; 4634 4635 /* Clamp to the beacon interval if we're associated */ 4636 return iwn_limit_dwell(sc, passive); 4637 } 4638 4639 static int 4640 iwn_scan(struct iwn_softc *sc, uint16_t flags) 4641 { 4642 struct ieee80211com *ic = &sc->sc_ic; 4643 struct iwn_scan_hdr *hdr; 4644 struct iwn_cmd_data *tx; 4645 struct iwn_scan_essid *essid; 4646 struct iwn_scan_chan *chan; 4647 struct ieee80211_frame *wh; 4648 struct ieee80211_rateset *rs; 4649 struct ieee80211_channel *c; 4650 uint8_t *buf, *frm; 4651 uint16_t rxchain, dwell_active, dwell_passive; 4652 uint8_t txant; 4653 int buflen, error, is_active; 4654 4655 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4656 if (buf == NULL) { 4657 aprint_error_dev(sc->sc_dev, 4658 "could not allocate buffer for scan command\n"); 4659 return ENOMEM; 4660 } 4661 hdr = (struct iwn_scan_hdr *)buf; 4662 /* 4663 * Move to the next channel if no frames are received within 10ms 4664 * after sending the probe request. 4665 */ 4666 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 4667 hdr->quiet_threshold = htole16(1); /* min # of packets */ 4668 4669 /* Select antennas for scanning. */ 4670 rxchain = 4671 IWN_RXCHAIN_VALID(sc->rxchainmask) | 4672 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) | 4673 IWN_RXCHAIN_DRIVER_FORCE; 4674 if ((flags & IEEE80211_CHAN_5GHZ) && 4675 sc->hw_type == IWN_HW_REV_TYPE_4965) { 4676 /* Ant A must be avoided in 5GHz because of an HW bug. */ 4677 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC); 4678 } else /* Use all available RX antennas. 
*/ 4679 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); 4680 hdr->rxchain = htole16(rxchain); 4681 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); 4682 4683 tx = (struct iwn_cmd_data *)(hdr + 1); 4684 tx->flags = htole32(IWN_TX_AUTO_SEQ); 4685 tx->id = sc->broadcast_id; 4686 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4687 4688 if (flags & IEEE80211_CHAN_5GHZ) { 4689 hdr->crc_threshold = 0xffff; 4690 /* Send probe requests at 6Mbps. */ 4691 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp; 4692 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4693 } else { 4694 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); 4695 /* Send probe requests at 1Mbps. */ 4696 tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp; 4697 tx->rflags = IWN_RFLAG_CCK; 4698 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4699 } 4700 /* Use the first valid TX antenna. */ 4701 txant = IWN_LSB(sc->txchainmask); 4702 tx->rflags |= IWN_RFLAG_ANT(txant); 4703 4704 /* 4705 * Only do active scanning if we're announcing a probe request 4706 * for a given SSID (or more, if we ever add it to the driver.) 4707 */ 4708 is_active = 0; 4709 4710 essid = (struct iwn_scan_essid *)(tx + 1); 4711 if (ic->ic_des_esslen != 0) { 4712 essid[0].id = IEEE80211_ELEMID_SSID; 4713 essid[0].len = ic->ic_des_esslen; 4714 memcpy(essid[0].data, ic->ic_des_essid, ic->ic_des_esslen); 4715 4716 is_active = 1; 4717 } 4718 /* 4719 * Build a probe request frame. Most of the following code is a 4720 * copy & paste of what is done in net80211. 4721 */ 4722 wh = (struct ieee80211_frame *)(essid + 20); 4723 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4724 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4725 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4726 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr); 4727 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr); 4728 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr); 4729 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 4730 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 4731 4732 frm = (uint8_t *)(wh + 1); 4733 frm = ieee80211_add_ssid(frm, NULL, 0); 4734 frm = ieee80211_add_rates(frm, rs); 4735 #ifndef IEEE80211_NO_HT 4736 if (ic->ic_flags & IEEE80211_F_HTON) 4737 frm = ieee80211_add_htcaps(frm, ic); 4738 #endif 4739 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4740 frm = ieee80211_add_xrates(frm, rs); 4741 4742 /* Set length of probe request. */ 4743 tx->len = htole16(frm - (uint8_t *)wh); 4744 4745 4746 /* 4747 * If active scanning is requested but a certain channel is 4748 * marked passive, we can do active scanning if we detect 4749 * transmissions. 4750 * 4751 * There is an issue with some firmware versions that triggers 4752 * a sysassert on a "good CRC threshold" of zero (== disabled), 4753 * on a radar channel even though this means that we should NOT 4754 * send probes. 4755 * 4756 * The "good CRC threshold" is the number of frames that we 4757 * need to receive during our dwell time on a channel before 4758 * sending out probes -- setting this to a huge value will 4759 * mean we never reach it, but at the same time work around 4760 * the aforementioned issue. Thus use IWN_GOOD_CRC_TH_NEVER 4761 * here instead of IWN_GOOD_CRC_TH_DISABLED. 4762 * 4763 * This was fixed in later versions along with some other 4764 * scan changes, and the threshold behaves as a flag in those 4765 * versions. 4766 */ 4767 4768 /* 4769 * If we're doing active scanning, set the crc_threshold 4770 * to a suitable value. 
This differs for active versus 4771 * passive scanning depending upon the channel flags; the 4772 * firmware will obey that particular check for us. 4773 */ 4774 if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN) 4775 hdr->crc_threshold = is_active ? 4776 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED; 4777 else 4778 hdr->crc_threshold = is_active ? 4779 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER; 4780 4781 chan = (struct iwn_scan_chan *)frm; 4782 for (c = &ic->ic_channels[1]; 4783 c <= &ic->ic_channels[IEEE80211_CHAN_MAX]; c++) { 4784 if ((c->ic_flags & flags) != flags) 4785 continue; 4786 4787 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 4788 DPRINTFN(2, ("adding channel %d\n", chan->chan)); 4789 chan->flags = 0; 4790 if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) 4791 chan->flags |= htole32(IWN_CHAN_ACTIVE); 4792 if (ic->ic_des_esslen != 0) 4793 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 4794 4795 /* 4796 * Calculate the active/passive dwell times. 4797 */ 4798 4799 dwell_active = iwn_get_active_dwell_time(sc, flags, is_active); 4800 dwell_passive = iwn_get_passive_dwell_time(sc, flags); 4801 4802 /* Make sure they're valid */ 4803 if (dwell_passive <= dwell_active) 4804 dwell_passive = dwell_active + 1; 4805 4806 chan->active = htole16(dwell_active); 4807 chan->passive = htole16(dwell_passive); 4808 4809 chan->dsp_gain = 0x6e; 4810 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4811 chan->rf_gain = 0x3b; 4812 } else { 4813 chan->rf_gain = 0x28; 4814 } 4815 hdr->nchan++; 4816 chan++; 4817 } 4818 4819 buflen = (uint8_t *)chan - buf; 4820 hdr->len = htole16(buflen); 4821 4822 DPRINTF(("sending scan command nchan=%d\n", hdr->nchan)); 4823 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 4824 free(buf, M_DEVBUF); 4825 return error; 4826 } 4827 4828 static int 4829 iwn_auth(struct iwn_softc *sc) 4830 { 4831 struct iwn_ops *ops = &sc->ops; 4832 struct ieee80211com *ic = &sc->sc_ic; 4833 struct ieee80211_node *ni = ic->ic_bss; 4834 int error; 4835 4836 /* Update adapter configuration. */ 4837 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4838 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan); 4839 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4840 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 4841 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4842 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4843 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4844 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4845 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4846 switch (ic->ic_curmode) { 4847 case IEEE80211_MODE_11A: 4848 sc->rxon.cck_mask = 0; 4849 sc->rxon.ofdm_mask = 0x15; 4850 break; 4851 case IEEE80211_MODE_11B: 4852 sc->rxon.cck_mask = 0x03; 4853 sc->rxon.ofdm_mask = 0; 4854 break; 4855 default: /* Assume 802.11b/g. */ 4856 sc->rxon.cck_mask = 0x0f; 4857 sc->rxon.ofdm_mask = 0x15; 4858 } 4859 DPRINTF(("rxon chan %d flags %x cck %x ofdm %x\n", sc->rxon.chan, 4860 sc->rxon.flags, sc->rxon.cck_mask, sc->rxon.ofdm_mask)); 4861 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 4862 if (error != 0) { 4863 aprint_error_dev(sc->sc_dev, 4864 "RXON command failed\n"); 4865 return error; 4866 } 4867 4868 /* Configuration has changed, set TX power accordingly. */ 4869 if ((error = ops->set_txpower(sc, 1)) != 0) { 4870 aprint_error_dev(sc->sc_dev, 4871 "could not set TX power\n"); 4872 return error; 4873 } 4874 /* 4875 * Reconfiguring RXON clears the firmware nodes table so we must 4876 * add the broadcast node again.
4877 */ 4878 if ((error = iwn_add_broadcast_node(sc, 1)) != 0) { 4879 aprint_error_dev(sc->sc_dev, 4880 "could not add broadcast node\n"); 4881 return error; 4882 } 4883 return 0; 4884 } 4885 4886 static int 4887 iwn_run(struct iwn_softc *sc) 4888 { 4889 struct iwn_ops *ops = &sc->ops; 4890 struct ieee80211com *ic = &sc->sc_ic; 4891 struct ieee80211_node *ni = ic->ic_bss; 4892 struct iwn_node_info node; 4893 int error; 4894 4895 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 4896 /* Link LED blinks while monitoring. */ 4897 iwn_set_led(sc, IWN_LED_LINK, 5, 5); 4898 return 0; 4899 } 4900 if ((error = iwn_set_timing(sc, ni)) != 0) { 4901 aprint_error_dev(sc->sc_dev, 4902 "could not set timing\n"); 4903 return error; 4904 } 4905 4906 /* Update adapter configuration. */ 4907 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd)); 4908 /* Short preamble and slot time are negotiated when associating. */ 4909 sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT); 4910 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4911 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4912 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4913 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4914 sc->rxon.filter |= htole32(IWN_FILTER_BSS); 4915 DPRINTF(("rxon chan %d flags %x\n", sc->rxon.chan, sc->rxon.flags)); 4916 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 4917 if (error != 0) { 4918 aprint_error_dev(sc->sc_dev, 4919 "could not update configuration\n"); 4920 return error; 4921 } 4922 4923 /* Configuration has changed, set TX power accordingly. */ 4924 if ((error = ops->set_txpower(sc, 1)) != 0) { 4925 aprint_error_dev(sc->sc_dev, 4926 "could not set TX power\n"); 4927 return error; 4928 } 4929 4930 /* Fake a join to initialize the TX rate. */ 4931 ((struct iwn_node *)ni)->id = IWN_ID_BSS; 4932 iwn_newassoc(ni, 1); 4933 4934 /* Add BSS node. */ 4935 memset(&node, 0, sizeof node); 4936 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 4937 node.id = IWN_ID_BSS; 4938 #ifdef notyet 4939 node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(3) | 4940 IWN_AMDPU_DENSITY(5)); /* 2us */ 4941 #endif 4942 DPRINTF(("adding BSS node\n")); 4943 error = ops->add_node(sc, &node, 1); 4944 if (error != 0) { 4945 aprint_error_dev(sc->sc_dev, 4946 "could not add BSS node\n"); 4947 return error; 4948 } 4949 DPRINTF(("setting link quality for node %d\n", node.id)); 4950 if ((error = iwn_set_link_quality(sc, ni)) != 0) { 4951 aprint_error_dev(sc->sc_dev, 4952 "could not setup link quality for node %d\n", node.id); 4953 return error; 4954 } 4955 4956 if ((error = iwn_init_sensitivity(sc)) != 0) { 4957 aprint_error_dev(sc->sc_dev, 4958 "could not set sensitivity\n"); 4959 return error; 4960 } 4961 /* Start periodic calibration timer. */ 4962 sc->calib.state = IWN_CALIB_STATE_ASSOC; 4963 sc->calib_cnt = 0; 4964 callout_schedule(&sc->calib_to, hz/2); 4965 4966 /* Link LED always on while associated. */ 4967 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 4968 return 0; 4969 } 4970 4971 #ifdef IWN_HWCRYPTO 4972 /* 4973 * We support CCMP hardware encryption/decryption of unicast frames only. 4974 * HW support for TKIP really sucks. We should let TKIP die anyway. 
4975 */ 4976 static int 4977 iwn_set_key(struct ieee80211com *ic, struct ieee80211_node *ni, 4978 struct ieee80211_key *k) 4979 { 4980 struct iwn_softc *sc = ic->ic_softc; 4981 struct iwn_ops *ops = &sc->ops; 4982 struct iwn_node *wn = (void *)ni; 4983 struct iwn_node_info node; 4984 uint16_t kflags; 4985 4986 if ((k->k_flags & IEEE80211_KEY_GROUP) || 4987 k->k_cipher != IEEE80211_CIPHER_CCMP) 4988 return ieee80211_set_key(ic, ni, k); 4989 4990 kflags = IWN_KFLAG_CCMP | IWN_KFLAG_MAP | IWN_KFLAG_KID(k->k_id); 4991 if (k->k_flags & IEEE80211_KEY_GROUP) 4992 kflags |= IWN_KFLAG_GROUP; 4993 4994 memset(&node, 0, sizeof node); 4995 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ? 4996 sc->broadcast_id : wn->id; 4997 node.control = IWN_NODE_UPDATE; 4998 node.flags = IWN_FLAG_SET_KEY; 4999 node.kflags = htole16(kflags); 5000 node.kid = k->k_id; 5001 memcpy(node.key, k->k_key, k->k_len); 5002 DPRINTF(("set key id=%d for node %d\n", k->k_id, node.id)); 5003 return ops->add_node(sc, &node, 1); 5004 } 5005 5006 static void 5007 iwn_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni, 5008 struct ieee80211_key *k) 5009 { 5010 struct iwn_softc *sc = ic->ic_softc; 5011 struct iwn_ops *ops = &sc->ops; 5012 struct iwn_node *wn = (void *)ni; 5013 struct iwn_node_info node; 5014 5015 if ((k->k_flags & IEEE80211_KEY_GROUP) || 5016 k->k_cipher != IEEE80211_CIPHER_CCMP) { 5017 /* See comment about other ciphers above. */ 5018 ieee80211_delete_key(ic, ni, k); 5019 return; 5020 } 5021 if (ic->ic_state != IEEE80211_S_RUN) 5022 return; /* Nothing to do. */ 5023 memset(&node, 0, sizeof node); 5024 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ? 5025 sc->broadcast_id : wn->id; 5026 node.control = IWN_NODE_UPDATE; 5027 node.flags = IWN_FLAG_SET_KEY; 5028 node.kflags = htole16(IWN_KFLAG_INVALID); 5029 node.kid = 0xff; 5030 DPRINTF(("delete keys for node %d\n", node.id)); 5031 (void)ops->add_node(sc, &node, 1); 5032 } 5033 #endif 5034 5035 /* XXX Added for NetBSD (copied from rev 1.39). */ 5036 5037 static int 5038 iwn_wme_update(struct ieee80211com *ic) 5039 { 5040 #define IWN_EXP2(v) htole16((1 << (v)) - 1) 5041 #define IWN_USEC(v) htole16(IEEE80211_TXOP_TO_US(v)) 5042 struct iwn_softc *sc = ic->ic_ifp->if_softc; 5043 const struct wmeParams *wmep; 5044 struct iwn_edca_params cmd; 5045 int ac; 5046 5047 /* don't override default WME values if WME is not actually enabled */ 5048 if (!(ic->ic_flags & IEEE80211_F_WME)) 5049 return 0; 5050 cmd.flags = 0; 5051 for (ac = 0; ac < WME_NUM_AC; ac++) { 5052 wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 5053 cmd.ac[ac].aifsn = wmep->wmep_aifsn; 5054 cmd.ac[ac].cwmin = IWN_EXP2(wmep->wmep_logcwmin); 5055 cmd.ac[ac].cwmax = IWN_EXP2(wmep->wmep_logcwmax); 5056 cmd.ac[ac].txoplimit = IWN_USEC(wmep->wmep_txopLimit); 5057 5058 DPRINTF(("setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 5059 "txop=%d\n", ac, cmd.ac[ac].aifsn, 5060 cmd.ac[ac].cwmin, 5061 cmd.ac[ac].cwmax, cmd.ac[ac].txoplimit)); 5062 } 5063 return iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 5064 #undef IWN_USEC 5065 #undef IWN_EXP2 5066 } 5067 5068 #ifndef IEEE80211_NO_HT 5069 /* 5070 * This function is called by upper layer when an ADDBA request is received 5071 * from another STA and before the ADDBA response is sent. 
5072 */ 5073 static int 5074 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 5075 uint8_t tid) 5076 { 5077 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid]; 5078 struct iwn_softc *sc = ic->ic_softc; 5079 struct iwn_ops *ops = &sc->ops; 5080 struct iwn_node *wn = (void *)ni; 5081 struct iwn_node_info node; 5082 5083 memset(&node, 0, sizeof node); 5084 node.id = wn->id; 5085 node.control = IWN_NODE_UPDATE; 5086 node.flags = IWN_FLAG_SET_ADDBA; 5087 node.addba_tid = tid; 5088 node.addba_ssn = htole16(ba->ba_winstart); 5089 DPRINTFN(2, ("ADDBA RA=%d TID=%d SSN=%d\n", wn->id, tid, 5090 ba->ba_winstart)); 5091 return ops->add_node(sc, &node, 1); 5092 } 5093 5094 /* 5095 * This function is called by upper layer on teardown of an HT-immediate 5096 * Block Ack agreement (eg. uppon receipt of a DELBA frame). 5097 */ 5098 static void 5099 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 5100 uint8_t tid) 5101 { 5102 struct iwn_softc *sc = ic->ic_softc; 5103 struct iwn_ops *ops = &sc->ops; 5104 struct iwn_node *wn = (void *)ni; 5105 struct iwn_node_info node; 5106 5107 memset(&node, 0, sizeof node); 5108 node.id = wn->id; 5109 node.control = IWN_NODE_UPDATE; 5110 node.flags = IWN_FLAG_SET_DELBA; 5111 node.delba_tid = tid; 5112 DPRINTFN(2, ("DELBA RA=%d TID=%d\n", wn->id, tid)); 5113 (void)ops->add_node(sc, &node, 1); 5114 } 5115 5116 /* 5117 * This function is called by upper layer when an ADDBA response is received 5118 * from another STA. 5119 */ 5120 static int 5121 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 5122 uint8_t tid) 5123 { 5124 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 5125 struct iwn_softc *sc = ic->ic_softc; 5126 struct iwn_ops *ops = &sc->ops; 5127 struct iwn_node *wn = (void *)ni; 5128 struct iwn_node_info node; 5129 int error; 5130 5131 /* Enable TX for the specified RA/TID. */ 5132 wn->disable_tid &= ~(1 << tid); 5133 memset(&node, 0, sizeof node); 5134 node.id = wn->id; 5135 node.control = IWN_NODE_UPDATE; 5136 node.flags = IWN_FLAG_SET_DISABLE_TID; 5137 node.disable_tid = htole16(wn->disable_tid); 5138 error = ops->add_node(sc, &node, 1); 5139 if (error != 0) 5140 return error; 5141 5142 if ((error = iwn_nic_lock(sc)) != 0) 5143 return error; 5144 ops->ampdu_tx_start(sc, ni, tid, ba->ba_winstart); 5145 iwn_nic_unlock(sc); 5146 return 0; 5147 } 5148 5149 static void 5150 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 5151 uint8_t tid) 5152 { 5153 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 5154 struct iwn_softc *sc = ic->ic_softc; 5155 struct iwn_ops *ops = &sc->ops; 5156 5157 if (iwn_nic_lock(sc) != 0) 5158 return; 5159 ops->ampdu_tx_stop(sc, tid, ba->ba_winstart); 5160 iwn_nic_unlock(sc); 5161 } 5162 5163 static void 5164 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5165 uint8_t tid, uint16_t ssn) 5166 { 5167 struct iwn_node *wn = (void *)ni; 5168 int qid = 7 + tid; 5169 5170 /* Stop TX scheduler while we're changing its configuration. */ 5171 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5172 IWN4965_TXQ_STATUS_CHGACT); 5173 5174 /* Assign RA/TID translation to the queue. */ 5175 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), 5176 wn->id << 4 | tid); 5177 5178 /* Enable chain-building mode for the queue. */ 5179 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); 5180 5181 /* Set starting sequence number from the ADDBA request. 
*/ 5182 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5183 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 5184 5185 /* Set scheduler window size. */ 5186 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), 5187 IWN_SCHED_WINSZ); 5188 /* Set scheduler frame limit. */ 5189 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5190 IWN_SCHED_LIMIT << 16); 5191 5192 /* Enable interrupts for the queue. */ 5193 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 5194 5195 /* Mark the queue as active. */ 5196 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5197 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | 5198 iwn_tid2fifo[tid] << 1); 5199 } 5200 5201 static void 5202 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 5203 { 5204 int qid = 7 + tid; 5205 5206 /* Stop TX scheduler while we're changing its configuration. */ 5207 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5208 IWN4965_TXQ_STATUS_CHGACT); 5209 5210 /* Set starting sequence number from the ADDBA request. */ 5211 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5212 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 5213 5214 /* Disable interrupts for the queue. */ 5215 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 5216 5217 /* Mark the queue as inactive. */ 5218 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5219 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 5220 } 5221 5222 static void 5223 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5224 uint8_t tid, uint16_t ssn) 5225 { 5226 struct iwn_node *wn = (void *)ni; 5227 int qid = 10 + tid; 5228 5229 /* Stop TX scheduler while we're changing its configuration. */ 5230 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5231 IWN5000_TXQ_STATUS_CHGACT); 5232 5233 /* Assign RA/TID translation to the queue. */ 5234 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 5235 wn->id << 4 | tid); 5236 5237 /* Enable chain-building mode for the queue. */ 5238 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 5239 5240 /* Enable aggregation for the queue. */ 5241 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 5242 5243 /* Set starting sequence number from the ADDBA request. */ 5244 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5245 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 5246 5247 /* Set scheduler window size and frame limit. */ 5248 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5249 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5250 5251 /* Enable interrupts for the queue. */ 5252 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 5253 5254 /* Mark the queue as active. */ 5255 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5256 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 5257 } 5258 5259 static void 5260 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 5261 { 5262 int qid = 10 + tid; 5263 5264 /* Stop TX scheduler while we're changing its configuration. */ 5265 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5266 IWN5000_TXQ_STATUS_CHGACT); 5267 5268 /* Disable aggregation for the queue. */ 5269 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 5270 5271 /* Set starting sequence number from the ADDBA request. */ 5272 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5273 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 5274 5275 /* Disable interrupts for the queue. 
*/ 5276 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 5277 5278 /* Mark the queue as inactive. */ 5279 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5280 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); 5281 } 5282 #endif /* !IEEE80211_NO_HT */ 5283 5284 /* 5285 * Query calibration tables from the initialization firmware. We do this 5286 * only once at first boot. Called from a process context. 5287 */ 5288 static int 5289 iwn5000_query_calibration(struct iwn_softc *sc) 5290 { 5291 struct iwn5000_calib_config cmd; 5292 int error; 5293 5294 memset(&cmd, 0, sizeof cmd); 5295 cmd.ucode.once.enable = 0xffffffff; 5296 cmd.ucode.once.start = 0xffffffff; 5297 cmd.ucode.once.send = 0xffffffff; 5298 cmd.ucode.flags = 0xffffffff; 5299 DPRINTF(("sending calibration query\n")); 5300 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); 5301 if (error != 0) 5302 return error; 5303 5304 /* Wait at most two seconds for calibration to complete. */ 5305 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) 5306 error = tsleep(sc, PCATCH, "iwncal", 2 * hz); 5307 return error; 5308 } 5309 5310 /* 5311 * Send calibration results to the runtime firmware. These results were 5312 * obtained on first boot from the initialization firmware. 5313 */ 5314 static int 5315 iwn5000_send_calibration(struct iwn_softc *sc) 5316 { 5317 int idx, error; 5318 5319 for (idx = 0; idx < 5; idx++) { 5320 if (sc->calibcmd[idx].buf == NULL) 5321 continue; /* No results available. */ 5322 DPRINTF(("send calibration result idx=%d len=%d\n", 5323 idx, sc->calibcmd[idx].len)); 5324 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf, 5325 sc->calibcmd[idx].len, 0); 5326 if (error != 0) { 5327 aprint_error_dev(sc->sc_dev, 5328 "could not send calibration result\n"); 5329 return error; 5330 } 5331 } 5332 return 0; 5333 } 5334 5335 static int 5336 iwn5000_send_wimax_coex(struct iwn_softc *sc) 5337 { 5338 struct iwn5000_wimax_coex wimax; 5339 5340 #ifdef notyet 5341 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 5342 /* Enable WiMAX coexistence for combo adapters. */ 5343 wimax.flags = 5344 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 5345 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 5346 IWN_WIMAX_COEX_STA_TABLE_VALID | 5347 IWN_WIMAX_COEX_ENABLE; 5348 memcpy(wimax.events, iwn6050_wimax_events, 5349 sizeof iwn6050_wimax_events); 5350 } else 5351 #endif 5352 { 5353 /* Disable WiMAX coexistence. 
*/ 5354 wimax.flags = 0; 5355 memset(wimax.events, 0, sizeof wimax.events); 5356 } 5357 DPRINTF(("Configuring WiMAX coexistence\n")); 5358 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 5359 } 5360 5361 static int 5362 iwn6000_temp_offset_calib(struct iwn_softc *sc) 5363 { 5364 struct iwn6000_phy_calib_temp_offset cmd; 5365 5366 memset(&cmd, 0, sizeof cmd); 5367 cmd.code = IWN6000_PHY_CALIB_TEMP_OFFSET; 5368 cmd.ngroups = 1; 5369 cmd.isvalid = 1; 5370 if (sc->eeprom_temp != 0) 5371 cmd.offset = htole16(sc->eeprom_temp); 5372 else 5373 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET); 5374 DPRINTF(("setting radio sensor offset to %d\n", le16toh(cmd.offset))); 5375 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 5376 } 5377 5378 static int 5379 iwn2000_temp_offset_calib(struct iwn_softc *sc) 5380 { 5381 struct iwn2000_phy_calib_temp_offset cmd; 5382 5383 memset(&cmd, 0, sizeof cmd); 5384 cmd.code = IWN2000_PHY_CALIB_TEMP_OFFSET; 5385 cmd.ngroups = 1; 5386 cmd.isvalid = 1; 5387 if (sc->eeprom_rawtemp != 0) { 5388 cmd.offset_low = htole16(sc->eeprom_rawtemp); 5389 cmd.offset_high = htole16(sc->eeprom_temp); 5390 } else { 5391 cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET); 5392 cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET); 5393 } 5394 cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage); 5395 DPRINTF(("setting radio sensor offset to %d:%d, voltage to %d\n", 5396 le16toh(cmd.offset_low), le16toh(cmd.offset_high), 5397 le16toh(cmd.burnt_voltage_ref))); 5398 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 5399 } 5400 5401 /* 5402 * This function is called after the runtime firmware notifies us of its 5403 * readiness (called in a process context). 5404 */ 5405 static int 5406 iwn4965_post_alive(struct iwn_softc *sc) 5407 { 5408 int error, qid; 5409 5410 if ((error = iwn_nic_lock(sc)) != 0) 5411 return error; 5412 5413 /* Clear TX scheduler state in SRAM. */ 5414 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5415 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, 5416 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); 5417 5418 /* Set physical address of TX scheduler rings (1KB aligned). */ 5419 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5420 5421 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5422 5423 /* Disable chain mode for all our 16 queues. */ 5424 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); 5425 5426 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { 5427 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); 5428 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5429 5430 /* Set scheduler window size. */ 5431 iwn_mem_write(sc, sc->sched_base + 5432 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); 5433 /* Set scheduler frame limit. */ 5434 iwn_mem_write(sc, sc->sched_base + 5435 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5436 IWN_SCHED_LIMIT << 16); 5437 } 5438 5439 /* Enable interrupts for all our 16 queues. */ 5440 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); 5441 /* Identify TX FIFO rings (0-7). */ 5442 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); 5443 5444 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. 
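Each ring is bound to its hardware FIFO through the qid2fifo table below.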
*/ 5445 for (qid = 0; qid < 7; qid++) { 5446 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 5447 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5448 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 5449 } 5450 iwn_nic_unlock(sc); 5451 return 0; 5452 } 5453 5454 /* 5455 * This function is called after the initialization or runtime firmware 5456 * notifies us of its readiness (called in a process context). 5457 */ 5458 static int 5459 iwn5000_post_alive(struct iwn_softc *sc) 5460 { 5461 int error, qid; 5462 5463 /* Switch to using ICT interrupt mode. */ 5464 iwn5000_ict_reset(sc); 5465 5466 if ((error = iwn_nic_lock(sc)) != 0) 5467 return error; 5468 5469 /* Clear TX scheduler state in SRAM. */ 5470 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5471 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 5472 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); 5473 5474 /* Set physical address of TX scheduler rings (1KB aligned). */ 5475 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5476 5477 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5478 5479 /* Enable chain mode for all queues, except command queue. */ 5480 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); 5481 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 5482 5483 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 5484 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 5485 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5486 5487 iwn_mem_write(sc, sc->sched_base + 5488 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 5489 /* Set scheduler window size and frame limit. */ 5490 iwn_mem_write(sc, sc->sched_base + 5491 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5492 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5493 } 5494 5495 /* Enable interrupts for all our 20 queues. */ 5496 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 5497 /* Identify TX FIFO rings (0-7). */ 5498 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 5499 5500 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 5501 for (qid = 0; qid < 7; qid++) { 5502 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 5503 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5504 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 5505 } 5506 iwn_nic_unlock(sc); 5507 5508 /* Configure WiMAX coexistence for combo adapters. */ 5509 error = iwn5000_send_wimax_coex(sc); 5510 if (error != 0) { 5511 aprint_error_dev(sc->sc_dev, 5512 "could not configure WiMAX coexistence\n"); 5513 return error; 5514 } 5515 if (sc->hw_type != IWN_HW_REV_TYPE_5150) { 5516 struct iwn5000_phy_calib_crystal cmd; 5517 5518 /* Perform crystal calibration. */ 5519 memset(&cmd, 0, sizeof cmd); 5520 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 5521 cmd.ngroups = 1; 5522 cmd.isvalid = 1; 5523 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff; 5524 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff; 5525 DPRINTF(("sending crystal calibration %d, %d\n", 5526 cmd.cap_pin[0], cmd.cap_pin[1])); 5527 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 5528 if (error != 0) { 5529 aprint_error_dev(sc->sc_dev, 5530 "crystal calibration failed\n"); 5531 return error; 5532 } 5533 } 5534 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { 5535 /* Query calibration from the initialization firmware. 
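The results are saved in sc->calibcmd[] and forwarded to the runtime image by iwn5000_send_calibration() once it is running.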
*/ 5536 if ((error = iwn5000_query_calibration(sc)) != 0) { 5537 aprint_error_dev(sc->sc_dev, 5538 "could not query calibration\n"); 5539 return error; 5540 } 5541 /* 5542 * We have the calibration results now, reboot with the 5543 * runtime firmware (call ourselves recursively!) 5544 */ 5545 iwn_hw_stop(sc); 5546 error = iwn_hw_init(sc); 5547 } else { 5548 /* Send calibration results to runtime firmware. */ 5549 error = iwn5000_send_calibration(sc); 5550 } 5551 return error; 5552 } 5553 5554 /* 5555 * The firmware boot code is small and is intended to be copied directly into 5556 * the NIC internal memory (no DMA transfer). 5557 */ 5558 static int 5559 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) 5560 { 5561 int error, ntries; 5562 5563 size /= sizeof (uint32_t); 5564 5565 if ((error = iwn_nic_lock(sc)) != 0) 5566 return error; 5567 5568 /* Copy microcode image into NIC memory. */ 5569 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, 5570 (const uint32_t *)ucode, size); 5571 5572 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); 5573 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); 5574 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); 5575 5576 /* Start boot load now. */ 5577 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); 5578 5579 /* Wait for transfer to complete. */ 5580 for (ntries = 0; ntries < 1000; ntries++) { 5581 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & 5582 IWN_BSM_WR_CTRL_START)) 5583 break; 5584 DELAY(10); 5585 } 5586 if (ntries == 1000) { 5587 aprint_error_dev(sc->sc_dev, 5588 "could not load boot firmware\n"); 5589 iwn_nic_unlock(sc); 5590 return ETIMEDOUT; 5591 } 5592 5593 /* Enable boot after power up. */ 5594 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); 5595 5596 iwn_nic_unlock(sc); 5597 return 0; 5598 } 5599 5600 static int 5601 iwn4965_load_firmware(struct iwn_softc *sc) 5602 { 5603 struct iwn_fw_info *fw = &sc->fw; 5604 struct iwn_dma_info *dma = &sc->fw_dma; 5605 int error; 5606 5607 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 5608 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 5609 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->init.datasz, 5610 BUS_DMASYNC_PREWRITE); 5611 memcpy((char *)dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5612 fw->init.text, fw->init.textsz); 5613 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ, 5614 fw->init.textsz, BUS_DMASYNC_PREWRITE); 5615 5616 /* Tell adapter where to find initialization sections. */ 5617 if ((error = iwn_nic_lock(sc)) != 0) 5618 return error; 5619 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5620 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); 5621 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5622 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5623 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 5624 iwn_nic_unlock(sc); 5625 5626 /* Load firmware boot code. */ 5627 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 5628 if (error != 0) { 5629 aprint_error_dev(sc->sc_dev, 5630 "could not load boot firmware\n"); 5631 return error; 5632 } 5633 /* Now press "execute". */ 5634 IWN_WRITE(sc, IWN_RESET, 0); 5635 5636 /* Wait at most one second for first alive notification. */ 5637 if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) { 5638 aprint_error_dev(sc->sc_dev, 5639 "timeout waiting for adapter to initialize\n"); 5640 return error; 5641 } 5642 5643 /* Retrieve current temperature for initial TX power calibration. 
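 * The raw value comes from the ucode_info block posted by the firmware in
 * its "alive" notification; iwn4965_get_temperature() converts it using
 * the other calibration values reported in that same block.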
*/ 5644 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; 5645 sc->temp = iwn4965_get_temperature(sc); 5646 5647 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 5648 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 5649 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->main.datasz, 5650 BUS_DMASYNC_PREWRITE); 5651 memcpy((char *)dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5652 fw->main.text, fw->main.textsz); 5653 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ, 5654 fw->main.textsz, BUS_DMASYNC_PREWRITE); 5655 5656 /* Tell adapter where to find runtime sections. */ 5657 if ((error = iwn_nic_lock(sc)) != 0) 5658 return error; 5659 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5660 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); 5661 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5662 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5663 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, 5664 IWN_FW_UPDATED | fw->main.textsz); 5665 iwn_nic_unlock(sc); 5666 5667 return 0; 5668 } 5669 5670 static int 5671 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, 5672 const uint8_t *section, int size) 5673 { 5674 struct iwn_dma_info *dma = &sc->fw_dma; 5675 int error; 5676 5677 /* Copy firmware section into pre-allocated DMA-safe memory. */ 5678 memcpy(dma->vaddr, section, size); 5679 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, size, BUS_DMASYNC_PREWRITE); 5680 5681 if ((error = iwn_nic_lock(sc)) != 0) 5682 return error; 5683 5684 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5685 IWN_FH_TX_CONFIG_DMA_PAUSE); 5686 5687 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); 5688 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), 5689 IWN_LOADDR(dma->paddr)); 5690 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), 5691 IWN_HIADDR(dma->paddr) << 28 | size); 5692 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), 5693 IWN_FH_TXBUF_STATUS_TBNUM(1) | 5694 IWN_FH_TXBUF_STATUS_TBIDX(1) | 5695 IWN_FH_TXBUF_STATUS_TFBD_VALID); 5696 5697 /* Kick Flow Handler to start DMA transfer. */ 5698 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5699 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); 5700 5701 iwn_nic_unlock(sc); 5702 5703 /* Wait at most five seconds for FH DMA transfer to complete. */ 5704 return tsleep(sc, PCATCH, "iwninit", 5 * hz); 5705 } 5706 5707 static int 5708 iwn5000_load_firmware(struct iwn_softc *sc) 5709 { 5710 struct iwn_fw_part *fw; 5711 int error; 5712 5713 /* Load the initialization firmware on first boot only. */ 5714 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ? 5715 &sc->fw.main : &sc->fw.init; 5716 5717 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, 5718 fw->text, fw->textsz); 5719 if (error != 0) { 5720 aprint_error_dev(sc->sc_dev, 5721 "could not load firmware %s section\n", ".text"); 5722 return error; 5723 } 5724 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, 5725 fw->data, fw->datasz); 5726 if (error != 0) { 5727 aprint_error_dev(sc->sc_dev, 5728 "could not load firmware %s section\n", ".data"); 5729 return error; 5730 } 5731 5732 /* Now press "execute". */ 5733 IWN_WRITE(sc, IWN_RESET, 0); 5734 return 0; 5735 } 5736 5737 /* 5738 * Extract text and data sections from a legacy firmware image. 
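 * Such an image starts with a small header (24 bytes, or 28 when it also
 * carries a build number) containing the API revision and the sizes of
 * the main, init and boot sections; the sections themselves follow the
 * header back to back in that order.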
5739 */ 5740 static int 5741 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) 5742 { 5743 const uint32_t *ptr; 5744 size_t hdrlen = 24; 5745 uint32_t rev; 5746 5747 ptr = (const uint32_t *)fw->data; 5748 rev = le32toh(*ptr++); 5749 5750 sc->ucode_rev = rev; 5751 5752 /* Check firmware API version. */ 5753 if (IWN_FW_API(rev) <= 1) { 5754 aprint_error_dev(sc->sc_dev, 5755 "bad firmware, need API version >=2\n"); 5756 return EINVAL; 5757 } 5758 if (IWN_FW_API(rev) >= 3) { 5759 /* Skip build number (version 2 header). */ 5760 hdrlen += 4; 5761 ptr++; 5762 } 5763 if (fw->size < hdrlen) { 5764 aprint_error_dev(sc->sc_dev, 5765 "firmware too short: %zd bytes\n", fw->size); 5766 return EINVAL; 5767 } 5768 fw->main.textsz = le32toh(*ptr++); 5769 fw->main.datasz = le32toh(*ptr++); 5770 fw->init.textsz = le32toh(*ptr++); 5771 fw->init.datasz = le32toh(*ptr++); 5772 fw->boot.textsz = le32toh(*ptr++); 5773 5774 /* Check that all firmware sections fit. */ 5775 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + 5776 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5777 aprint_error_dev(sc->sc_dev, 5778 "firmware too short: %zd bytes\n", fw->size); 5779 return EINVAL; 5780 } 5781 5782 /* Get pointers to firmware sections. */ 5783 fw->main.text = (const uint8_t *)ptr; 5784 fw->main.data = fw->main.text + fw->main.textsz; 5785 fw->init.text = fw->main.data + fw->main.datasz; 5786 fw->init.data = fw->init.text + fw->init.textsz; 5787 fw->boot.text = fw->init.data + fw->init.datasz; 5788 return 0; 5789 } 5790 5791 /* 5792 * Extract text and data sections from a TLV firmware image. 5793 */ 5794 static int 5795 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 5796 uint16_t alt) 5797 { 5798 const struct iwn_fw_tlv_hdr *hdr; 5799 const struct iwn_fw_tlv *tlv; 5800 const uint8_t *ptr, *end; 5801 uint64_t altmask; 5802 uint32_t len; 5803 5804 if (fw->size < sizeof (*hdr)) { 5805 aprint_error_dev(sc->sc_dev, 5806 "firmware too short: %zd bytes\n", fw->size); 5807 return EINVAL; 5808 } 5809 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 5810 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 5811 aprint_error_dev(sc->sc_dev, 5812 "bad firmware signature 0x%08x\n", le32toh(hdr->signature)); 5813 return EINVAL; 5814 } 5815 DPRINTF(("FW: \"%.64s\", build 0x%x\n", hdr->descr, 5816 le32toh(hdr->build))); 5817 sc->ucode_rev = le32toh(hdr->rev); 5818 5819 /* 5820 * Select the closest supported alternative that is less than 5821 * or equal to the specified one. 5822 */ 5823 altmask = le64toh(hdr->altmask); 5824 while (alt > 0 && !(altmask & (1ULL << alt))) 5825 alt--; /* Downgrade. */ 5826 DPRINTF(("using alternative %d\n", alt)); 5827 5828 ptr = (const uint8_t *)(hdr + 1); 5829 end = (const uint8_t *)(fw->data + fw->size); 5830 5831 /* Parse type-length-value fields. */ 5832 while (ptr + sizeof (*tlv) <= end) { 5833 tlv = (const struct iwn_fw_tlv *)ptr; 5834 len = le32toh(tlv->len); 5835 5836 ptr += sizeof (*tlv); 5837 if (ptr + len > end) { 5838 aprint_error_dev(sc->sc_dev, 5839 "firmware too short: %zd bytes\n", fw->size); 5840 return EINVAL; 5841 } 5842 /* Skip other alternatives. 
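 * A TLV with an alternative number of zero applies to every alternative;
 * any other value is only honoured when it matches the alternative
 * selected above.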
*/ 5843 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 5844 goto next; 5845 5846 switch (le16toh(tlv->type)) { 5847 case IWN_FW_TLV_MAIN_TEXT: 5848 fw->main.text = ptr; 5849 fw->main.textsz = len; 5850 break; 5851 case IWN_FW_TLV_MAIN_DATA: 5852 fw->main.data = ptr; 5853 fw->main.datasz = len; 5854 break; 5855 case IWN_FW_TLV_INIT_TEXT: 5856 fw->init.text = ptr; 5857 fw->init.textsz = len; 5858 break; 5859 case IWN_FW_TLV_INIT_DATA: 5860 fw->init.data = ptr; 5861 fw->init.datasz = len; 5862 break; 5863 case IWN_FW_TLV_BOOT_TEXT: 5864 fw->boot.text = ptr; 5865 fw->boot.textsz = len; 5866 break; 5867 case IWN_FW_TLV_ENH_SENS: 5868 if (len != 0) { 5869 aprint_error_dev(sc->sc_dev, 5870 "TLV type %d has invalid size %u\n", 5871 le16toh(tlv->type), len); 5872 goto next; 5873 } 5874 sc->sc_flags |= IWN_FLAG_ENH_SENS; 5875 break; 5876 case IWN_FW_TLV_PHY_CALIB: 5877 if (len != sizeof(uint32_t)) { 5878 aprint_error_dev(sc->sc_dev, 5879 "TLV type %d has invalid size %u\n", 5880 le16toh(tlv->type), len); 5881 goto next; 5882 } 5883 if (le32toh(*ptr) <= IWN5000_PHY_CALIB_MAX) { 5884 sc->reset_noise_gain = le32toh(*ptr); 5885 sc->noise_gain = le32toh(*ptr) + 1; 5886 } 5887 break; 5888 case IWN_FW_TLV_FLAGS: 5889 if (len < sizeof(uint32_t)) 5890 break; 5891 if (len % sizeof(uint32_t)) 5892 break; 5893 sc->tlv_feature_flags = le32toh(*ptr); 5894 DPRINTF(("feature: 0x%08x\n", sc->tlv_feature_flags)); 5895 break; 5896 default: 5897 DPRINTF(("TLV type %d not handled\n", 5898 le16toh(tlv->type))); 5899 break; 5900 } 5901 next: /* TLV fields are 32-bit aligned. */ 5902 ptr += (len + 3) & ~3; 5903 } 5904 return 0; 5905 } 5906 5907 static int 5908 iwn_read_firmware(struct iwn_softc *sc) 5909 { 5910 struct iwn_fw_info *fw = &sc->fw; 5911 firmware_handle_t fwh; 5912 int error; 5913 5914 /* 5915 * Some PHY calibration commands are firmware-dependent; these 5916 * are the default values that will be overridden if 5917 * necessary. 5918 */ 5919 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 5920 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN; 5921 5922 /* Initialize for error returns */ 5923 fw->data = NULL; 5924 fw->size = 0; 5925 5926 /* Open firmware image. */ 5927 if ((error = firmware_open("if_iwn", sc->fwname, &fwh)) != 0) { 5928 aprint_error_dev(sc->sc_dev, 5929 "could not get firmware handle %s\n", sc->fwname); 5930 return error; 5931 } 5932 fw->size = firmware_get_size(fwh); 5933 if (fw->size < sizeof (uint32_t)) { 5934 aprint_error_dev(sc->sc_dev, 5935 "firmware too short: %zd bytes\n", fw->size); 5936 firmware_close(fwh); 5937 return EINVAL; 5938 } 5939 5940 /* Read the firmware. */ 5941 fw->data = firmware_malloc(fw->size); 5942 if (fw->data == NULL) { 5943 aprint_error_dev(sc->sc_dev, 5944 "not enough memory to stock firmware %s\n", sc->fwname); 5945 firmware_close(fwh); 5946 return ENOMEM; 5947 } 5948 error = firmware_read(fwh, 0, fw->data, fw->size); 5949 firmware_close(fwh); 5950 if (error != 0) { 5951 aprint_error_dev(sc->sc_dev, 5952 "could not read firmware %s\n", sc->fwname); 5953 goto out; 5954 } 5955 5956 /* Retrieve text and data sections. */ 5957 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ 5958 error = iwn_read_firmware_leg(sc, fw); 5959 else 5960 error = iwn_read_firmware_tlv(sc, fw, 1); 5961 if (error != 0) { 5962 aprint_error_dev(sc->sc_dev, 5963 "could not read firmware sections\n"); 5964 goto out; 5965 } 5966 5967 /* Make sure text and data sections fit in hardware memory. 
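 * The per-chip limits (sc->fw_text_maxsz, sc->fw_data_maxsz) are filled
 * in at attach time; the boot section is further limited to
 * IWN_FW_BOOT_TEXT_MAXSZ and must be a multiple of 4 bytes since it is
 * copied to NIC memory one 32-bit word at a time.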
*/ 5968 if (fw->main.textsz > sc->fw_text_maxsz || 5969 fw->main.datasz > sc->fw_data_maxsz || 5970 fw->init.textsz > sc->fw_text_maxsz || 5971 fw->init.datasz > sc->fw_data_maxsz || 5972 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 5973 (fw->boot.textsz & 3) != 0) { 5974 aprint_error_dev(sc->sc_dev, 5975 "firmware sections too large\n"); 5976 goto out; 5977 } 5978 5979 /* We can proceed with loading the firmware. */ 5980 return 0; 5981 out: 5982 firmware_free(fw->data, fw->size); 5983 fw->data = NULL; 5984 fw->size = 0; 5985 return error ? error : EINVAL; 5986 } 5987 5988 static int 5989 iwn_clock_wait(struct iwn_softc *sc) 5990 { 5991 int ntries; 5992 5993 /* Set "initialization complete" bit. */ 5994 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 5995 5996 /* Wait for clock stabilization. */ 5997 for (ntries = 0; ntries < 2500; ntries++) { 5998 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) 5999 return 0; 6000 DELAY(10); 6001 } 6002 aprint_error_dev(sc->sc_dev, 6003 "timeout waiting for clock stabilization\n"); 6004 return ETIMEDOUT; 6005 } 6006 6007 static int 6008 iwn_apm_init(struct iwn_softc *sc) 6009 { 6010 pcireg_t reg; 6011 int error; 6012 6013 /* Disable L0s exit timer (NMI bug workaround). */ 6014 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 6015 /* Don't wait for ICH L0s (ICH bug workaround). */ 6016 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 6017 6018 /* Set FH wait threshold to max (HW bug under stress workaround). */ 6019 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); 6020 6021 /* Enable HAP INTA to move adapter from L1a to L0s. */ 6022 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); 6023 6024 /* Retrieve PCIe Active State Power Management (ASPM). */ 6025 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 6026 sc->sc_cap_off + PCIE_LCSR); 6027 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 6028 if (reg & PCIE_LCSR_ASPM_L1) /* L1 Entry enabled. */ 6029 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6030 else 6031 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6032 6033 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 6034 sc->hw_type <= IWN_HW_REV_TYPE_1000) 6035 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT); 6036 6037 /* Wait for clock stabilization before accessing prph. */ 6038 if ((error = iwn_clock_wait(sc)) != 0) 6039 return error; 6040 6041 if ((error = iwn_nic_lock(sc)) != 0) 6042 return error; 6043 if (sc->hw_type == IWN_HW_REV_TYPE_4965) { 6044 /* Enable DMA and BSM (Bootstrap State Machine). */ 6045 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6046 IWN_APMG_CLK_CTRL_DMA_CLK_RQT | 6047 IWN_APMG_CLK_CTRL_BSM_CLK_RQT); 6048 } else { 6049 /* Enable DMA. */ 6050 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6051 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 6052 } 6053 DELAY(20); 6054 /* Disable L1-Active. */ 6055 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 6056 iwn_nic_unlock(sc); 6057 6058 return 0; 6059 } 6060 6061 static void 6062 iwn_apm_stop_master(struct iwn_softc *sc) 6063 { 6064 int ntries; 6065 6066 /* Stop busmaster DMA activity. */ 6067 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); 6068 for (ntries = 0; ntries < 100; ntries++) { 6069 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) 6070 return; 6071 DELAY(10); 6072 } 6073 aprint_error_dev(sc->sc_dev, "timeout waiting for master\n"); 6074 } 6075 6076 static void 6077 iwn_apm_stop(struct iwn_softc *sc) 6078 { 6079 iwn_apm_stop_master(sc); 6080 6081 /* Reset the entire device. 
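 * This is a full software reset; clearing the "initialization complete"
 * bit below presumably leaves the device in its low-power state until
 * iwn_apm_init() brings it back up.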
*/ 6082 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); 6083 DELAY(10); 6084 /* Clear "initialization complete" bit. */ 6085 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 6086 } 6087 6088 static int 6089 iwn4965_nic_config(struct iwn_softc *sc) 6090 { 6091 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { 6092 /* 6093 * I don't believe this to be correct but this is what the 6094 * vendor driver is doing. Probably the bits should not be 6095 * shifted in IWN_RFCFG_*. 6096 */ 6097 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6098 IWN_RFCFG_TYPE(sc->rfcfg) | 6099 IWN_RFCFG_STEP(sc->rfcfg) | 6100 IWN_RFCFG_DASH(sc->rfcfg)); 6101 } 6102 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6103 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 6104 return 0; 6105 } 6106 6107 static int 6108 iwn5000_nic_config(struct iwn_softc *sc) 6109 { 6110 uint32_t tmp; 6111 int error; 6112 6113 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { 6114 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6115 IWN_RFCFG_TYPE(sc->rfcfg) | 6116 IWN_RFCFG_STEP(sc->rfcfg) | 6117 IWN_RFCFG_DASH(sc->rfcfg)); 6118 } 6119 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6120 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 6121 6122 if ((error = iwn_nic_lock(sc)) != 0) 6123 return error; 6124 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); 6125 6126 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 6127 /* 6128 * Select first Switching Voltage Regulator (1.32V) to 6129 * solve a stability issue related to noisy DC2DC line 6130 * in the silicon of 1000 Series. 6131 */ 6132 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR); 6133 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK; 6134 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32; 6135 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp); 6136 } 6137 iwn_nic_unlock(sc); 6138 6139 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) { 6140 /* Use internal power amplifier only. */ 6141 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA); 6142 } 6143 if ((sc->hw_type == IWN_HW_REV_TYPE_6050 || 6144 sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) { 6145 /* Indicate that ROM calibration version is >=6. */ 6146 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6); 6147 } 6148 if (sc->hw_type == IWN_HW_REV_TYPE_6005) 6149 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2); 6150 if (sc->hw_type == IWN_HW_REV_TYPE_2030 || 6151 sc->hw_type == IWN_HW_REV_TYPE_2000 || 6152 sc->hw_type == IWN_HW_REV_TYPE_135 || 6153 sc->hw_type == IWN_HW_REV_TYPE_105) 6154 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_IQ_INVERT); 6155 return 0; 6156 } 6157 6158 /* 6159 * Take NIC ownership over Intel Active Management Technology (AMT). 6160 */ 6161 static int 6162 iwn_hw_prepare(struct iwn_softc *sc) 6163 { 6164 int ntries; 6165 6166 /* Check if hardware is ready. */ 6167 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 6168 for (ntries = 0; ntries < 5; ntries++) { 6169 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 6170 IWN_HW_IF_CONFIG_NIC_READY) 6171 return 0; 6172 DELAY(10); 6173 } 6174 6175 /* Hardware not ready, force into ready state. */ 6176 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE); 6177 for (ntries = 0; ntries < 15000; ntries++) { 6178 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) & 6179 IWN_HW_IF_CONFIG_PREPARE_DONE)) 6180 break; 6181 DELAY(10); 6182 } 6183 if (ntries == 15000) 6184 return ETIMEDOUT; 6185 6186 /* Hardware should be ready now. 
*/ 6187 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 6188 for (ntries = 0; ntries < 5; ntries++) { 6189 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 6190 IWN_HW_IF_CONFIG_NIC_READY) 6191 return 0; 6192 DELAY(10); 6193 } 6194 return ETIMEDOUT; 6195 } 6196 6197 static int 6198 iwn_hw_init(struct iwn_softc *sc) 6199 { 6200 struct iwn_ops *ops = &sc->ops; 6201 int error, chnl, qid; 6202 6203 /* Clear pending interrupts. */ 6204 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6205 6206 if ((error = iwn_apm_init(sc)) != 0) { 6207 aprint_error_dev(sc->sc_dev, 6208 "could not power ON adapter\n"); 6209 return error; 6210 } 6211 6212 /* Select VMAIN power source. */ 6213 if ((error = iwn_nic_lock(sc)) != 0) 6214 return error; 6215 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK); 6216 iwn_nic_unlock(sc); 6217 6218 /* Perform adapter-specific initialization. */ 6219 if ((error = ops->nic_config(sc)) != 0) 6220 return error; 6221 6222 /* Initialize RX ring. */ 6223 if ((error = iwn_nic_lock(sc)) != 0) 6224 return error; 6225 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 6226 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0); 6227 /* Set physical address of RX ring (256-byte aligned). */ 6228 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8); 6229 /* Set physical address of RX status (16-byte aligned). */ 6230 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4); 6231 /* Enable RX. */ 6232 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 6233 IWN_FH_RX_CONFIG_ENA | 6234 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */ 6235 IWN_FH_RX_CONFIG_IRQ_DST_HOST | 6236 IWN_FH_RX_CONFIG_SINGLE_FRAME | 6237 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) | 6238 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG)); 6239 iwn_nic_unlock(sc); 6240 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7); 6241 6242 if ((error = iwn_nic_lock(sc)) != 0) 6243 return error; 6244 6245 /* Initialize TX scheduler. */ 6246 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 6247 6248 /* Set physical address of "keep warm" page (16-byte aligned). */ 6249 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4); 6250 6251 /* Initialize TX rings. */ 6252 for (qid = 0; qid < sc->ntxqs; qid++) { 6253 struct iwn_tx_ring *txq = &sc->txq[qid]; 6254 6255 /* Set physical address of TX ring (256-byte aligned). */ 6256 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid), 6257 txq->desc_dma.paddr >> 8); 6258 } 6259 iwn_nic_unlock(sc); 6260 6261 /* Enable DMA channels. */ 6262 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 6263 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 6264 IWN_FH_TX_CONFIG_DMA_ENA | 6265 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA); 6266 } 6267 6268 /* Clear "radio off" and "commands blocked" bits. */ 6269 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6270 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED); 6271 6272 /* Clear pending interrupts. */ 6273 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6274 /* Enable interrupt coalescing. */ 6275 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8); 6276 /* Enable interrupts. */ 6277 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 6278 6279 /* _Really_ make sure "radio off" bit is cleared! */ 6280 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6281 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6282 6283 /* Enable shadow registers. 
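 * (6000 series and newer only; register shadowing apparently lets the
 * driver update certain registers without first waking up the NIC.)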
*/ 6284 if (sc->hw_type >= IWN_HW_REV_TYPE_6000) 6285 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff); 6286 6287 if ((error = ops->load_firmware(sc)) != 0) { 6288 aprint_error_dev(sc->sc_dev, 6289 "could not load firmware\n"); 6290 return error; 6291 } 6292 /* Wait at most one second for firmware alive notification. */ 6293 if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) { 6294 aprint_error_dev(sc->sc_dev, 6295 "timeout waiting for adapter to initialize\n"); 6296 return error; 6297 } 6298 /* Do post-firmware initialization. */ 6299 return ops->post_alive(sc); 6300 } 6301 6302 static void 6303 iwn_hw_stop(struct iwn_softc *sc) 6304 { 6305 int chnl, qid, ntries; 6306 6307 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); 6308 6309 /* Disable interrupts. */ 6310 IWN_WRITE(sc, IWN_INT_MASK, 0); 6311 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6312 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff); 6313 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 6314 6315 /* Make sure we no longer hold the NIC lock. */ 6316 iwn_nic_unlock(sc); 6317 6318 /* Stop TX scheduler. */ 6319 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 6320 6321 /* Stop all DMA channels. */ 6322 if (iwn_nic_lock(sc) == 0) { 6323 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 6324 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); 6325 for (ntries = 0; ntries < 200; ntries++) { 6326 if (IWN_READ(sc, IWN_FH_TX_STATUS) & 6327 IWN_FH_TX_STATUS_IDLE(chnl)) 6328 break; 6329 DELAY(10); 6330 } 6331 } 6332 iwn_nic_unlock(sc); 6333 } 6334 6335 /* Stop RX ring. */ 6336 iwn_reset_rx_ring(sc, &sc->rxq); 6337 6338 /* Reset all TX rings. */ 6339 for (qid = 0; qid < sc->ntxqs; qid++) 6340 iwn_reset_tx_ring(sc, &sc->txq[qid]); 6341 6342 if (iwn_nic_lock(sc) == 0) { 6343 iwn_prph_write(sc, IWN_APMG_CLK_DIS, 6344 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 6345 iwn_nic_unlock(sc); 6346 } 6347 DELAY(5); 6348 /* Power OFF adapter. */ 6349 iwn_apm_stop(sc); 6350 } 6351 6352 static int 6353 iwn_init(struct ifnet *ifp) 6354 { 6355 struct iwn_softc *sc = ifp->if_softc; 6356 struct ieee80211com *ic = &sc->sc_ic; 6357 int error; 6358 6359 mutex_enter(&sc->sc_mtx); 6360 if (sc->sc_flags & IWN_FLAG_HW_INITED) 6361 goto out; 6362 if ((error = iwn_hw_prepare(sc)) != 0) { 6363 aprint_error_dev(sc->sc_dev, 6364 "hardware not ready\n"); 6365 goto fail; 6366 } 6367 6368 /* Check that the radio is not disabled by hardware switch. */ 6369 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { 6370 aprint_error_dev(sc->sc_dev, 6371 "radio is disabled by hardware switch\n"); 6372 error = EPERM; /* :-) */ 6373 goto fail; 6374 } 6375 6376 /* Read firmware images from the filesystem. */ 6377 if ((error = iwn_read_firmware(sc)) != 0) { 6378 aprint_error_dev(sc->sc_dev, 6379 "could not read firmware\n"); 6380 goto fail; 6381 } 6382 6383 /* Initialize interrupt mask to default value. */ 6384 sc->int_mask = IWN_INT_MASK_DEF; 6385 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 6386 6387 /* Initialize hardware and upload firmware. */ 6388 KASSERT(sc->fw.data != NULL && sc->fw.size > 0); 6389 error = iwn_hw_init(sc); 6390 firmware_free(sc->fw.data, sc->fw.size); 6391 sc->fw.data = NULL; 6392 sc->fw.size = 0; 6393 if (error != 0) { 6394 aprint_error_dev(sc->sc_dev, 6395 "could not initialize hardware\n"); 6396 goto fail; 6397 } 6398 6399 /* Configure adapter now that it is ready. 
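 * iwn_config() pushes the initial settings (the RXON state, TX power and
 * related parameters) to the freshly booted runtime firmware.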
*/
6400 if ((error = iwn_config(sc)) != 0) {
6401 aprint_error_dev(sc->sc_dev,
6402 "could not configure device\n");
6403 goto fail;
6404 }
6405
6406 sc->sc_beacon_wait = 0;
6407
6408 ifp->if_flags &= ~IFF_OACTIVE;
6409 ifp->if_flags |= IFF_RUNNING;
6410
6411 if (ic->ic_opmode != IEEE80211_M_MONITOR)
6412 ieee80211_begin_scan(ic, 0);
6413 else
6414 ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
6415
6416 sc->sc_flags |= IWN_FLAG_HW_INITED;
6417 out:
6418 mutex_exit(&sc->sc_mtx);
6419 return 0;
6420
6421 fail: mutex_exit(&sc->sc_mtx);
6422 iwn_stop(ifp, 1);
6423 return error;
6424 }
6425
6426 static void
6427 iwn_stop(struct ifnet *ifp, int disable)
6428 {
6429 struct iwn_softc *sc = ifp->if_softc;
6430 struct ieee80211com *ic = &sc->sc_ic;
6431
6432 if (!disable)
6433 mutex_enter(&sc->sc_mtx);
6434 sc->sc_flags &= ~IWN_FLAG_HW_INITED;
6435 ifp->if_timer = sc->sc_tx_timer = 0;
6436 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6437
6438 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
6439
6440 /* Power OFF hardware. */
6441 iwn_hw_stop(sc);
6442
6443 if (!disable)
6444 mutex_exit(&sc->sc_mtx);
6445 }
6446
6447 /*
6448 * XXX MCLGETI alternative
6449 *
6450 * With IWN_USE_RBUF defined, receive buffers come from the local rbuf cache
6451 * as long as free entries are available; MEXTMALLOC is used as a fallback.
6452 * Without IWN_USE_RBUF defined, MEXTMALLOC is used exclusively.
6453 * The MCLGET4K code is used for testing an alternative mbuf cache.
6454 */
6455
6456 static struct mbuf *
6457 MCLGETIalt(struct iwn_softc *sc, int how,
6458 struct ifnet *ifp __unused, u_int size)
6459 {
6460 struct mbuf *m;
6461 #ifdef IWN_USE_RBUF
6462 struct iwn_rbuf *rbuf;
6463 #endif
6464
6465 MGETHDR(m, how, MT_DATA);
6466 if (m == NULL)
6467 return NULL;
6468
6469 #ifdef IWN_USE_RBUF
6470 if (sc->rxq.nb_free_entries > 0 &&
6471 (rbuf = iwn_alloc_rbuf(sc)) != NULL) {
6472 /* Attach buffer to mbuf header. */
6473 MEXTADD(m, rbuf->vaddr, size, 0, iwn_free_rbuf, rbuf);
6474 m->m_flags |= M_EXT_RW;
6475 }
6476 else {
6477 MEXTMALLOC(m, size, how);
6478 if ((m->m_flags & M_EXT) == 0) {
6479 m_freem(m);
6480 return NULL;
6481 }
6482 }
6483
6484 #else
6485 #ifdef MCLGET4K
6486 if (size == 4096)
6487 MCLGET4K(m, how);
6488 else
6489 panic("size must be 4k");
6490 #else
6491 MEXTMALLOC(m, size, how);
6492 #endif
6493 if ((m->m_flags & M_EXT) == 0) {
6494 m_freem(m);
6495 return NULL;
6496 }
6497 #endif
6498
6499 return m;
6500 }
6501
6502 #ifdef IWN_USE_RBUF
6503 static struct iwn_rbuf *
6504 iwn_alloc_rbuf(struct iwn_softc *sc)
6505 {
6506 struct iwn_rbuf *rbuf;
6507 mutex_enter(&sc->rxq.freelist_mtx);
6508
6509 rbuf = SLIST_FIRST(&sc->rxq.freelist);
6510 if (rbuf != NULL) {
6511 SLIST_REMOVE_HEAD(&sc->rxq.freelist, next);
6512 sc->rxq.nb_free_entries --;
6513 }
6514 mutex_exit(&sc->rxq.freelist_mtx);
6515 return rbuf;
6516 }
6517
6518 /*
6519 * This is called automatically by the network stack when the mbuf to which
6520 * our RX buffer is attached is freed.
6521 */
6522 static void
6523 iwn_free_rbuf(struct mbuf* m, void *buf, size_t size, void *arg)
6524 {
6525 struct iwn_rbuf *rbuf = arg;
6526 struct iwn_softc *sc = rbuf->sc;
6527
6528 /* Put the RX buffer back in the free list.
*/ 6529 mutex_enter(&sc->rxq.freelist_mtx); 6530 SLIST_INSERT_HEAD(&sc->rxq.freelist, rbuf, next); 6531 mutex_exit(&sc->rxq.freelist_mtx); 6532 6533 sc->rxq.nb_free_entries ++; 6534 if (__predict_true(m != NULL)) 6535 pool_cache_put(mb_cache, m); 6536 } 6537 6538 static int 6539 iwn_alloc_rpool(struct iwn_softc *sc) 6540 { 6541 struct iwn_rx_ring *ring = &sc->rxq; 6542 struct iwn_rbuf *rbuf; 6543 int i, error; 6544 6545 mutex_init(&ring->freelist_mtx, MUTEX_DEFAULT, IPL_NET); 6546 6547 /* Allocate a big chunk of DMA'able memory... */ 6548 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->buf_dma, NULL, 6549 IWN_RBUF_COUNT * IWN_RBUF_SIZE, PAGE_SIZE); 6550 if (error != 0) { 6551 aprint_error_dev(sc->sc_dev, 6552 "could not allocate RX buffers DMA memory\n"); 6553 return error; 6554 } 6555 /* ...and split it into chunks of IWN_RBUF_SIZE bytes. */ 6556 SLIST_INIT(&ring->freelist); 6557 for (i = 0; i < IWN_RBUF_COUNT; i++) { 6558 rbuf = &ring->rbuf[i]; 6559 6560 rbuf->sc = sc; /* Backpointer for callbacks. */ 6561 rbuf->vaddr = (void *)((vaddr_t)ring->buf_dma.vaddr + i * IWN_RBUF_SIZE); 6562 rbuf->paddr = ring->buf_dma.paddr + i * IWN_RBUF_SIZE; 6563 6564 SLIST_INSERT_HEAD(&ring->freelist, rbuf, next); 6565 } 6566 ring->nb_free_entries = IWN_RBUF_COUNT; 6567 return 0; 6568 } 6569 6570 static void 6571 iwn_free_rpool(struct iwn_softc *sc) 6572 { 6573 iwn_dma_contig_free(&sc->rxq.buf_dma); 6574 } 6575 #endif 6576 6577 /* 6578 * XXX: Hack to set the current channel to the value advertised in beacons or 6579 * probe responses. Only used during AP detection. 6580 * XXX: Duplicated from if_iwi.c 6581 */ 6582 static void 6583 iwn_fix_channel(struct ieee80211com *ic, struct mbuf *m, 6584 struct iwn_rx_stat *stat) 6585 { 6586 struct iwn_softc *sc = ic->ic_ifp->if_softc; 6587 struct ieee80211_frame *wh; 6588 uint8_t subtype; 6589 uint8_t *frm, *efrm; 6590 6591 wh = mtod(m, struct ieee80211_frame *); 6592 6593 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT) 6594 return; 6595 6596 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 6597 6598 if (subtype != IEEE80211_FC0_SUBTYPE_BEACON && 6599 subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP) 6600 return; 6601 6602 if (sc->sc_flags & IWN_FLAG_SCANNING_5GHZ) { 6603 int chan = le16toh(stat->chan); 6604 if (chan < __arraycount(ic->ic_channels)) 6605 ic->ic_curchan = &ic->ic_channels[chan]; 6606 return; 6607 } 6608 6609 frm = (uint8_t *)(wh + 1); 6610 efrm = mtod(m, uint8_t *) + m->m_len; 6611 6612 frm += 12; /* skip tstamp, bintval and capinfo fields */ 6613 while (frm + 2 < efrm) { 6614 if (*frm == IEEE80211_ELEMID_DSPARMS) { 6615 #if IEEE80211_CHAN_MAX < 255 6616 if (frm[2] <= IEEE80211_CHAN_MAX) 6617 #endif 6618 ic->ic_curchan = &ic->ic_channels[frm[2]]; 6619 } 6620 6621 frm += frm[1] + 2; 6622 } 6623 } 6624 6625 #ifdef notyetMODULE 6626 6627 MODULE(MODULE_CLASS_DRIVER, if_iwn, "pci"); 6628 6629 #ifdef _MODULE 6630 #include "ioconf.c" 6631 #endif 6632 6633 static int 6634 if_iwn_modcmd(modcmd_t cmd, void *data) 6635 { 6636 int error = 0; 6637 6638 switch (cmd) { 6639 case MODULE_CMD_INIT: 6640 #ifdef _MODULE 6641 error = config_init_component(cfdriver_ioconf_if_iwn, 6642 cfattach_ioconf_if_iwn, cfdata_ioconf_if_iwn); 6643 #endif 6644 return error; 6645 case MODULE_CMD_FINI: 6646 #ifdef _MODULE 6647 error = config_fini_component(cfdriver_ioconf_if_iwn, 6648 cfattach_ioconf_if_iwn, cfdata_ioconf_if_iwn); 6649 #endif 6650 return error; 6651 case MODULE_CMD_AUTOUNLOAD: 6652 #ifdef _MODULE 6653 /* XXX This is not optional! 
*/ 6654 #endif 6655 return error; 6656 default: 6657 return ENOTTY; 6658 } 6659 } 6660 #endif 6661