1 /* $NetBSD: if_iwn.c,v 1.90 2018/06/26 06:48:01 msaitoh Exp $ */ 2 /* $OpenBSD: if_iwn.c,v 1.135 2014/09/10 07:22:09 dcoppa Exp $ */ 3 4 /*- 5 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr> 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 /* 21 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network 22 * adapters. 23 */ 24 #include <sys/cdefs.h> 25 __KERNEL_RCSID(0, "$NetBSD: if_iwn.c,v 1.90 2018/06/26 06:48:01 msaitoh Exp $"); 26 27 #define IWN_USE_RBUF /* Use local storage for RX */ 28 #undef IWN_HWCRYPTO /* XXX does not even compile yet */ 29 30 #include <sys/param.h> 31 #include <sys/sockio.h> 32 #include <sys/proc.h> 33 #include <sys/mbuf.h> 34 #include <sys/kernel.h> 35 #include <sys/socket.h> 36 #include <sys/systm.h> 37 #include <sys/malloc.h> 38 #ifdef notyetMODULE 39 #include <sys/module.h> 40 #endif 41 #include <sys/mutex.h> 42 #include <sys/conf.h> 43 #include <sys/kauth.h> 44 #include <sys/callout.h> 45 46 #include <dev/sysmon/sysmonvar.h> 47 48 #include <sys/bus.h> 49 #include <machine/endian.h> 50 #include <sys/intr.h> 51 52 #include <dev/pci/pcireg.h> 53 #include <dev/pci/pcivar.h> 54 #include <dev/pci/pcidevs.h> 55 56 #include <net/bpf.h> 57 #include <net/if.h> 58 #include <net/if_arp.h> 59 #include <net/if_dl.h> 60 #include <net/if_media.h> 61 #include <net/if_types.h> 62 63 #include <netinet/in.h> 64 #include <netinet/in_systm.h> 65 #include <netinet/in_var.h> 66 #include <net/if_ether.h> 67 #include <netinet/ip.h> 68 69 #include <net80211/ieee80211_var.h> 70 #include <net80211/ieee80211_amrr.h> 71 #include <net80211/ieee80211_radiotap.h> 72 73 #include <dev/firmload.h> 74 75 #include <dev/pci/if_iwnreg.h> 76 #include <dev/pci/if_iwnvar.h> 77 78 static const pci_product_id_t iwn_devices[] = { 79 PCI_PRODUCT_INTEL_WIFI_LINK_1030_1, 80 PCI_PRODUCT_INTEL_WIFI_LINK_1030_2, 81 PCI_PRODUCT_INTEL_WIFI_LINK_4965_1, 82 PCI_PRODUCT_INTEL_WIFI_LINK_4965_2, 83 PCI_PRODUCT_INTEL_WIFI_LINK_4965_3, 84 PCI_PRODUCT_INTEL_WIFI_LINK_4965_4, 85 PCI_PRODUCT_INTEL_WIFI_LINK_5100_1, 86 PCI_PRODUCT_INTEL_WIFI_LINK_5100_2, 87 PCI_PRODUCT_INTEL_WIFI_LINK_5150_1, 88 PCI_PRODUCT_INTEL_WIFI_LINK_5150_2, 89 PCI_PRODUCT_INTEL_WIFI_LINK_5300_1, 90 PCI_PRODUCT_INTEL_WIFI_LINK_5300_2, 91 PCI_PRODUCT_INTEL_WIFI_LINK_5350_1, 92 PCI_PRODUCT_INTEL_WIFI_LINK_5350_2, 93 PCI_PRODUCT_INTEL_WIFI_LINK_1000_1, 94 PCI_PRODUCT_INTEL_WIFI_LINK_1000_2, 95 PCI_PRODUCT_INTEL_WIFI_LINK_6000_3X3_1, 96 PCI_PRODUCT_INTEL_WIFI_LINK_6000_3X3_2, 97 PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_1, 98 PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_2, 99 PCI_PRODUCT_INTEL_WIFI_LINK_6050_2X2_1, 100 PCI_PRODUCT_INTEL_WIFI_LINK_6050_2X2_2, 101 PCI_PRODUCT_INTEL_WIFI_LINK_6005_2X2_1, 102 PCI_PRODUCT_INTEL_WIFI_LINK_6005_2X2_2, 103 PCI_PRODUCT_INTEL_WIFI_LINK_6230_1, 104 PCI_PRODUCT_INTEL_WIFI_LINK_6230_2, 105 
PCI_PRODUCT_INTEL_WIFI_LINK_6235, 106 PCI_PRODUCT_INTEL_WIFI_LINK_6235_2, 107 PCI_PRODUCT_INTEL_WIFI_LINK_100_1, 108 PCI_PRODUCT_INTEL_WIFI_LINK_100_2, 109 PCI_PRODUCT_INTEL_WIFI_LINK_130_1, 110 PCI_PRODUCT_INTEL_WIFI_LINK_130_2, 111 PCI_PRODUCT_INTEL_WIFI_LINK_2230_1, 112 PCI_PRODUCT_INTEL_WIFI_LINK_2230_2, 113 PCI_PRODUCT_INTEL_WIFI_LINK_2200_1, 114 PCI_PRODUCT_INTEL_WIFI_LINK_2200_2, 115 PCI_PRODUCT_INTEL_WIFI_LINK_135_1, 116 PCI_PRODUCT_INTEL_WIFI_LINK_135_2, 117 PCI_PRODUCT_INTEL_WIFI_LINK_105_1, 118 PCI_PRODUCT_INTEL_WIFI_LINK_105_2, 119 }; 120 121 static int iwn_match(device_t , struct cfdata *, void *); 122 static void iwn_attach(device_t , device_t , void *); 123 static int iwn4965_attach(struct iwn_softc *, pci_product_id_t); 124 static int iwn5000_attach(struct iwn_softc *, pci_product_id_t); 125 static void iwn_radiotap_attach(struct iwn_softc *); 126 static int iwn_detach(device_t , int); 127 #if 0 128 static void iwn_power(int, void *); 129 #endif 130 static bool iwn_resume(device_t, const pmf_qual_t *); 131 static int iwn_nic_lock(struct iwn_softc *); 132 static int iwn_eeprom_lock(struct iwn_softc *); 133 static int iwn_init_otprom(struct iwn_softc *); 134 static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int); 135 static int iwn_dma_contig_alloc(bus_dma_tag_t, struct iwn_dma_info *, 136 void **, bus_size_t, bus_size_t); 137 static void iwn_dma_contig_free(struct iwn_dma_info *); 138 static int iwn_alloc_sched(struct iwn_softc *); 139 static void iwn_free_sched(struct iwn_softc *); 140 static int iwn_alloc_kw(struct iwn_softc *); 141 static void iwn_free_kw(struct iwn_softc *); 142 static int iwn_alloc_ict(struct iwn_softc *); 143 static void iwn_free_ict(struct iwn_softc *); 144 static int iwn_alloc_fwmem(struct iwn_softc *); 145 static void iwn_free_fwmem(struct iwn_softc *); 146 static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 147 static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 148 static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 149 static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, 150 int); 151 static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 152 static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 153 static void iwn5000_ict_reset(struct iwn_softc *); 154 static int iwn_read_eeprom(struct iwn_softc *); 155 static void iwn4965_read_eeprom(struct iwn_softc *); 156 157 #ifdef IWN_DEBUG 158 static void iwn4965_print_power_group(struct iwn_softc *, int); 159 #endif 160 static void iwn5000_read_eeprom(struct iwn_softc *); 161 static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t); 162 static void iwn_read_eeprom_enhinfo(struct iwn_softc *); 163 static struct ieee80211_node *iwn_node_alloc(struct ieee80211_node_table *); 164 static void iwn_newassoc(struct ieee80211_node *, int); 165 static int iwn_media_change(struct ifnet *); 166 static int iwn_newstate(struct ieee80211com *, enum ieee80211_state, int); 167 static void iwn_iter_func(void *, struct ieee80211_node *); 168 static void iwn_calib_timeout(void *); 169 static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *, 170 struct iwn_rx_data *); 171 static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *, 172 struct iwn_rx_data *); 173 #ifndef IEEE80211_NO_HT 174 static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *, 175 struct iwn_rx_data *); 176 #endif 177 static void iwn5000_rx_calib_results(struct iwn_softc *, 178 
struct iwn_rx_desc *, struct iwn_rx_data *); 179 static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *, 180 struct iwn_rx_data *); 181 static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 182 struct iwn_rx_data *); 183 static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 184 struct iwn_rx_data *); 185 static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int, 186 uint8_t); 187 static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *); 188 static void iwn_notif_intr(struct iwn_softc *); 189 static void iwn_wakeup_intr(struct iwn_softc *); 190 static void iwn_fatal_intr(struct iwn_softc *); 191 static int iwn_intr(void *); 192 static void iwn_softintr(void *); 193 static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t, 194 uint16_t); 195 static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t, 196 uint16_t); 197 #ifdef notyet 198 static void iwn5000_reset_sched(struct iwn_softc *, int, int); 199 #endif 200 static int iwn_tx(struct iwn_softc *, struct mbuf *, 201 struct ieee80211_node *, int); 202 static void iwn_start(struct ifnet *); 203 static void iwn_watchdog(struct ifnet *); 204 static int iwn_ioctl(struct ifnet *, u_long, void *); 205 static int iwn_cmd(struct iwn_softc *, int, const void *, int, int); 206 static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *, 207 int); 208 static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *, 209 int); 210 static int iwn_set_link_quality(struct iwn_softc *, 211 struct ieee80211_node *); 212 static int iwn_add_broadcast_node(struct iwn_softc *, int); 213 static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); 214 static int iwn_set_critical_temp(struct iwn_softc *); 215 static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *); 216 static void iwn4965_power_calibration(struct iwn_softc *, int); 217 static int iwn4965_set_txpower(struct iwn_softc *, int); 218 static int iwn5000_set_txpower(struct iwn_softc *, int); 219 static int iwn4965_get_rssi(const struct iwn_rx_stat *); 220 static int iwn5000_get_rssi(const struct iwn_rx_stat *); 221 static int iwn_get_noise(const struct iwn_rx_general_stats *); 222 static int iwn4965_get_temperature(struct iwn_softc *); 223 static int iwn5000_get_temperature(struct iwn_softc *); 224 static int iwn_init_sensitivity(struct iwn_softc *); 225 static void iwn_collect_noise(struct iwn_softc *, 226 const struct iwn_rx_general_stats *); 227 static int iwn4965_init_gains(struct iwn_softc *); 228 static int iwn5000_init_gains(struct iwn_softc *); 229 static int iwn4965_set_gains(struct iwn_softc *); 230 static int iwn5000_set_gains(struct iwn_softc *); 231 static void iwn_tune_sensitivity(struct iwn_softc *, 232 const struct iwn_rx_stats *); 233 static int iwn_send_sensitivity(struct iwn_softc *); 234 static int iwn_set_pslevel(struct iwn_softc *, int, int, int); 235 static int iwn5000_runtime_calib(struct iwn_softc *); 236 237 static int iwn_config_bt_coex_bluetooth(struct iwn_softc *); 238 static int iwn_config_bt_coex_prio_table(struct iwn_softc *); 239 static int iwn_config_bt_coex_adv1(struct iwn_softc *); 240 static int iwn_config_bt_coex_adv2(struct iwn_softc *); 241 242 static int iwn_config(struct iwn_softc *); 243 static uint16_t iwn_get_active_dwell_time(struct iwn_softc *, uint16_t, 244 uint8_t); 245 static uint16_t iwn_limit_dwell(struct iwn_softc *, uint16_t); 246 static uint16_t iwn_get_passive_dwell_time(struct iwn_softc *, uint16_t); 247 
static int iwn_scan(struct iwn_softc *, uint16_t); 248 static int iwn_auth(struct iwn_softc *); 249 static int iwn_run(struct iwn_softc *); 250 #ifdef IWN_HWCRYPTO 251 static int iwn_set_key(struct ieee80211com *, struct ieee80211_node *, 252 struct ieee80211_key *); 253 static void iwn_delete_key(struct ieee80211com *, struct ieee80211_node *, 254 struct ieee80211_key *); 255 #endif 256 static int iwn_wme_update(struct ieee80211com *); 257 #ifndef IEEE80211_NO_HT 258 static int iwn_ampdu_rx_start(struct ieee80211com *, 259 struct ieee80211_node *, uint8_t); 260 static void iwn_ampdu_rx_stop(struct ieee80211com *, 261 struct ieee80211_node *, uint8_t); 262 static int iwn_ampdu_tx_start(struct ieee80211com *, 263 struct ieee80211_node *, uint8_t); 264 static void iwn_ampdu_tx_stop(struct ieee80211com *, 265 struct ieee80211_node *, uint8_t); 266 static void iwn4965_ampdu_tx_start(struct iwn_softc *, 267 struct ieee80211_node *, uint8_t, uint16_t); 268 static void iwn4965_ampdu_tx_stop(struct iwn_softc *, 269 uint8_t, uint16_t); 270 static void iwn5000_ampdu_tx_start(struct iwn_softc *, 271 struct ieee80211_node *, uint8_t, uint16_t); 272 static void iwn5000_ampdu_tx_stop(struct iwn_softc *, 273 uint8_t, uint16_t); 274 #endif 275 static int iwn5000_query_calibration(struct iwn_softc *); 276 static int iwn5000_send_calibration(struct iwn_softc *); 277 static int iwn5000_send_wimax_coex(struct iwn_softc *); 278 static int iwn6000_temp_offset_calib(struct iwn_softc *); 279 static int iwn2000_temp_offset_calib(struct iwn_softc *); 280 static int iwn4965_post_alive(struct iwn_softc *); 281 static int iwn5000_post_alive(struct iwn_softc *); 282 static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *, 283 int); 284 static int iwn4965_load_firmware(struct iwn_softc *); 285 static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t, 286 const uint8_t *, int); 287 static int iwn5000_load_firmware(struct iwn_softc *); 288 static int iwn_read_firmware_leg(struct iwn_softc *, 289 struct iwn_fw_info *); 290 static int iwn_read_firmware_tlv(struct iwn_softc *, 291 struct iwn_fw_info *, uint16_t); 292 static int iwn_read_firmware(struct iwn_softc *); 293 static int iwn_clock_wait(struct iwn_softc *); 294 static int iwn_apm_init(struct iwn_softc *); 295 static void iwn_apm_stop_master(struct iwn_softc *); 296 static void iwn_apm_stop(struct iwn_softc *); 297 static int iwn4965_nic_config(struct iwn_softc *); 298 static int iwn5000_nic_config(struct iwn_softc *); 299 static int iwn_hw_prepare(struct iwn_softc *); 300 static int iwn_hw_init(struct iwn_softc *); 301 static void iwn_hw_stop(struct iwn_softc *); 302 static int iwn_init(struct ifnet *); 303 static void iwn_stop(struct ifnet *, int); 304 305 /* XXX MCLGETI alternative */ 306 static struct mbuf *MCLGETIalt(struct iwn_softc *, int, 307 struct ifnet *, u_int); 308 #ifdef IWN_USE_RBUF 309 static struct iwn_rbuf *iwn_alloc_rbuf(struct iwn_softc *); 310 static void iwn_free_rbuf(struct mbuf *, void *, size_t, void *); 311 static int iwn_alloc_rpool(struct iwn_softc *); 312 static void iwn_free_rpool(struct iwn_softc *); 313 #endif 314 315 static void iwn_fix_channel(struct ieee80211com *, struct mbuf *, 316 struct iwn_rx_stat *); 317 318 #ifdef IWN_DEBUG 319 #define DPRINTF(x) do { if (iwn_debug > 0) printf x; } while (0) 320 #define DPRINTFN(n, x) do { if (iwn_debug >= (n)) printf x; } while (0) 321 int iwn_debug = 0; 322 #else 323 #define DPRINTF(x) 324 #define DPRINTFN(n, x) 325 #endif 326 327 CFATTACH_DECL_NEW(iwn, 
sizeof(struct iwn_softc), iwn_match, iwn_attach,
    iwn_detach, NULL);

static int
iwn_match(device_t parent, cfdata_t match __unused, void *aux)
{
	struct pci_attach_args *pa = aux;
	size_t i;

	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
		return 0;

	for (i = 0; i < __arraycount(iwn_devices); i++)
		if (PCI_PRODUCT(pa->pa_id) == iwn_devices[i])
			return 1;

	return 0;
}

static void
iwn_attach(device_t parent __unused, device_t self, void *aux)
{
	struct iwn_softc *sc = device_private(self);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct pci_attach_args *pa = aux;
	const char *intrstr;
	pcireg_t memtype, reg;
	int i, error;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	callout_init(&sc->calib_to, 0);
	callout_setfunc(&sc->calib_to, iwn_calib_timeout, sc);

	pci_aprint_devinfo(pa, NULL);

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
	if (error == 0) {
		aprint_error_dev(self,
		    "PCIe capability structure not found!\n");
		return;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	if (reg & 0xff00)
		pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* Enable bus-mastering. */
	/* XXX verify the bus-mastering is really needed (not in OpenBSD) */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IWN_PCI_BAR0);
	error = pci_mapreg_map(pa, IWN_PCI_BAR0, memtype, 0, &sc->sc_st,
	    &sc->sc_sh, NULL, &sc->sc_sz);
	if (error != 0) {
		aprint_error_dev(self, "can't map mem space\n");
		return;
	}

	sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwn_softintr, sc);
	if (sc->sc_soft_ih == NULL) {
		aprint_error_dev(self, "can't establish soft interrupt\n");
		goto unmap;
	}

	/* Install interrupt handler. */
	error = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
	if (error) {
		aprint_error_dev(self, "can't allocate interrupt\n");
		goto failsi;
	}
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX)
		CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
	else
		SET(reg, PCI_COMMAND_INTERRUPT_DISABLE);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
	    IPL_NET, iwn_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto failia;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

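	/*
	 * The MAC/PHY type read from IWN_HW_REV below selects the attach
	 * routine that fills in sc->ops (firmware load, EEPROM read, TX
	 * scheduler and calibration hooks) as well as the firmware image
	 * name: iwn4965_attach() for 4965AGN, iwn5000_attach() for all
	 * 5000 Series and newer adapters.
	 */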
	/* Read hardware revision and attach. */
	sc->hw_type =
	    (IWN_READ(sc, IWN_HW_REV) & IWN_HW_REV_TYPE_MASK)
	    >> IWN_HW_REV_TYPE_SHIFT;
	if (sc->hw_type == IWN_HW_REV_TYPE_4965)
		error = iwn4965_attach(sc, PCI_PRODUCT(pa->pa_id));
	else
		error = iwn5000_attach(sc, PCI_PRODUCT(pa->pa_id));
	if (error != 0) {
		aprint_error_dev(self, "could not attach device\n");
		goto failih;
	}

	if ((error = iwn_hw_prepare(sc)) != 0) {
		aprint_error_dev(self, "hardware not ready\n");
		goto failih;
	}

	/* Read MAC address, channels, etc from EEPROM. */
	if ((error = iwn_read_eeprom(sc)) != 0) {
		aprint_error_dev(self, "could not read EEPROM\n");
		goto failih;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwn_alloc_fwmem(sc)) != 0) {
		aprint_error_dev(self,
		    "could not allocate memory for firmware\n");
		goto failih;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwn_alloc_kw(sc)) != 0) {
		aprint_error_dev(self, "could not allocate keep warm page\n");
		goto fail1;
	}

	/* Allocate ICT table for 5000 Series. */
	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
	    (error = iwn_alloc_ict(sc)) != 0) {
		aprint_error_dev(self, "could not allocate ICT table\n");
		goto fail2;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwn_alloc_sched(sc)) != 0) {
		aprint_error_dev(self,
		    "could not allocate TX scheduler rings\n");
		goto fail3;
	}

#ifdef IWN_USE_RBUF
	/* Allocate RX buffers. */
	if ((error = iwn_alloc_rpool(sc)) != 0) {
		aprint_error_dev(self, "could not allocate RX buffers\n");
		goto fail3;
	}
#endif

	/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
	for (i = 0; i < sc->ntxqs; i++) {
		if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
			aprint_error_dev(self,
			    "could not allocate TX ring %d\n", i);
			goto fail4;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		aprint_error_dev(self, "could not allocate RX ring\n");
		goto fail4;
	}

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);

	/* Count the number of available chains. */
	sc->ntxchains =
	    ((sc->txchainmask >> 2) & 1) +
	    ((sc->txchainmask >> 1) & 1) +
	    ((sc->txchainmask >> 0) & 1);
	sc->nrxchains =
	    ((sc->rxchainmask >> 2) & 1) +
	    ((sc->rxchainmask >> 1) & 1) +
	    ((sc->rxchainmask >> 0) & 1);
	aprint_normal_dev(self, "MIMO %dT%dR, %.4s, address %s\n",
	    sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
	    ether_sprintf(ic->ic_myaddr));

	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	/* XXX OpenBSD has IEEE80211_C_WEP, IEEE80211_C_RSN,
	 * and IEEE80211_C_PMGT too. */
	ic->ic_caps =
	    IEEE80211_C_IBSS |		/* IBSS mode support */
	    IEEE80211_C_WPA |		/* 802.11i */
	    IEEE80211_C_MONITOR |	/* monitor mode supported */
	    IEEE80211_C_TXPMGT |	/* tx power management */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
	    IEEE80211_C_WME;		/* 802.11e */

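	/*
	 * 802.11n capabilities are only advertised when the EEPROM SKU
	 * word reported 11n support (IWN_FLAG_HAS_11N); A-MSDU up to
	 * 7935 bytes is only offered when 8KB RX buffers are in use.
	 */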
#ifndef IEEE80211_NO_HT
	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
		/* Set HT capabilities. */
		ic->ic_htcaps =
#if IWN_RBUF_SIZE == 8192
		    IEEE80211_HTCAP_AMSDU7935 |
#endif
		    IEEE80211_HTCAP_CBW20_40 |
		    IEEE80211_HTCAP_SGI20 |
		    IEEE80211_HTCAP_SGI40;
		if (sc->hw_type != IWN_HW_REV_TYPE_4965)
			ic->ic_htcaps |= IEEE80211_HTCAP_GF;
		if (sc->hw_type == IWN_HW_REV_TYPE_6050)
			ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
		else
			ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
	}
#endif	/* !IEEE80211_NO_HT */

	/* Set supported legacy rates. */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
	if (sc->sc_flags & IWN_FLAG_HAS_5GHZ) {
		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
	}
#ifndef IEEE80211_NO_HT
	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
		/* Set supported HT rates. */
		ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
		if (sc->nrxchains > 1)
			ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
		if (sc->nrxchains > 2)
			ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
	}
#endif

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[0];

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwn_init;
	ifp->if_ioctl = iwn_ioctl;
	ifp->if_start = iwn_start;
	ifp->if_stop = iwn_stop;
	ifp->if_watchdog = iwn_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	error = if_initialize(ifp);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
		    error);
		goto fail5;
	}
	ieee80211_ifattach(ic);
	/* Use common softint-based if_input */
	ifp->if_percpuq = if_percpuq_create(ifp);
	if_register(ifp);

	ic->ic_node_alloc = iwn_node_alloc;
	ic->ic_newassoc = iwn_newassoc;
#ifdef IWN_HWCRYPTO
	ic->ic_crypto.cs_key_set = iwn_set_key;
	ic->ic_crypto.cs_key_delete = iwn_delete_key;
#endif
	ic->ic_wme.wme_update = iwn_wme_update;
#ifndef IEEE80211_NO_HT
	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
	ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
	ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
#endif

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwn_newstate;
	ieee80211_media_init(ic, iwn_media_change, ieee80211_media_status);

	sc->amrr.amrr_min_success_threshold = 1;
	sc->amrr.amrr_max_success_threshold = 15;

	iwn_radiotap_attach(sc);

	/*
	 * XXX for NetBSD, OpenBSD timeout_set replaced by
	 * callout_init and callout_setfunc, above.
	 */

	if (pmf_device_register(self, NULL, iwn_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* XXX NetBSD add call to ieee80211_announce for dmesg. */
	ieee80211_announce(ic);

	sc->sc_flags |= IWN_FLAG_ATTACHED;
	return;

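	/*
	 * Everything below unwinds the allocations made above in
	 * reverse order; each failN label releases what had already
	 * been set up before the corresponding "goto failN".
	 */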
	/* Free allocated memory if something failed during attachment. */
fail5:	iwn_free_rx_ring(sc, &sc->rxq);
fail4:	while (--i >= 0)
		iwn_free_tx_ring(sc, &sc->txq[i]);
#ifdef IWN_USE_RBUF
	iwn_free_rpool(sc);
#endif
	iwn_free_sched(sc);
fail3:	if (sc->ict != NULL)
		iwn_free_ict(sc);
fail2:	iwn_free_kw(sc);
fail1:	iwn_free_fwmem(sc);
failih:	pci_intr_disestablish(sc->sc_pct, sc->sc_ih);
	sc->sc_ih = NULL;
failia:	pci_intr_release(sc->sc_pct, sc->sc_pihp, 1);
	sc->sc_pihp = NULL;
failsi:	softint_disestablish(sc->sc_soft_ih);
	sc->sc_soft_ih = NULL;
unmap:	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
}

int
iwn4965_attach(struct iwn_softc *sc, pci_product_id_t pid)
{
	struct iwn_ops *ops = &sc->ops;

	ops->load_firmware = iwn4965_load_firmware;
	ops->read_eeprom = iwn4965_read_eeprom;
	ops->post_alive = iwn4965_post_alive;
	ops->nic_config = iwn4965_nic_config;
	ops->config_bt_coex = iwn_config_bt_coex_bluetooth;
	ops->update_sched = iwn4965_update_sched;
	ops->get_temperature = iwn4965_get_temperature;
	ops->get_rssi = iwn4965_get_rssi;
	ops->set_txpower = iwn4965_set_txpower;
	ops->init_gains = iwn4965_init_gains;
	ops->set_gains = iwn4965_set_gains;
	ops->add_node = iwn4965_add_node;
	ops->tx_done = iwn4965_tx_done;
#ifndef IEEE80211_NO_HT
	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
#endif
	sc->ntxqs = IWN4965_NTXQUEUES;
	sc->ndmachnls = IWN4965_NDMACHNLS;
	sc->broadcast_id = IWN4965_ID_BROADCAST;
	sc->rxonsz = IWN4965_RXONSZ;
	sc->schedsz = IWN4965_SCHEDSZ;
	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
	sc->fwsz = IWN4965_FWSZ;
	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
	sc->limits = &iwn4965_sensitivity_limits;
	sc->fwname = "iwlwifi-4965-2.ucode";
	/* Override chains masks, ROM is known to be broken. */
	sc->txchainmask = IWN_ANT_AB;
	sc->rxchainmask = IWN_ANT_ABC;

	return 0;
}

int
iwn5000_attach(struct iwn_softc *sc, pci_product_id_t pid)
{
	struct iwn_ops *ops = &sc->ops;

	ops->load_firmware = iwn5000_load_firmware;
	ops->read_eeprom = iwn5000_read_eeprom;
	ops->post_alive = iwn5000_post_alive;
	ops->nic_config = iwn5000_nic_config;
	ops->config_bt_coex = iwn_config_bt_coex_bluetooth;
	ops->update_sched = iwn5000_update_sched;
	ops->get_temperature = iwn5000_get_temperature;
	ops->get_rssi = iwn5000_get_rssi;
	ops->set_txpower = iwn5000_set_txpower;
	ops->init_gains = iwn5000_init_gains;
	ops->set_gains = iwn5000_set_gains;
	ops->add_node = iwn5000_add_node;
	ops->tx_done = iwn5000_tx_done;
#ifndef IEEE80211_NO_HT
	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
#endif
	sc->ntxqs = IWN5000_NTXQUEUES;
	sc->ndmachnls = IWN5000_NDMACHNLS;
	sc->broadcast_id = IWN5000_ID_BROADCAST;
	sc->rxonsz = IWN5000_RXONSZ;
	sc->schedsz = IWN5000_SCHEDSZ;
	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
	sc->fwsz = IWN5000_FWSZ;
	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;

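	/*
	 * The rest is per-variant: each hardware revision picks its own
	 * sensitivity limits, firmware image name and, on devices with
	 * an advanced Bluetooth coexistence engine, a different
	 * config_bt_coex handler.
	 */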
	switch (sc->hw_type) {
	case IWN_HW_REV_TYPE_5100:
		sc->limits = &iwn5000_sensitivity_limits;
		sc->fwname = "iwlwifi-5000-2.ucode";
		/* Override chains masks, ROM is known to be broken. */
		sc->txchainmask = IWN_ANT_B;
		sc->rxchainmask = IWN_ANT_AB;
		break;
	case IWN_HW_REV_TYPE_5150:
		sc->limits = &iwn5150_sensitivity_limits;
		sc->fwname = "iwlwifi-5150-2.ucode";
		break;
	case IWN_HW_REV_TYPE_5300:
	case IWN_HW_REV_TYPE_5350:
		sc->limits = &iwn5000_sensitivity_limits;
		sc->fwname = "iwlwifi-5000-2.ucode";
		break;
	case IWN_HW_REV_TYPE_1000:
		sc->limits = &iwn1000_sensitivity_limits;
		if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_100_1 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_100_2)
			sc->fwname = "iwlwifi-100-5.ucode";
		else
			sc->fwname = "iwlwifi-1000-3.ucode";
		break;
	case IWN_HW_REV_TYPE_6000:
		sc->limits = &iwn6000_sensitivity_limits;
		sc->fwname = "iwlwifi-6000-4.ucode";
		if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_1 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_2) {
			sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
			/* Override chains masks, ROM is known to be broken. */
			sc->txchainmask = IWN_ANT_BC;
			sc->rxchainmask = IWN_ANT_BC;
		}
		break;
	case IWN_HW_REV_TYPE_6050:
		sc->limits = &iwn6000_sensitivity_limits;
		sc->fwname = "iwlwifi-6050-5.ucode";
		break;
	case IWN_HW_REV_TYPE_6005:
		sc->limits = &iwn6000_sensitivity_limits;
		/* Type 6030 cards return IWN_HW_REV_TYPE_6005 */
		if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_1030_1 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_1030_2 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6230_1 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6230_2 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6235 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6235_2) {
			sc->fwname = "iwlwifi-6000g2b-6.ucode";
			ops->config_bt_coex = iwn_config_bt_coex_adv1;
		}
		else
			sc->fwname = "iwlwifi-6000g2a-5.ucode";
		break;
	case IWN_HW_REV_TYPE_2030:
		sc->limits = &iwn2030_sensitivity_limits;
		sc->fwname = "iwlwifi-2030-6.ucode";
		ops->config_bt_coex = iwn_config_bt_coex_adv2;
		break;
	case IWN_HW_REV_TYPE_2000:
		sc->limits = &iwn2000_sensitivity_limits;
		sc->fwname = "iwlwifi-2000-6.ucode";
		break;
	case IWN_HW_REV_TYPE_135:
		sc->limits = &iwn2000_sensitivity_limits;
		sc->fwname = "iwlwifi-135-6.ucode";
		ops->config_bt_coex = iwn_config_bt_coex_adv2;
		break;
	case IWN_HW_REV_TYPE_105:
		sc->limits = &iwn2000_sensitivity_limits;
		sc->fwname = "iwlwifi-105-6.ucode";
		break;
	default:
		aprint_normal(": adapter type %d not supported\n", sc->hw_type);
		return ENOTSUP;
	}
	return 0;
}

/*
 * Attach the interface to 802.11 radiotap.
 */
static void
iwn_radiotap_attach(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ic.ic_ifp;

	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
	    &sc->sc_drvbpf);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWN_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWN_TX_RADIOTAP_PRESENT);
}

static int
iwn_detach(device_t self, int flags __unused)
{
	struct iwn_softc *sc = device_private(self);
	struct ifnet *ifp = sc->sc_ic.ic_ifp;
	int qid;

	if (!(sc->sc_flags & IWN_FLAG_ATTACHED))
		return 0;

	callout_stop(&sc->calib_to);

	/* Uninstall interrupt handler. */
	if (sc->sc_ih != NULL)
		pci_intr_disestablish(sc->sc_pct, sc->sc_ih);
	if (sc->sc_pihp != NULL)
		pci_intr_release(sc->sc_pct, sc->sc_pihp, 1);
	if (sc->sc_soft_ih != NULL)
		softint_disestablish(sc->sc_soft_ih);

	/* Free DMA resources. */
	iwn_free_rx_ring(sc, &sc->rxq);
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_free_tx_ring(sc, &sc->txq[qid]);
#ifdef IWN_USE_RBUF
	iwn_free_rpool(sc);
#endif
	iwn_free_sched(sc);
	iwn_free_kw(sc);
	if (sc->ict != NULL)
		iwn_free_ict(sc);
	iwn_free_fwmem(sc);

	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);

	ieee80211_ifdetach(&sc->sc_ic);
	if_detach(ifp);

	return 0;
}

#if 0
/*
 * XXX Investigate if clearing the PCI retry timeout could eliminate
 * the repeated scan calls. Also the calls to if_init and if_start
 * are similar to the effect of adding the call to ifioctl_common .
 */
static void
iwn_power(int why, void *arg)
{
	struct iwn_softc *sc = arg;
	struct ifnet *ifp;
	pcireg_t reg;
	int s;

	if (why != PWR_RESUME)
		return;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	if (reg & 0xff00)
		pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	s = splnet();
	ifp = &sc->sc_ic.ic_if;
	if (ifp->if_flags & IFF_UP) {
		ifp->if_init(ifp);
		if (ifp->if_flags & IFF_RUNNING)
			ifp->if_start(ifp);
	}
	splx(s);
}
#endif

static bool
iwn_resume(device_t dv, const pmf_qual_t *qual)
{
	return true;
}

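/*
 * iwn_nic_lock()/iwn_nic_unlock() bracket access to the "periphery"
 * registers reached through the iwn_prph_*() and iwn_mem_*() helpers
 * below.  The usual pattern, as used for instance in iwn_init_otprom():
 *
 *	if ((error = iwn_nic_lock(sc)) != 0)
 *		return error;
 *	iwn_prph_write(sc, addr, val);
 *	iwn_nic_unlock(sc);
 */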
static int
iwn_nic_lock(struct iwn_softc *sc)
{
	int ntries;

	/* Request exclusive access to NIC. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);

	/* Spin until we actually get the lock. */
	for (ntries = 0; ntries < 1000; ntries++) {
		if ((IWN_READ(sc, IWN_GP_CNTRL) &
		    (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}

static __inline void
iwn_nic_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
}

static __inline uint32_t
iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_PRPH_RDATA);
}

static __inline void
iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
}

static __inline void
iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
}

static __inline void
iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
}

static __inline void
iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
    const uint32_t *data, int count)
{
	for (; count > 0; count--, data++, addr += 4)
		iwn_prph_write(sc, addr, *data);
}

static __inline uint32_t
iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_MEM_RDATA);
}

static __inline void
iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_MEM_WDATA, data);
}

#ifndef IEEE80211_NO_HT
static __inline void
iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
{
	uint32_t tmp;

	tmp = iwn_mem_read(sc, addr & ~3);
	if (addr & 3)
		tmp = (tmp & 0x0000ffff) | data << 16;
	else
		tmp = (tmp & 0xffff0000) | data;
	iwn_mem_write(sc, addr & ~3, tmp);
}
#endif

static __inline void
iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
    int count)
{
	for (; count > 0; count--, addr += 4)
		*data++ = iwn_mem_read(sc, addr);
}

static __inline void
iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
    int count)
{
	for (; count > 0; count--, addr += 4)
		iwn_mem_write(sc, addr, val);
}

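/*
 * Access to the ROM is arbitrated by a hardware semaphore:
 * iwn_eeprom_lock() sets IWN_HW_IF_CONFIG_EEPROM_LOCKED and polls for
 * the bit to stick, retrying the whole request up to 100 times before
 * giving up with ETIMEDOUT.
 */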
static int
iwn_eeprom_lock(struct iwn_softc *sc)
{
	int i, ntries;

	for (i = 0; i < 100; i++) {
		/* Request exclusive access to EEPROM. */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);

		/* Spin until we actually get the lock. */
		for (ntries = 0; ntries < 100; ntries++) {
			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
				return 0;
			DELAY(10);
		}
	}
	return ETIMEDOUT;
}

static __inline void
iwn_eeprom_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
}

/*
 * Initialize access by host to One Time Programmable ROM.
 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
 */
static int
iwn_init_otprom(struct iwn_softc *sc)
{
	uint16_t prev = 0, base, next;
	int count, error;

	/* Wait for clock stabilization before accessing prph. */
	if ((error = iwn_clock_wait(sc)) != 0)
		return error;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	DELAY(5);
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	iwn_nic_unlock(sc);

	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
	if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
		    IWN_RESET_LINK_PWR_MGMT_DIS);
	}
	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
	/* Clear ECC status. */
	IWN_SETBITS(sc, IWN_OTP_GP,
	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);

	/*
	 * Find the block before last block (contains the EEPROM image)
	 * for HW without OTP shadow RAM.
	 */
	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
		/* Switch to absolute addressing mode. */
		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
		base = 0;
		for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
			error = iwn_read_prom_data(sc, base, &next, 2);
			if (error != 0)
				return error;
			if (next == 0)	/* End of linked-list. */
				break;
			prev = base;
			base = le16toh(next);
		}
		if (count == 0 || count == IWN1000_OTP_NBLOCKS)
			return EIO;
		/* Skip "next" word. */
		sc->prom_base = prev + 1;
	}
	return 0;
}

static int
iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
{
	uint8_t *out = data;
	uint32_t val, tmp;
	int ntries;

	addr += sc->prom_base;
	for (; count > 0; count -= 2, addr++) {
		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
		for (ntries = 0; ntries < 10; ntries++) {
			val = IWN_READ(sc, IWN_EEPROM);
			if (val & IWN_EEPROM_READ_VALID)
				break;
			DELAY(5);
		}
		if (ntries == 10) {
			aprint_error_dev(sc->sc_dev,
			    "timeout reading ROM at 0x%x\n", addr);
			return ETIMEDOUT;
		}
		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
			/* OTPROM, check for ECC errors. */
			tmp = IWN_READ(sc, IWN_OTP_GP);
			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
				aprint_error_dev(sc->sc_dev,
				    "OTPROM ECC error at 0x%x\n", addr);
				return EIO;
			}
			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
				/* Correctable ECC error, clear bit. */
				IWN_SETBITS(sc, IWN_OTP_GP,
				    IWN_OTP_GP_ECC_CORR_STTS);
			}
		}
		*out++ = val >> 16;
		if (count > 1)
			*out++ = val >> 24;
	}
	return 0;
}

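/*
 * Helper for all physically contiguous DMA allocations in this file:
 * create a map, allocate and map a single segment, load it, and hand
 * back both the kernel virtual and the bus address.  The memory is
 * zeroed by hand because bus_dmamem_alloc() is not called with
 * BUS_DMA_ZERO here (see the XXX notes below).
 */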
static int
iwn_dma_contig_alloc(bus_dma_tag_t tag, struct iwn_dma_info *dma, void **kvap,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, error;

	dma->tag = tag;
	dma->size = size;

	error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */
	if (error != 0)
		goto fail;

	error = bus_dmamem_map(tag, &dma->seg, 1, size, &dma->vaddr,
	    BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_COHERENT */
	if (error != 0)
		goto fail;

	error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	/* XXX Presumably needed because of missing BUS_DMA_ZERO, above. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);

	dma->paddr = dma->map->dm_segs[0].ds_addr;
	if (kvap != NULL)
		*kvap = dma->vaddr;

	return 0;

fail:	iwn_dma_contig_free(dma);
	return error;
}

static void
iwn_dma_contig_free(struct iwn_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}

static int
iwn_alloc_sched(struct iwn_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    (void **)&sc->sched, sc->schedsz, 1024);
}

static void
iwn_free_sched(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->sched_dma);
}

static int
iwn_alloc_kw(struct iwn_softc *sc)
{
	/* "Keep Warm" page must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, NULL, 4096,
	    4096);
}

static void
iwn_free_kw(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->kw_dma);
}

static int
iwn_alloc_ict(struct iwn_softc *sc)
{
	/* ICT table must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    (void **)&sc->ict, IWN_ICT_SIZE, 4096);
}

static void
iwn_free_ict(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->ict_dma);
}

static int
iwn_alloc_fwmem(struct iwn_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, NULL,
	    sc->fwsz, 16);
}

static void
iwn_free_fwmem(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->fw_dma);
}

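/*
 * The RX ring is an array of IWN_RX_RING_COUNT 32-bit descriptors,
 * each holding the bus address (shifted right by 8, hence the
 * 256-byte alignment of the buffers) of one IWN_RBUF_SIZE receive
 * buffer, plus a small status area updated by the firmware.
 */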
static int
iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
	    (void **)&ring->desc, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}

	/* Allocate RX status area (16-byte aligned). */
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    (void **)&ring->stat, sizeof (struct iwn_rx_status), 16);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];

		error = bus_dmamap_create(sc->sc_dmat, IWN_RBUF_SIZE, 1,
		    IWN_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		data->m = MCLGETIalt(sc, M_DONTWAIT, NULL, IWN_RBUF_SIZE);
		if (data->m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate RX mbuf\n");
			error = ENOBUFS;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), IWN_RBUF_SIZE, NULL,
		    BUS_DMA_NOWAIT | BUS_DMA_READ);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not map mbuf (error %d)\n", error);
			goto fail;
		}

		/* Set physical address of RX buffer (256-byte aligned). */
		ring->desc[i] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	}

	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0, size,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	iwn_free_rx_ring(sc, ring);
	return error;
}

static void
iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	int ntries;

	if (iwn_nic_lock(sc) == 0) {
		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
			    IWN_FH_RX_STATUS_IDLE)
				break;
			DELAY(10);
		}
		iwn_nic_unlock(sc);
	}
	ring->cur = 0;
	sc->last_rx_valid = 0;
}

static void
iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	int i;

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

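/*
 * Each TX ring has IWN_TX_RING_COUNT descriptors and, for the rings
 * that carry EDCA traffic or commands (qid 0-4), a parallel array of
 * iwn_tx_cmd buffers; data->scratch_paddr points 12 bytes into each
 * command, which the hardware uses as a scratch area.
 */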
static int
iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
	    (void **)&ring->desc, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	/*
	 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 * XXX Do we really need to allocate descriptors for other rings?
	 */
	if (qid > 4)
		return 0;

	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma,
	    (void **)&ring->cmd, size, 4);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + 12;
		paddr += sizeof (struct iwn_tx_cmd);

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    IWN_MAX_SCATTER - 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	return 0;

fail:	iwn_free_tx_ring(sc, ring);
	return error;
}

static void
iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

static void
iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

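/*
 * On 5000 Series and later the driver runs in "ICT" (interrupt cause
 * table) mode: the device DMAs interrupt causes into the 4KB table
 * allocated by iwn_alloc_ict() instead of the driver reading them from
 * the IWN_INT register.  This routine clears the table, programs its
 * physical address and re-enables interrupts.
 */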
static void
iwn5000_ict_reset(struct iwn_softc *sc)
{
	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);

	/* Reset ICT table. */
	memset(sc->ict, 0, IWN_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWN_ICT_SIZE,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	DPRINTF(("enabling ICT\n"));
	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);

	/* Enable periodic RX interrupt. */
	sc->int_mask |= IWN_INT_RX_PERIODIC;
	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWN_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
}

static int
iwn_read_eeprom(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t val;
	int error;

	/* Check whether adapter has an EEPROM or an OTPROM. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
	DPRINTF(("%s found\n", (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ?
	    "OTPROM" : "EEPROM"));

	/* Adapter has to be powered on for EEPROM access to work. */
	if ((error = iwn_apm_init(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not power ON adapter\n");
		return error;
	}

	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
		aprint_error_dev(sc->sc_dev,
		    "bad ROM signature\n");
		return EIO;
	}
	if ((error = iwn_eeprom_lock(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not lock ROM (error=%d)\n", error);
		return error;
	}
	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
		if ((error = iwn_init_otprom(sc)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not initialize OTPROM\n");
			return error;
		}
	}

	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
	DPRINTF(("SKU capabilities=0x%04x\n", le16toh(val)));
	/* Check if HT support is bonded out. */
	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
		sc->sc_flags |= IWN_FLAG_HAS_11N;

	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
	sc->rfcfg = le16toh(val);
	DPRINTF(("radio config=0x%04x\n", sc->rfcfg));
	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
	if (sc->txchainmask == 0)
		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
	if (sc->rxchainmask == 0)
		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);

	/* Read MAC address. */
	iwn_read_prom_data(sc, IWN_EEPROM_MAC, ic->ic_myaddr, 6);

	/* Read adapter-specific information from EEPROM. */
	ops->read_eeprom(sc);

	iwn_apm_stop(sc);	/* Power OFF adapter. */

	iwn_eeprom_unlock(sc);
	return 0;
}

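/*
 * 4965AGN-specific ROM contents: regulatory domain, per-band channel
 * lists, board maximum TX power, and the factory TX power samples
 * (together with the voltage they were taken at) that the 4965 TX
 * power calibration code interpolates from later on.
 */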
static void
iwn4965_read_eeprom(struct iwn_softc *sc)
{
	uint32_t addr;
	uint16_t val;
	int i;

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < 5; i++) {
		addr = iwn4965_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
	sc->maxpwr2GHz = val & 0xff;
	sc->maxpwr5GHz = val >> 8;
	/* Check that EEPROM values are within valid range. */
	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
		sc->maxpwr5GHz = 38;
	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
		sc->maxpwr2GHz = 38;
	DPRINTF(("maxpwr 2GHz=%d 5GHz=%d\n", sc->maxpwr2GHz, sc->maxpwr5GHz));

	/* Read samples for each TX power group. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
	    sizeof sc->bands);

	/* Read voltage at which samples were taken. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
	sc->eeprom_voltage = (int16_t)le16toh(val);
	DPRINTF(("voltage=%d (in 0.3V)\n", sc->eeprom_voltage));

#ifdef IWN_DEBUG
	/* Print samples. */
	if (iwn_debug > 0) {
		for (i = 0; i < IWN_NBANDS; i++)
			iwn4965_print_power_group(sc, i);
	}
#endif
}

#ifdef IWN_DEBUG
static void
iwn4965_print_power_group(struct iwn_softc *sc, int i)
{
	struct iwn4965_eeprom_band *band = &sc->bands[i];
	struct iwn4965_eeprom_chan_samples *chans = band->chans;
	int j, c;

	aprint_normal("===band %d===\n", i);
	aprint_normal("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
	aprint_normal("chan1 num=%d\n", chans[0].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			aprint_normal("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[0].samples[c][j].temp,
			    chans[0].samples[c][j].gain,
			    chans[0].samples[c][j].power,
			    chans[0].samples[c][j].pa_det);
		}
	}
	aprint_normal("chan2 num=%d\n", chans[1].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			aprint_normal("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[1].samples[c][j].temp,
			    chans[1].samples[c][j].gain,
			    chans[1].samples[c][j].power,
			    chans[1].samples[c][j].pa_det);
		}
	}
}
#endif

static void
iwn5000_read_eeprom(struct iwn_softc *sc)
{
	struct iwn5000_eeprom_calib_hdr hdr;
	int32_t volt;
	uint32_t base, addr;
	uint16_t val;
	int i;

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
	    sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < 5; i++) {
		addr = base + iwn5000_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read enhanced TX power information for 6000 Series. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
		iwn_read_eeprom_enhinfo(sc);

	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
	DPRINTF(("calib version=%u pa type=%u voltage=%u\n",
	    hdr.version, hdr.pa_type, le16toh(hdr.volt)));
	sc->calib_ver = hdr.version;

	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
	    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
	    sc->hw_type == IWN_HW_REV_TYPE_135 ||
	    sc->hw_type == IWN_HW_REV_TYPE_105) {
		sc->eeprom_voltage = le16toh(hdr.volt);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp = le16toh(val);
		iwn_read_prom_data(sc, base + IWN2000_EEPROM_RAWTEMP, &val, 2);
		sc->eeprom_rawtemp = le16toh(val);
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* Compute temperature offset. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp = le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		volt = le16toh(val);
		sc->temp_off = sc->eeprom_temp - (volt / -5);
		DPRINTF(("temp=%d volt=%d offset=%dK\n",
		    sc->eeprom_temp, volt, sc->temp_off));
	} else {
		/* Read crystal calibration. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
		    &sc->eeprom_crystal, sizeof (uint32_t));
		DPRINTF(("crystal calibration 0x%08x\n",
		    le32toh(sc->eeprom_crystal)));
	}
}

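/*
 * Populate ic_channels[] from one ROM band descriptor: entries
 * without IWN_EEPROM_CHAN_VALID are skipped, entries without
 * IWN_EEPROM_CHAN_ACTIVE are marked passive-scan only, and the
 * regulatory maximum TX power is saved in sc->maxpwr[].
 */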
static void
iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
{
	struct ieee80211com *ic = &sc->sc_ic;
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND];
	uint8_t chan;
	int i;

	iwn_read_prom_data(sc, addr, channels,
	    band->nchan * sizeof (struct iwn_eeprom_chan));

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID))
			continue;

		chan = band->chan[i];

		if (n == 0) {	/* 2GHz band */
			ic->ic_channels[chan].ic_freq =
			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ);
			ic->ic_channels[chan].ic_flags =
			    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
			    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;

		} else {	/* 5GHz band */
			/*
			 * Some adapters support channels 7, 8, 11 and 12
			 * both in the 2GHz and 4.9GHz bands.
			 * Because of limitations in our net80211 layer,
			 * we don't support them in the 4.9GHz band.
			 */
			if (chan <= 14)
				continue;

			ic->ic_channels[chan].ic_freq =
			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ);
			ic->ic_channels[chan].ic_flags = IEEE80211_CHAN_A;
			/* We have at least one valid 5GHz channel. */
			sc->sc_flags |= IWN_FLAG_HAS_5GHZ;
		}

		/* Is active scan allowed on this channel? */
		if (!(channels[i].flags & IWN_EEPROM_CHAN_ACTIVE)) {
			ic->ic_channels[chan].ic_flags |=
			    IEEE80211_CHAN_PASSIVE;
		}

		/* Save maximum allowed TX power for this channel. */
		sc->maxpwr[chan] = channels[i].maxpwr;

		DPRINTF(("adding chan %d flags=0x%x maxpwr=%d\n",
		    chan, channels[i].flags, sc->maxpwr[chan]));
	}
}

static void
iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
{
	struct iwn_eeprom_enhinfo enhinfo[35];
	uint16_t val, base;
	int8_t maxpwr;
	uint8_t flags;
	int i;

	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
	    enhinfo, sizeof enhinfo);

	memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
	for (i = 0; i < __arraycount(enhinfo); i++) {
		flags = enhinfo[i].flags;
		if (!(flags & IWN_ENHINFO_VALID))
			continue;	/* Skip invalid entries. */

		maxpwr = 0;
		if (sc->txchainmask & IWN_ANT_A)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
		if (sc->txchainmask & IWN_ANT_B)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
		if (sc->txchainmask & IWN_ANT_C)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
		if (sc->ntxchains == 2)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
		else if (sc->ntxchains == 3)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
		maxpwr /= 2;	/* Convert half-dBm to dBm. */

		DPRINTF(("enhinfo %d, maxpwr=%d\n", i, maxpwr));
		sc->enh_maxpwr[i] = maxpwr;
	}
}

static struct ieee80211_node *
iwn_node_alloc(struct ieee80211_node_table *ic __unused)
{
	return malloc(sizeof (struct iwn_node), M_80211_NODE,
	    M_NOWAIT | M_ZERO);
}

static void
iwn_newassoc(struct ieee80211_node *ni, int isnew)
{
	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct iwn_node *wn = (void *)ni;
	uint8_t rate;
	int ridx, i;

	ieee80211_amrr_node_init(&sc->amrr, &wn->amn);
	/* Start at lowest available bit-rate, AMRR will raise. */
	ni->ni_txrate = 0;

	for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
		rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++)
			if (iwn_rates[ridx].rate == rate)
				break;
		wn->ridx[i] = ridx;
	}
}

static int
iwn_media_change(struct ifnet *ifp)
{
	struct iwn_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int error;

	error = ieee80211_media_change(ifp);
	if (error != ENETRESET)
		return error;

	if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++)
			if (iwn_rates[ridx].rate == rate)
				break;
		sc->fixed_ridx = ridx;
	}

	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwn_stop(ifp, 0);
		error = iwn_init(ifp);
	}
	return error;
}

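/*
 * net80211 state machine override.  Firmware scans are started from
 * here: a SCAN request received while IWN_FLAG_SCANNING is set is
 * ignored, and on AUTH/RUN transitions to a passive channel the driver
 * defers transmission until a beacon is heard (sc_beacon_wait).
 */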
*/ 1804 1805 DPRINTF(("enhinfo %d, maxpwr=%d\n", i, maxpwr)); 1806 sc->enh_maxpwr[i] = maxpwr; 1807 } 1808 } 1809 1810 static struct ieee80211_node * 1811 iwn_node_alloc(struct ieee80211_node_table *ic __unused) 1812 { 1813 return malloc(sizeof (struct iwn_node), M_80211_NODE, M_NOWAIT | M_ZERO); 1814 } 1815 1816 static void 1817 iwn_newassoc(struct ieee80211_node *ni, int isnew) 1818 { 1819 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 1820 struct iwn_node *wn = (void *)ni; 1821 uint8_t rate; 1822 int ridx, i; 1823 1824 ieee80211_amrr_node_init(&sc->amrr, &wn->amn); 1825 /* Start at lowest available bit-rate, AMRR will raise. */ 1826 ni->ni_txrate = 0; 1827 1828 for (i = 0; i < ni->ni_rates.rs_nrates; i++) { 1829 rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL; 1830 /* Map 802.11 rate to HW rate index. */ 1831 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) 1832 if (iwn_rates[ridx].rate == rate) 1833 break; 1834 wn->ridx[i] = ridx; 1835 } 1836 } 1837 1838 static int 1839 iwn_media_change(struct ifnet *ifp) 1840 { 1841 struct iwn_softc *sc = ifp->if_softc; 1842 struct ieee80211com *ic = &sc->sc_ic; 1843 uint8_t rate, ridx; 1844 int error; 1845 1846 error = ieee80211_media_change(ifp); 1847 if (error != ENETRESET) 1848 return error; 1849 1850 if (ic->ic_fixed_rate != -1) { 1851 rate = ic->ic_sup_rates[ic->ic_curmode]. 1852 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL; 1853 /* Map 802.11 rate to HW rate index. */ 1854 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) 1855 if (iwn_rates[ridx].rate == rate) 1856 break; 1857 sc->fixed_ridx = ridx; 1858 } 1859 1860 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 1861 (IFF_UP | IFF_RUNNING)) { 1862 iwn_stop(ifp, 0); 1863 error = iwn_init(ifp); 1864 } 1865 return error; 1866 } 1867 1868 static int 1869 iwn_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 1870 { 1871 struct ifnet *ifp = ic->ic_ifp; 1872 struct iwn_softc *sc = ifp->if_softc; 1873 int error; 1874 1875 callout_stop(&sc->calib_to); 1876 1877 switch (nstate) { 1878 case IEEE80211_S_SCAN: 1879 /* XXX Do not abort a running scan. */ 1880 if (sc->sc_flags & IWN_FLAG_SCANNING) { 1881 if (ic->ic_state != nstate) 1882 aprint_debug_dev(sc->sc_dev, "scan request(%d) " 1883 "while scanning(%d) ignored\n", nstate, 1884 ic->ic_state); 1885 break; 1886 } 1887 1888 /* XXX Not sure if call and flags are needed. */ 1889 ieee80211_node_table_reset(&ic->ic_scan); 1890 ic->ic_flags |= IEEE80211_F_SCAN | IEEE80211_F_ASCAN; 1891 sc->sc_flags |= IWN_FLAG_SCANNING_2GHZ; 1892 1893 /* Make the link LED blink while we're scanning. */ 1894 iwn_set_led(sc, IWN_LED_LINK, 10, 10); 1895 1896 if ((error = iwn_scan(sc, IEEE80211_CHAN_2GHZ)) != 0) { 1897 aprint_error_dev(sc->sc_dev, 1898 "could not initiate scan\n"); 1899 return error; 1900 } 1901 ic->ic_state = nstate; 1902 return 0; 1903 1904 case IEEE80211_S_ASSOC: 1905 if (ic->ic_state != IEEE80211_S_RUN) 1906 break; 1907 /* FALLTHROUGH */ 1908 case IEEE80211_S_AUTH: 1909 /* Reset state to handle reassociations correctly. */ 1910 sc->rxon.associd = 0; 1911 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS); 1912 sc->calib.state = IWN_CALIB_STATE_INIT; 1913 1914 /* Wait until we hear a beacon before we transmit */ 1915 if (IEEE80211_IS_CHAN_PASSIVE(ic->ic_curchan)) 1916 sc->sc_beacon_wait = 1; 1917 1918 if ((error = iwn_auth(sc)) != 0) { 1919 aprint_error_dev(sc->sc_dev, 1920 "could not move to auth state\n"); 1921 return error; 1922 } 1923 break; 1924 1925 case IEEE80211_S_RUN: 1926 /* 1927 * RUN -> RUN transition; Just restart timers. 
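 * Only the TX power calibration counter is reset here; the firmware
 * state is left untouched.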
1928 */ 1929 if (ic->ic_state == IEEE80211_S_RUN) { 1930 sc->calib_cnt = 0; 1931 break; 1932 } 1933 1934 /* Wait until we hear a beacon before we transmit */ 1935 if (IEEE80211_IS_CHAN_PASSIVE(ic->ic_curchan)) 1936 sc->sc_beacon_wait = 1; 1937 1938 if ((error = iwn_run(sc)) != 0) { 1939 aprint_error_dev(sc->sc_dev, 1940 "could not move to run state\n"); 1941 return error; 1942 } 1943 break; 1944 1945 case IEEE80211_S_INIT: 1946 sc->sc_flags &= ~IWN_FLAG_SCANNING; 1947 sc->calib.state = IWN_CALIB_STATE_INIT; 1948 /* 1949 * Purge the xmit queue so we don't have old frames 1950 * during a new association attempt. 1951 */ 1952 sc->sc_beacon_wait = 0; 1953 ifp->if_flags &= ~IFF_OACTIVE; 1954 iwn_start(ifp); 1955 break; 1956 } 1957 1958 return sc->sc_newstate(ic, nstate, arg); 1959 } 1960 1961 static void 1962 iwn_iter_func(void *arg, struct ieee80211_node *ni) 1963 { 1964 struct iwn_softc *sc = arg; 1965 struct iwn_node *wn = (struct iwn_node *)ni; 1966 1967 ieee80211_amrr_choose(&sc->amrr, ni, &wn->amn); 1968 } 1969 1970 static void 1971 iwn_calib_timeout(void *arg) 1972 { 1973 struct iwn_softc *sc = arg; 1974 struct ieee80211com *ic = &sc->sc_ic; 1975 int s; 1976 1977 s = splnet(); 1978 if (ic->ic_fixed_rate == -1) { 1979 if (ic->ic_opmode == IEEE80211_M_STA) 1980 iwn_iter_func(sc, ic->ic_bss); 1981 else 1982 ieee80211_iterate_nodes(&ic->ic_sta, iwn_iter_func, sc); 1983 } 1984 /* Force automatic TX power calibration every 60 secs. */ 1985 if (++sc->calib_cnt >= 120) { 1986 uint32_t flags = 0; 1987 1988 DPRINTF(("sending request for statistics\n")); 1989 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, 1990 sizeof flags, 1); 1991 sc->calib_cnt = 0; 1992 } 1993 splx(s); 1994 1995 /* Automatic rate control triggered every 500ms. */ 1996 callout_schedule(&sc->calib_to, mstohz(500)); 1997 } 1998 1999 /* 2000 * Process an RX_PHY firmware notification. This is usually immediately 2001 * followed by an MPDU_RX_DONE notification. 2002 */ 2003 static void 2004 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2005 struct iwn_rx_data *data) 2006 { 2007 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); 2008 2009 DPRINTFN(2, ("received PHY stats\n")); 2010 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2011 sizeof (*stat), BUS_DMASYNC_POSTREAD); 2012 2013 /* Save RX statistics, they will be used on MPDU_RX_DONE. */ 2014 memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); 2015 sc->last_rx_valid = 1; 2016 } 2017 2018 /* 2019 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 2020 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 2021 */ 2022 static void 2023 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2024 struct iwn_rx_data *data) 2025 { 2026 struct iwn_ops *ops = &sc->ops; 2027 struct ieee80211com *ic = &sc->sc_ic; 2028 struct ifnet *ifp = ic->ic_ifp; 2029 struct iwn_rx_ring *ring = &sc->rxq; 2030 struct ieee80211_frame *wh; 2031 struct ieee80211_node *ni; 2032 struct mbuf *m, *m1; 2033 struct iwn_rx_stat *stat; 2034 char *head; 2035 uint32_t flags; 2036 int error, len, rssi, s; 2037 2038 if (desc->type == IWN_MPDU_RX_DONE) { 2039 /* Check for prior RX_PHY notification. 
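 * The PHY statistics saved by iwn_rx_phy() are required to interpret
 * this MPDU; without them the frame is dropped.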
*/ 2040 if (!sc->last_rx_valid) { 2041 DPRINTF(("missing RX_PHY\n")); 2042 return; 2043 } 2044 sc->last_rx_valid = 0; 2045 stat = &sc->last_rx_stat; 2046 } else 2047 stat = (struct iwn_rx_stat *)(desc + 1); 2048 2049 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWN_RBUF_SIZE, 2050 BUS_DMASYNC_POSTREAD); 2051 2052 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 2053 aprint_error_dev(sc->sc_dev, 2054 "invalid RX statistic header\n"); 2055 return; 2056 } 2057 if (desc->type == IWN_MPDU_RX_DONE) { 2058 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 2059 head = (char *)(mpdu + 1); 2060 len = le16toh(mpdu->len); 2061 } else { 2062 head = (char *)(stat + 1) + stat->cfg_phy_len; 2063 len = le16toh(stat->len); 2064 } 2065 2066 flags = le32toh(*(uint32_t *)(head + len)); 2067 2068 /* Discard frames with a bad FCS early. */ 2069 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 2070 DPRINTFN(2, ("RX flags error %x\n", flags)); 2071 ifp->if_ierrors++; 2072 return; 2073 } 2074 /* Discard frames that are too short. */ 2075 if (len < sizeof (*wh)) { 2076 DPRINTF(("frame too short: %d\n", len)); 2077 ic->ic_stats.is_rx_tooshort++; 2078 ifp->if_ierrors++; 2079 return; 2080 } 2081 2082 m1 = MCLGETIalt(sc, M_DONTWAIT, NULL, IWN_RBUF_SIZE); 2083 if (m1 == NULL) { 2084 ic->ic_stats.is_rx_nobuf++; 2085 ifp->if_ierrors++; 2086 return; 2087 } 2088 bus_dmamap_unload(sc->sc_dmat, data->map); 2089 2090 error = bus_dmamap_load(sc->sc_dmat, data->map, mtod(m1, void *), 2091 IWN_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_READ); 2092 if (error != 0) { 2093 m_freem(m1); 2094 2095 /* Try to reload the old mbuf. */ 2096 error = bus_dmamap_load(sc->sc_dmat, data->map, 2097 mtod(data->m, void *), IWN_RBUF_SIZE, NULL, 2098 BUS_DMA_NOWAIT | BUS_DMA_READ); 2099 if (error != 0) { 2100 panic("%s: could not load old RX mbuf", 2101 device_xname(sc->sc_dev)); 2102 } 2103 /* Physical address may have changed. */ 2104 ring->desc[ring->cur] = 2105 htole32(data->map->dm_segs[0].ds_addr >> 8); 2106 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 2107 ring->cur * sizeof (uint32_t), sizeof (uint32_t), 2108 BUS_DMASYNC_PREWRITE); 2109 ifp->if_ierrors++; 2110 return; 2111 } 2112 2113 m = data->m; 2114 data->m = m1; 2115 /* Update RX descriptor. */ 2116 ring->desc[ring->cur] = htole32(data->map->dm_segs[0].ds_addr >> 8); 2117 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 2118 ring->cur * sizeof (uint32_t), sizeof (uint32_t), 2119 BUS_DMASYNC_PREWRITE); 2120 2121 /* Finalize mbuf. */ 2122 m_set_rcvif(m, ifp); 2123 m->m_data = head; 2124 m->m_pkthdr.len = m->m_len = len; 2125 2126 s = splnet(); 2127 2128 /* Grab a reference to the source node. */ 2129 wh = mtod(m, struct ieee80211_frame *); 2130 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2131 2132 /* XXX OpenBSD adds decryption here (see also comments in iwn_tx). */ 2133 /* NetBSD does decryption in ieee80211_input. 
*/ 2134 2135 rssi = ops->get_rssi(stat); 2136 2137 /* XXX Added for NetBSD: scans never stop without it */ 2138 if (ic->ic_state == IEEE80211_S_SCAN) 2139 iwn_fix_channel(ic, m, stat); 2140 2141 if (sc->sc_drvbpf != NULL) { 2142 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 2143 2144 tap->wr_flags = 0; 2145 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 2146 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2147 tap->wr_chan_freq = 2148 htole16(ic->ic_channels[stat->chan].ic_freq); 2149 tap->wr_chan_flags = 2150 htole16(ic->ic_channels[stat->chan].ic_flags); 2151 tap->wr_dbm_antsignal = (int8_t)rssi; 2152 tap->wr_dbm_antnoise = (int8_t)sc->noise; 2153 tap->wr_tsft = stat->tstamp; 2154 switch (stat->rate) { 2155 /* CCK rates. */ 2156 case 10: tap->wr_rate = 2; break; 2157 case 20: tap->wr_rate = 4; break; 2158 case 55: tap->wr_rate = 11; break; 2159 case 110: tap->wr_rate = 22; break; 2160 /* OFDM rates. */ 2161 case 0xd: tap->wr_rate = 12; break; 2162 case 0xf: tap->wr_rate = 18; break; 2163 case 0x5: tap->wr_rate = 24; break; 2164 case 0x7: tap->wr_rate = 36; break; 2165 case 0x9: tap->wr_rate = 48; break; 2166 case 0xb: tap->wr_rate = 72; break; 2167 case 0x1: tap->wr_rate = 96; break; 2168 case 0x3: tap->wr_rate = 108; break; 2169 /* Unknown rate: should not happen. */ 2170 default: tap->wr_rate = 0; 2171 } 2172 2173 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m, BPF_D_IN); 2174 } 2175 2176 /* 2177 * If it's a beacon and we're waiting, then do the wakeup. 2178 */ 2179 if (sc->sc_beacon_wait) { 2180 uint8_t type, subtype; 2181 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2182 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2183 /* 2184 * This assumes at this point we've received our own 2185 * beacon. 2186 */ 2187 if (type == IEEE80211_FC0_TYPE_MGT && 2188 subtype == IEEE80211_FC0_SUBTYPE_BEACON) { 2189 sc->sc_beacon_wait = 0; 2190 ifp->if_flags &= ~IFF_OACTIVE; 2191 iwn_start(ifp); 2192 } 2193 } 2194 2195 /* Send the frame to the 802.11 layer. */ 2196 ieee80211_input(ic, m, ni, rssi, 0); 2197 2198 /* Node is no longer needed. */ 2199 ieee80211_free_node(ni); 2200 2201 splx(s); 2202 } 2203 2204 #ifndef IEEE80211_NO_HT 2205 /* Process an incoming Compressed BlockAck. */ 2206 static void 2207 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2208 struct iwn_rx_data *data) 2209 { 2210 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 2211 struct iwn_tx_ring *txq; 2212 2213 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), sizeof (*ba), 2214 BUS_DMASYNC_POSTREAD); 2215 2216 txq = &sc->txq[le16toh(ba->qid)]; 2217 /* XXX TBD */ 2218 } 2219 #endif 2220 2221 /* 2222 * Process a CALIBRATION_RESULT notification sent by the initialization 2223 * firmware on response to a CMD_CALIB_CONFIG command (5000 only). 2224 */ 2225 static void 2226 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2227 struct iwn_rx_data *data) 2228 { 2229 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 2230 int len, idx = -1; 2231 2232 /* Runtime firmware should not send such a notification. 
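 * Calibration results are only expected while the init firmware is
 * running; once IWN_FLAG_CALIB_DONE is set they are ignored.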
*/ 2233 if (sc->sc_flags & IWN_FLAG_CALIB_DONE) 2234 return; 2235 2236 len = (le32toh(desc->len) & 0x3fff) - 4; 2237 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), len, 2238 BUS_DMASYNC_POSTREAD); 2239 2240 switch (calib->code) { 2241 case IWN5000_PHY_CALIB_DC: 2242 if (sc->hw_type == IWN_HW_REV_TYPE_5150 || 2243 sc->hw_type == IWN_HW_REV_TYPE_2030 || 2244 sc->hw_type == IWN_HW_REV_TYPE_2000 || 2245 sc->hw_type == IWN_HW_REV_TYPE_135 || 2246 sc->hw_type == IWN_HW_REV_TYPE_105) 2247 idx = 0; 2248 break; 2249 case IWN5000_PHY_CALIB_LO: 2250 idx = 1; 2251 break; 2252 case IWN5000_PHY_CALIB_TX_IQ: 2253 idx = 2; 2254 break; 2255 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 2256 if (sc->hw_type < IWN_HW_REV_TYPE_6000 && 2257 sc->hw_type != IWN_HW_REV_TYPE_5150) 2258 idx = 3; 2259 break; 2260 case IWN5000_PHY_CALIB_BASE_BAND: 2261 idx = 4; 2262 break; 2263 } 2264 if (idx == -1) /* Ignore other results. */ 2265 return; 2266 2267 /* Save calibration result. */ 2268 if (sc->calibcmd[idx].buf != NULL) 2269 free(sc->calibcmd[idx].buf, M_DEVBUF); 2270 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT); 2271 if (sc->calibcmd[idx].buf == NULL) { 2272 DPRINTF(("not enough memory for calibration result %d\n", 2273 calib->code)); 2274 return; 2275 } 2276 DPRINTF(("saving calibration result code=%d len=%d\n", 2277 calib->code, len)); 2278 sc->calibcmd[idx].len = len; 2279 memcpy(sc->calibcmd[idx].buf, calib, len); 2280 } 2281 2282 /* 2283 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 2284 * The latter is sent by the firmware after each received beacon. 2285 */ 2286 static void 2287 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2288 struct iwn_rx_data *data) 2289 { 2290 struct iwn_ops *ops = &sc->ops; 2291 struct ieee80211com *ic = &sc->sc_ic; 2292 struct iwn_calib_state *calib = &sc->calib; 2293 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 2294 int temp; 2295 2296 /* Ignore statistics received during a scan. */ 2297 if (ic->ic_state != IEEE80211_S_RUN) 2298 return; 2299 2300 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2301 sizeof (*stats), BUS_DMASYNC_POSTREAD); 2302 2303 DPRINTFN(3, ("received statistics (cmd=%d)\n", desc->type)); 2304 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */ 2305 2306 /* Test if temperature has changed. */ 2307 if (stats->general.temp != sc->rawtemp) { 2308 /* Convert "raw" temperature to degC. */ 2309 sc->rawtemp = stats->general.temp; 2310 temp = ops->get_temperature(sc); 2311 DPRINTFN(2, ("temperature=%dC\n", temp)); 2312 2313 /* Update TX power if need be (4965AGN only). */ 2314 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 2315 iwn4965_power_calibration(sc, temp); 2316 } 2317 2318 if (desc->type != IWN_BEACON_STATISTICS) 2319 return; /* Reply to a statistics request. */ 2320 2321 sc->noise = iwn_get_noise(&stats->rx.general); 2322 2323 /* Test that RSSI and noise are present in stats report. */ 2324 if (le32toh(stats->rx.general.flags) != 1) { 2325 DPRINTF(("received statistics without RSSI\n")); 2326 return; 2327 } 2328 2329 /* 2330 * XXX Differential gain calibration makes the 6005 firmware 2331 * crap out, so skip it for now. This effectively disables 2332 * sensitivity tuning as well. 
2333 */ 2334 if (sc->hw_type == IWN_HW_REV_TYPE_6005) 2335 return; 2336 2337 if (calib->state == IWN_CALIB_STATE_ASSOC) 2338 iwn_collect_noise(sc, &stats->rx.general); 2339 else if (calib->state == IWN_CALIB_STATE_RUN) 2340 iwn_tune_sensitivity(sc, &stats->rx); 2341 } 2342 2343 /* 2344 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 2345 * and 5000 adapters have different incompatible TX status formats. 2346 */ 2347 static void 2348 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2349 struct iwn_rx_data *data) 2350 { 2351 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 2352 2353 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2354 sizeof (*stat), BUS_DMASYNC_POSTREAD); 2355 iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff); 2356 } 2357 2358 static void 2359 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2360 struct iwn_rx_data *data) 2361 { 2362 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 2363 2364 #ifdef notyet 2365 /* Reset TX scheduler slot. */ 2366 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); 2367 #endif 2368 2369 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2370 sizeof (*stat), BUS_DMASYNC_POSTREAD); 2371 iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff); 2372 } 2373 2374 /* 2375 * Adapter-independent backend for TX_DONE firmware notifications. 2376 */ 2377 static void 2378 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt, 2379 uint8_t status) 2380 { 2381 struct ieee80211com *ic = &sc->sc_ic; 2382 struct ifnet *ifp = ic->ic_ifp; 2383 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2384 struct iwn_tx_data *data = &ring->data[desc->idx]; 2385 struct iwn_node *wn = (struct iwn_node *)data->ni; 2386 int s; 2387 2388 s = splnet(); 2389 2390 /* Update rate control statistics. */ 2391 wn->amn.amn_txcnt++; 2392 if (ackfailcnt > 0) 2393 wn->amn.amn_retrycnt++; 2394 2395 if (status != 1 && status != 2) 2396 ifp->if_oerrors++; 2397 else 2398 ifp->if_opackets++; 2399 2400 /* Unmap and free mbuf. */ 2401 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize, 2402 BUS_DMASYNC_POSTWRITE); 2403 bus_dmamap_unload(sc->sc_dmat, data->map); 2404 m_freem(data->m); 2405 data->m = NULL; 2406 ieee80211_free_node(data->ni); 2407 data->ni = NULL; 2408 2409 sc->sc_tx_timer = 0; 2410 if (--ring->queued < IWN_TX_RING_LOMARK) { 2411 sc->qfullmsk &= ~(1 << ring->qid); 2412 if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) { 2413 ifp->if_flags &= ~IFF_OACTIVE; 2414 iwn_start(ifp); 2415 } 2416 } 2417 2418 splx(s); 2419 } 2420 2421 /* 2422 * Process a "command done" firmware notification. This is where we wakeup 2423 * processes waiting for a synchronous command completion. 2424 */ 2425 static void 2426 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 2427 { 2428 struct iwn_tx_ring *ring = &sc->txq[4]; 2429 struct iwn_tx_data *data; 2430 2431 if ((desc->qid & 0xf) != 4) 2432 return; /* Not a command ack. */ 2433 2434 data = &ring->data[desc->idx]; 2435 2436 /* If the command was mapped in an mbuf, free it. */ 2437 if (data->m != NULL) { 2438 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 2439 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2440 bus_dmamap_unload(sc->sc_dmat, data->map); 2441 m_freem(data->m); 2442 data->m = NULL; 2443 } 2444 wakeup(&ring->desc[desc->idx]); 2445 } 2446 2447 /* 2448 * Process an INT_FH_RX or INT_SW_RX interrupt. 
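 * The RX ring is drained up to the index written by the firmware in the
 * shared status area (closed_count).  Each entry is dispatched according
 * to its notification type, and command acknowledgements wake up the
 * process sleeping in iwn_cmd().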
2449 */ 2450 static void 2451 iwn_notif_intr(struct iwn_softc *sc) 2452 { 2453 struct iwn_ops *ops = &sc->ops; 2454 struct ieee80211com *ic = &sc->sc_ic; 2455 struct ifnet *ifp = ic->ic_ifp; 2456 uint16_t hw; 2457 int s; 2458 2459 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map, 2460 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD); 2461 2462 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff; 2463 while (sc->rxq.cur != hw) { 2464 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2465 struct iwn_rx_desc *desc; 2466 2467 bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof (*desc), 2468 BUS_DMASYNC_POSTREAD); 2469 desc = mtod(data->m, struct iwn_rx_desc *); 2470 2471 DPRINTFN(4, ("notification qid=%d idx=%d flags=%x type=%d\n", 2472 desc->qid & 0xf, desc->idx, desc->flags, desc->type)); 2473 2474 if (!(desc->qid & 0x80)) /* Reply to a command. */ 2475 iwn_cmd_done(sc, desc); 2476 2477 switch (desc->type) { 2478 case IWN_RX_PHY: 2479 iwn_rx_phy(sc, desc, data); 2480 break; 2481 2482 case IWN_RX_DONE: /* 4965AGN only. */ 2483 case IWN_MPDU_RX_DONE: 2484 /* An 802.11 frame has been received. */ 2485 iwn_rx_done(sc, desc, data); 2486 break; 2487 #ifndef IEEE80211_NO_HT 2488 case IWN_RX_COMPRESSED_BA: 2489 /* A Compressed BlockAck has been received. */ 2490 iwn_rx_compressed_ba(sc, desc, data); 2491 break; 2492 #endif 2493 case IWN_TX_DONE: 2494 /* An 802.11 frame has been transmitted. */ 2495 ops->tx_done(sc, desc, data); 2496 break; 2497 2498 case IWN_RX_STATISTICS: 2499 case IWN_BEACON_STATISTICS: 2500 iwn_rx_statistics(sc, desc, data); 2501 break; 2502 2503 case IWN_BEACON_MISSED: 2504 { 2505 struct iwn_beacon_missed *miss = 2506 (struct iwn_beacon_missed *)(desc + 1); 2507 2508 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2509 sizeof (*miss), BUS_DMASYNC_POSTREAD); 2510 /* 2511 * If more than 5 consecutive beacons are missed, 2512 * reinitialize the sensitivity state machine. 2513 */ 2514 DPRINTF(("beacons missed %d/%d\n", 2515 le32toh(miss->consecutive), le32toh(miss->total))); 2516 if (ic->ic_state == IEEE80211_S_RUN && 2517 le32toh(miss->consecutive) > 5) 2518 (void)iwn_init_sensitivity(sc); 2519 break; 2520 } 2521 case IWN_UC_READY: 2522 { 2523 struct iwn_ucode_info *uc = 2524 (struct iwn_ucode_info *)(desc + 1); 2525 2526 /* The microcontroller is ready. */ 2527 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2528 sizeof (*uc), BUS_DMASYNC_POSTREAD); 2529 DPRINTF(("microcode alive notification version=%d.%d " 2530 "subtype=%x alive=%x\n", uc->major, uc->minor, 2531 uc->subtype, le32toh(uc->valid))); 2532 2533 if (le32toh(uc->valid) != 1) { 2534 aprint_error_dev(sc->sc_dev, 2535 "microcontroller initialization " 2536 "failed\n"); 2537 break; 2538 } 2539 if (uc->subtype == IWN_UCODE_INIT) { 2540 /* Save microcontroller report. */ 2541 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 2542 } 2543 /* Save the address of the error log in SRAM. */ 2544 sc->errptr = le32toh(uc->errptr); 2545 break; 2546 } 2547 case IWN_STATE_CHANGED: 2548 { 2549 uint32_t *status = (uint32_t *)(desc + 1); 2550 2551 /* Enabled/disabled notification. */ 2552 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2553 sizeof (*status), BUS_DMASYNC_POSTREAD); 2554 DPRINTF(("state changed to %x\n", le32toh(*status))); 2555 2556 if (le32toh(*status) & 1) { 2557 /* The radio button has to be pushed. */ 2558 aprint_error_dev(sc->sc_dev, 2559 "Radio transmitter is off\n"); 2560 /* Turn the interface down. 
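 * No point in processing further notifications while the RF kill
 * switch is engaged.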
*/ 2561 s = splnet(); 2562 ifp->if_flags &= ~IFF_UP; 2563 iwn_stop(ifp, 1); 2564 splx(s); 2565 return; /* No further processing. */ 2566 } 2567 break; 2568 } 2569 case IWN_START_SCAN: 2570 { 2571 struct iwn_start_scan *scan = 2572 (struct iwn_start_scan *)(desc + 1); 2573 2574 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2575 sizeof (*scan), BUS_DMASYNC_POSTREAD); 2576 DPRINTFN(2, ("scanning channel %d status %x\n", 2577 scan->chan, le32toh(scan->status))); 2578 2579 /* Fix current channel. */ 2580 ic->ic_bss->ni_chan = &ic->ic_channels[scan->chan]; 2581 break; 2582 } 2583 case IWN_STOP_SCAN: 2584 { 2585 struct iwn_stop_scan *scan = 2586 (struct iwn_stop_scan *)(desc + 1); 2587 2588 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2589 sizeof (*scan), BUS_DMASYNC_POSTREAD); 2590 DPRINTF(("scan finished nchan=%d status=%d chan=%d\n", 2591 scan->nchan, scan->status, scan->chan)); 2592 2593 if (scan->status == 1 && scan->chan <= 14 && 2594 (sc->sc_flags & IWN_FLAG_HAS_5GHZ)) { 2595 /* 2596 * We just finished scanning 2GHz channels, 2597 * start scanning 5GHz ones. 2598 */ 2599 sc->sc_flags &= ~IWN_FLAG_SCANNING_2GHZ; 2600 sc->sc_flags |= IWN_FLAG_SCANNING_5GHZ; 2601 if (iwn_scan(sc, IEEE80211_CHAN_5GHZ) == 0) 2602 break; 2603 } 2604 sc->sc_flags &= ~IWN_FLAG_SCANNING; 2605 ieee80211_end_scan(ic); 2606 break; 2607 } 2608 case IWN5000_CALIBRATION_RESULT: 2609 iwn5000_rx_calib_results(sc, desc, data); 2610 break; 2611 2612 case IWN5000_CALIBRATION_DONE: 2613 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 2614 wakeup(sc); 2615 break; 2616 } 2617 2618 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 2619 } 2620 2621 /* Tell the firmware what we have processed. */ 2622 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; 2623 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 2624 } 2625 2626 /* 2627 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2628 * from power-down sleep mode. 2629 */ 2630 static void 2631 iwn_wakeup_intr(struct iwn_softc *sc) 2632 { 2633 int qid; 2634 2635 DPRINTF(("ucode wakeup from power-down sleep\n")); 2636 2637 /* Wakeup RX and TX rings. */ 2638 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 2639 for (qid = 0; qid < sc->ntxqs; qid++) { 2640 struct iwn_tx_ring *ring = &sc->txq[qid]; 2641 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 2642 } 2643 } 2644 2645 /* 2646 * Dump the error log of the firmware when a firmware panic occurs. Although 2647 * we can't debug the firmware because it is neither open source nor free, it 2648 * can help us to identify certain classes of problems. 2649 */ 2650 static void 2651 iwn_fatal_intr(struct iwn_softc *sc) 2652 { 2653 struct iwn_fw_dump dump; 2654 int i; 2655 2656 /* Force a complete recalibration on next init. */ 2657 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 2658 2659 /* Check that the error log address is valid. */ 2660 if (sc->errptr < IWN_FW_DATA_BASE || 2661 sc->errptr + sizeof (dump) > 2662 IWN_FW_DATA_BASE + sc->fw_data_maxsz) { 2663 aprint_error_dev(sc->sc_dev, 2664 "bad firmware error log address 0x%08x\n", sc->errptr); 2665 return; 2666 } 2667 if (iwn_nic_lock(sc) != 0) { 2668 aprint_error_dev(sc->sc_dev, 2669 "could not read firmware error log\n"); 2670 return; 2671 } 2672 /* Read firmware error log from SRAM. 
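 * (copied as 32-bit words into a local struct iwn_fw_dump).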
*/ 2673 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 2674 sizeof (dump) / sizeof (uint32_t)); 2675 iwn_nic_unlock(sc); 2676 2677 if (dump.valid == 0) { 2678 aprint_error_dev(sc->sc_dev, 2679 "firmware error log is empty\n"); 2680 return; 2681 } 2682 aprint_error("firmware error log:\n"); 2683 aprint_error(" error type = \"%s\" (0x%08X)\n", 2684 (dump.id < __arraycount(iwn_fw_errmsg)) ? 2685 iwn_fw_errmsg[dump.id] : "UNKNOWN", 2686 dump.id); 2687 aprint_error(" program counter = 0x%08X\n", dump.pc); 2688 aprint_error(" source line = 0x%08X\n", dump.src_line); 2689 aprint_error(" error data = 0x%08X%08X\n", 2690 dump.error_data[0], dump.error_data[1]); 2691 aprint_error(" branch link = 0x%08X%08X\n", 2692 dump.branch_link[0], dump.branch_link[1]); 2693 aprint_error(" interrupt link = 0x%08X%08X\n", 2694 dump.interrupt_link[0], dump.interrupt_link[1]); 2695 aprint_error(" time = %u\n", dump.time[0]); 2696 2697 /* Dump driver status (TX and RX rings) while we're here. */ 2698 aprint_error("driver status:\n"); 2699 for (i = 0; i < sc->ntxqs; i++) { 2700 struct iwn_tx_ring *ring = &sc->txq[i]; 2701 aprint_error(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2702 i, ring->qid, ring->cur, ring->queued); 2703 } 2704 aprint_error(" rx ring: cur=%d\n", sc->rxq.cur); 2705 aprint_error(" 802.11 state %d\n", sc->sc_ic.ic_state); 2706 } 2707 2708 static int 2709 iwn_intr(void *arg) 2710 { 2711 struct iwn_softc *sc = arg; 2712 2713 /* Disable interrupts. */ 2714 IWN_WRITE(sc, IWN_INT_MASK, 0); 2715 2716 softint_schedule(sc->sc_soft_ih); 2717 return 1; 2718 } 2719 2720 static void 2721 iwn_softintr(void *arg) 2722 { 2723 struct iwn_softc *sc = arg; 2724 struct ifnet *ifp = sc->sc_ic.ic_ifp; 2725 uint32_t r1, r2, tmp; 2726 int s; 2727 2728 /* Read interrupts from ICT (fast) or from registers (slow). */ 2729 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2730 bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, 2731 IWN_ICT_SIZE, BUS_DMASYNC_POSTREAD); 2732 tmp = 0; 2733 while (sc->ict[sc->ict_cur] != 0) { 2734 tmp |= sc->ict[sc->ict_cur]; 2735 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 2736 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 2737 } 2738 bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, 2739 IWN_ICT_SIZE, BUS_DMASYNC_PREWRITE); 2740 tmp = le32toh(tmp); 2741 if (tmp == 0xffffffff) /* Shouldn't happen. */ 2742 tmp = 0; 2743 else if (tmp & 0xc0000) /* Workaround a HW bug. */ 2744 tmp |= 0x8000; 2745 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 2746 r2 = 0; /* Unused. */ 2747 } else { 2748 r1 = IWN_READ(sc, IWN_INT); 2749 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 2750 return; /* Hardware gone! */ 2751 r2 = IWN_READ(sc, IWN_FH_INT); 2752 } 2753 if (r1 == 0 && r2 == 0) { 2754 goto out; /* Interrupt not for us. */ 2755 } 2756 2757 /* Acknowledge interrupts. */ 2758 IWN_WRITE(sc, IWN_INT, r1); 2759 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 2760 IWN_WRITE(sc, IWN_FH_INT, r2); 2761 2762 if (r1 & IWN_INT_RF_TOGGLED) { 2763 tmp = IWN_READ(sc, IWN_GP_CNTRL); 2764 aprint_error_dev(sc->sc_dev, 2765 "RF switch: radio %s\n", 2766 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); 2767 } 2768 if (r1 & IWN_INT_CT_REACHED) { 2769 aprint_error_dev(sc->sc_dev, 2770 "critical temperature reached!\n"); 2771 } 2772 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 2773 aprint_error_dev(sc->sc_dev, 2774 "fatal firmware error\n"); 2775 /* Dump firmware error log and stop. 
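 * The interface is marked down; bringing it up again goes through
 * iwn_init(), which reloads the firmware.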
*/ 2776 iwn_fatal_intr(sc); 2777 s = splnet(); 2778 ifp->if_flags &= ~IFF_UP; 2779 iwn_stop(ifp, 1); 2780 splx(s); 2781 return; 2782 } 2783 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 2784 (r2 & IWN_FH_INT_RX)) { 2785 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2786 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 2787 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 2788 IWN_WRITE_1(sc, IWN_INT_PERIODIC, IWN_INT_PERIODIC_DIS); 2789 iwn_notif_intr(sc); 2790 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 2791 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 2792 IWN_INT_PERIODIC_ENA); 2793 } 2794 } else 2795 iwn_notif_intr(sc); 2796 } 2797 2798 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 2799 if (sc->sc_flags & IWN_FLAG_USE_ICT) 2800 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 2801 wakeup(sc); /* FH DMA transfer completed. */ 2802 } 2803 2804 if (r1 & IWN_INT_ALIVE) 2805 wakeup(sc); /* Firmware is alive. */ 2806 2807 if (r1 & IWN_INT_WAKEUP) 2808 iwn_wakeup_intr(sc); 2809 2810 out: 2811 /* Re-enable interrupts. */ 2812 if (ifp->if_flags & IFF_UP) 2813 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 2814 } 2815 2816 /* 2817 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 2818 * 5000 adapters use a slightly different format). 2819 */ 2820 static void 2821 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2822 uint16_t len) 2823 { 2824 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 2825 2826 *w = htole16(len + 8); 2827 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2828 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr, 2829 sizeof (uint16_t), 2830 BUS_DMASYNC_PREWRITE); 2831 if (idx < IWN_SCHED_WINSZ) { 2832 *(w + IWN_TX_RING_COUNT) = *w; 2833 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2834 (char *)(void *)(w + IWN_TX_RING_COUNT) - 2835 (char *)(void *)sc->sched_dma.vaddr, 2836 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2837 } 2838 } 2839 2840 static void 2841 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2842 uint16_t len) 2843 { 2844 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2845 2846 *w = htole16(id << 12 | (len + 8)); 2847 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2848 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr, 2849 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2850 if (idx < IWN_SCHED_WINSZ) { 2851 *(w + IWN_TX_RING_COUNT) = *w; 2852 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2853 (char *)(void *)(w + IWN_TX_RING_COUNT) - 2854 (char *)(void *)sc->sched_dma.vaddr, 2855 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2856 } 2857 } 2858 2859 #ifdef notyet 2860 static void 2861 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 2862 { 2863 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2864 2865 *w = (*w & htole16(0xf000)) | htole16(1); 2866 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2867 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr, 2868 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2869 if (idx < IWN_SCHED_WINSZ) { 2870 *(w + IWN_TX_RING_COUNT) = *w; 2871 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2872 (char *)(void *)(w + IWN_TX_RING_COUNT) - 2873 (char *)(void *)sc->sched_dma.vaddr, 2874 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2875 } 2876 } 2877 #endif 2878 2879 static int 2880 iwn_tx(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac) 2881 { 2882 struct ieee80211com *ic = &sc->sc_ic; 2883 struct iwn_node *wn = (void *)ni; 2884 struct iwn_tx_ring *ring; 2885 struct iwn_tx_desc *desc; 2886 struct iwn_tx_data 
*data; 2887 struct iwn_tx_cmd *cmd; 2888 struct iwn_cmd_data *tx; 2889 const struct iwn_rate *rinfo; 2890 struct ieee80211_frame *wh; 2891 struct ieee80211_key *k = NULL; 2892 struct mbuf *m1; 2893 uint32_t flags; 2894 u_int hdrlen; 2895 bus_dma_segment_t *seg; 2896 uint8_t tid, ridx, txant, type; 2897 int i, totlen, error, pad; 2898 2899 const struct chanAccParams *cap; 2900 int noack; 2901 int hdrlen2; 2902 2903 wh = mtod(m, struct ieee80211_frame *); 2904 hdrlen = ieee80211_anyhdrsize(wh); 2905 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2906 2907 hdrlen2 = (ieee80211_has_qos(wh)) ? 2908 sizeof (struct ieee80211_qosframe) : 2909 sizeof (struct ieee80211_frame); 2910 2911 if (hdrlen != hdrlen2) 2912 aprint_error_dev(sc->sc_dev, "hdrlen error (%d != %d)\n", 2913 hdrlen, hdrlen2); 2914 2915 /* XXX OpenBSD sets a different tid when using QOS */ 2916 tid = 0; 2917 if (ieee80211_has_qos(wh)) { 2918 cap = &ic->ic_wme.wme_chanParams; 2919 noack = cap->cap_wmeParams[ac].wmep_noackPolicy; 2920 } 2921 else 2922 noack = 0; 2923 2924 ring = &sc->txq[ac]; 2925 desc = &ring->desc[ring->cur]; 2926 data = &ring->data[ring->cur]; 2927 2928 /* Choose a TX rate index. */ 2929 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 2930 type != IEEE80211_FC0_TYPE_DATA) { 2931 ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ? 2932 IWN_RIDX_OFDM6 : IWN_RIDX_CCK1; 2933 } else if (ic->ic_fixed_rate != -1) { 2934 ridx = sc->fixed_ridx; 2935 } else 2936 ridx = wn->ridx[ni->ni_txrate]; 2937 rinfo = &iwn_rates[ridx]; 2938 2939 /* Encrypt the frame if need be. */ 2940 /* 2941 * XXX For now, NetBSD swaps the encryption and bpf sections 2942 * in order to match old code and other drivers. Tests with 2943 * tcpdump indicates that the order is irrelevant, however, 2944 * as bpf produces unencrypted data for both ordering choices. 2945 */ 2946 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 2947 k = ieee80211_crypto_encap(ic, ni, m); 2948 if (k == NULL) { 2949 m_freem(m); 2950 return ENOBUFS; 2951 } 2952 /* Packet header may have moved, reset our local pointer. */ 2953 wh = mtod(m, struct ieee80211_frame *); 2954 } 2955 totlen = m->m_pkthdr.len; 2956 2957 if (sc->sc_drvbpf != NULL) { 2958 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 2959 2960 tap->wt_flags = 0; 2961 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq); 2962 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags); 2963 tap->wt_rate = rinfo->rate; 2964 tap->wt_hwqueue = ac; 2965 if (wh->i_fc[1] & IEEE80211_FC1_WEP) 2966 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2967 2968 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m, BPF_D_OUT); 2969 } 2970 2971 /* Prepare TX firmware command. */ 2972 cmd = &ring->cmd[ring->cur]; 2973 cmd->code = IWN_CMD_TX_DATA; 2974 cmd->flags = 0; 2975 cmd->qid = ring->qid; 2976 cmd->idx = ring->cur; 2977 2978 tx = (struct iwn_cmd_data *)cmd->data; 2979 /* NB: No need to clear tx, all fields are reinitialized here. */ 2980 tx->scratch = 0; /* clear "scratch" area */ 2981 2982 flags = 0; 2983 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2984 /* Unicast frame, check if an ACK is expected. */ 2985 if (!noack) 2986 flags |= IWN_TX_NEED_ACK; 2987 } 2988 2989 #ifdef notyet 2990 /* XXX NetBSD does not define IEEE80211_FC0_SUBTYPE_BAR */ 2991 if ((wh->i_fc[0] & 2992 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 2993 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) 2994 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */ 2995 #endif 2996 2997 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2998 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. 
*/ 2999 3000 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 3001 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3002 /* NB: Group frames are sent using CCK in 802.11b/g. */ 3003 if (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) { 3004 flags |= IWN_TX_NEED_RTS; 3005 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 3006 ridx >= IWN_RIDX_OFDM6) { 3007 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 3008 flags |= IWN_TX_NEED_CTS; 3009 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 3010 flags |= IWN_TX_NEED_RTS; 3011 } 3012 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { 3013 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 3014 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 3015 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); 3016 flags |= IWN_TX_NEED_PROTECTION; 3017 } else 3018 flags |= IWN_TX_FULL_TXOP; 3019 } 3020 } 3021 3022 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 3023 type != IEEE80211_FC0_TYPE_DATA) 3024 tx->id = sc->broadcast_id; 3025 else 3026 tx->id = wn->id; 3027 3028 if (type == IEEE80211_FC0_TYPE_MGT) { 3029 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3030 3031 #ifndef IEEE80211_STA_ONLY 3032 /* Tell HW to set timestamp in probe responses. */ 3033 /* XXX NetBSD rev 1.11 added probe requests here but */ 3034 /* probe requests do not take timestamps (from Bergamini). */ 3035 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3036 flags |= IWN_TX_INSERT_TSTAMP; 3037 #endif 3038 /* XXX NetBSD rev 1.11 and 1.20 added AUTH/DAUTH and RTS/CTS */ 3039 /* changes here. These are not needed (from Bergamini). */ 3040 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3041 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 3042 tx->timeout = htole16(3); 3043 else 3044 tx->timeout = htole16(2); 3045 } else 3046 tx->timeout = htole16(0); 3047 3048 if (hdrlen & 3) { 3049 /* First segment length must be a multiple of 4. */ 3050 flags |= IWN_TX_NEED_PADDING; 3051 pad = 4 - (hdrlen & 3); 3052 } else 3053 pad = 0; 3054 3055 tx->len = htole16(totlen); 3056 tx->tid = tid; 3057 tx->rts_ntries = 60; 3058 tx->data_ntries = 15; 3059 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 3060 tx->plcp = rinfo->plcp; 3061 tx->rflags = rinfo->flags; 3062 if (tx->id == sc->broadcast_id) { 3063 /* Group or management frame. */ 3064 tx->linkq = 0; 3065 /* XXX Alternate between antenna A and B? */ 3066 txant = IWN_LSB(sc->txchainmask); 3067 tx->rflags |= IWN_RFLAG_ANT(txant); 3068 } else { 3069 tx->linkq = ni->ni_rates.rs_nrates - ni->ni_txrate - 1; 3070 flags |= IWN_TX_LINKQ; /* enable MRR */ 3071 } 3072 /* Set physical address of "scratch area". */ 3073 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 3074 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 3075 3076 /* Copy 802.11 header in TX command. */ 3077 /* XXX NetBSD changed this in rev 1.20 */ 3078 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen); 3079 3080 /* Trim 802.11 header. */ 3081 m_adj(m, hdrlen); 3082 tx->security = 0; 3083 tx->flags = htole32(flags); 3084 3085 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 3086 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3087 if (error != 0) { 3088 if (error != EFBIG) { 3089 aprint_error_dev(sc->sc_dev, 3090 "can't map mbuf (error %d)\n", error); 3091 m_freem(m); 3092 return error; 3093 } 3094 /* Too many DMA segments, linearize mbuf. 
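 * Copy the chain into a single mbuf (with a cluster if it does not fit
 * in MHLEN) and retry the DMA load once.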
*/ 3095 MGETHDR(m1, M_DONTWAIT, MT_DATA); 3096 if (m1 == NULL) { 3097 m_freem(m); 3098 return ENOBUFS; 3099 } 3100 if (m->m_pkthdr.len > MHLEN) { 3101 MCLGET(m1, M_DONTWAIT); 3102 if (!(m1->m_flags & M_EXT)) { 3103 m_freem(m); 3104 m_freem(m1); 3105 return ENOBUFS; 3106 } 3107 } 3108 m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *)); 3109 m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len; 3110 m_freem(m); 3111 m = m1; 3112 3113 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 3114 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3115 if (error != 0) { 3116 aprint_error_dev(sc->sc_dev, 3117 "can't map mbuf (error %d)\n", error); 3118 m_freem(m); 3119 return error; 3120 } 3121 } 3122 3123 data->m = m; 3124 data->ni = ni; 3125 3126 DPRINTFN(4, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n", 3127 ring->qid, ring->cur, m->m_pkthdr.len, data->map->dm_nsegs)); 3128 3129 /* Fill TX descriptor. */ 3130 desc->nsegs = 1 + data->map->dm_nsegs; 3131 /* First DMA segment is used by the TX command. */ 3132 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 3133 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 3134 (4 + sizeof (*tx) + hdrlen + pad) << 4); 3135 /* Other DMA segments are for data payload. */ 3136 seg = data->map->dm_segs; 3137 for (i = 1; i <= data->map->dm_nsegs; i++) { 3138 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 3139 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 3140 seg->ds_len << 4); 3141 seg++; 3142 } 3143 3144 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize, 3145 BUS_DMASYNC_PREWRITE); 3146 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map, 3147 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr, 3148 sizeof (*cmd), BUS_DMASYNC_PREWRITE); 3149 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 3150 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr, 3151 sizeof (*desc), BUS_DMASYNC_PREWRITE); 3152 3153 #ifdef notyet 3154 /* Update TX scheduler. */ 3155 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 3156 #endif 3157 3158 /* Kick TX ring. */ 3159 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3160 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3161 3162 /* Mark TX ring as full if we reach a certain threshold. */ 3163 if (++ring->queued > IWN_TX_RING_HIMARK) 3164 sc->qfullmsk |= 1 << ring->qid; 3165 3166 return 0; 3167 } 3168 3169 static void 3170 iwn_start(struct ifnet *ifp) 3171 { 3172 struct iwn_softc *sc = ifp->if_softc; 3173 struct ieee80211com *ic = &sc->sc_ic; 3174 struct ieee80211_node *ni; 3175 struct ether_header *eh; 3176 struct mbuf *m; 3177 int ac; 3178 3179 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 3180 return; 3181 3182 for (;;) { 3183 if (sc->sc_beacon_wait == 1) { 3184 ifp->if_flags |= IFF_OACTIVE; 3185 break; 3186 } 3187 3188 if (sc->qfullmsk != 0) { 3189 ifp->if_flags |= IFF_OACTIVE; 3190 break; 3191 } 3192 /* Send pending management frames first. */ 3193 IF_DEQUEUE(&ic->ic_mgtq, m); 3194 if (m != NULL) { 3195 ni = M_GETCTX(m, struct ieee80211_node *); 3196 ac = 0; 3197 goto sendit; 3198 } 3199 if (ic->ic_state != IEEE80211_S_RUN) 3200 break; 3201 3202 /* Encapsulate and send data frames. 
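 * Management frames queued on ic_mgtq were handled above; data frames
 * are classified first so that the proper WME TX ring is used.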
*/ 3203 IFQ_DEQUEUE(&ifp->if_snd, m); 3204 if (m == NULL) 3205 break; 3206 if (m->m_len < sizeof (*eh) && 3207 (m = m_pullup(m, sizeof (*eh))) == NULL) { 3208 ifp->if_oerrors++; 3209 continue; 3210 } 3211 eh = mtod(m, struct ether_header *); 3212 ni = ieee80211_find_txnode(ic, eh->ether_dhost); 3213 if (ni == NULL) { 3214 m_freem(m); 3215 ifp->if_oerrors++; 3216 continue; 3217 } 3218 /* classify mbuf so we can find which tx ring to use */ 3219 if (ieee80211_classify(ic, m, ni) != 0) { 3220 m_freem(m); 3221 ieee80211_free_node(ni); 3222 ifp->if_oerrors++; 3223 continue; 3224 } 3225 3226 /* No QoS encapsulation for EAPOL frames. */ 3227 ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ? 3228 M_WME_GETAC(m) : WME_AC_BE; 3229 3230 if (sc->sc_beacon_wait == 0) 3231 bpf_mtap(ifp, m, BPF_D_OUT); 3232 3233 if ((m = ieee80211_encap(ic, m, ni)) == NULL) { 3234 ieee80211_free_node(ni); 3235 ifp->if_oerrors++; 3236 continue; 3237 } 3238 sendit: 3239 if (sc->sc_beacon_wait) 3240 continue; 3241 3242 bpf_mtap3(ic->ic_rawbpf, m, BPF_D_OUT); 3243 3244 if (iwn_tx(sc, m, ni, ac) != 0) { 3245 ieee80211_free_node(ni); 3246 ifp->if_oerrors++; 3247 continue; 3248 } 3249 3250 sc->sc_tx_timer = 5; 3251 ifp->if_timer = 1; 3252 } 3253 3254 if (sc->sc_beacon_wait > 1) 3255 sc->sc_beacon_wait = 0; 3256 } 3257 3258 static void 3259 iwn_watchdog(struct ifnet *ifp) 3260 { 3261 struct iwn_softc *sc = ifp->if_softc; 3262 3263 ifp->if_timer = 0; 3264 3265 if (sc->sc_tx_timer > 0) { 3266 if (--sc->sc_tx_timer == 0) { 3267 aprint_error_dev(sc->sc_dev, 3268 "device timeout\n"); 3269 ifp->if_flags &= ~IFF_UP; 3270 iwn_stop(ifp, 1); 3271 ifp->if_oerrors++; 3272 return; 3273 } 3274 ifp->if_timer = 1; 3275 } 3276 3277 ieee80211_watchdog(&sc->sc_ic); 3278 } 3279 3280 static int 3281 iwn_ioctl(struct ifnet *ifp, u_long cmd, void *data) 3282 { 3283 struct iwn_softc *sc = ifp->if_softc; 3284 struct ieee80211com *ic = &sc->sc_ic; 3285 const struct sockaddr *sa; 3286 int s, error = 0; 3287 3288 s = splnet(); 3289 3290 switch (cmd) { 3291 case SIOCSIFADDR: 3292 ifp->if_flags |= IFF_UP; 3293 /* FALLTHROUGH */ 3294 case SIOCSIFFLAGS: 3295 /* XXX Added as it is in every NetBSD driver */ 3296 if ((error = ifioctl_common(ifp, cmd, data)) != 0) 3297 break; 3298 if (ifp->if_flags & IFF_UP) { 3299 if (!(ifp->if_flags & IFF_RUNNING)) 3300 error = iwn_init(ifp); 3301 } else { 3302 if (ifp->if_flags & IFF_RUNNING) 3303 iwn_stop(ifp, 1); 3304 } 3305 break; 3306 3307 case SIOCADDMULTI: 3308 case SIOCDELMULTI: 3309 sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data); 3310 error = (cmd == SIOCADDMULTI) ? 3311 ether_addmulti(sa, &sc->sc_ec) : 3312 ether_delmulti(sa, &sc->sc_ec); 3313 3314 if (error == ENETRESET) 3315 error = 0; 3316 break; 3317 3318 default: 3319 error = ieee80211_ioctl(ic, cmd, data); 3320 } 3321 3322 if (error == ENETRESET) { 3323 error = 0; 3324 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 3325 (IFF_UP | IFF_RUNNING)) { 3326 iwn_stop(ifp, 0); 3327 error = iwn_init(ifp); 3328 } 3329 } 3330 3331 splx(s); 3332 return error; 3333 } 3334 3335 /* 3336 * Send a command to the firmware. 
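 * Commands that do not fit in the preallocated command slot are copied
 * into an mbuf instead.  When ``async'' is zero, the caller sleeps on
 * the TX descriptor until iwn_cmd_done() wakes it up or the one second
 * timeout expires.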
3337 */ 3338 static int 3339 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) 3340 { 3341 struct iwn_tx_ring *ring = &sc->txq[4]; 3342 struct iwn_tx_desc *desc; 3343 struct iwn_tx_data *data; 3344 struct iwn_tx_cmd *cmd; 3345 struct mbuf *m; 3346 bus_addr_t paddr; 3347 int totlen, error; 3348 3349 desc = &ring->desc[ring->cur]; 3350 data = &ring->data[ring->cur]; 3351 totlen = 4 + size; 3352 3353 if (size > sizeof cmd->data) { 3354 /* Command is too large to fit in a descriptor. */ 3355 if (totlen > MCLBYTES) 3356 return EINVAL; 3357 MGETHDR(m, M_DONTWAIT, MT_DATA); 3358 if (m == NULL) 3359 return ENOMEM; 3360 if (totlen > MHLEN) { 3361 MCLGET(m, M_DONTWAIT); 3362 if (!(m->m_flags & M_EXT)) { 3363 m_freem(m); 3364 return ENOMEM; 3365 } 3366 } 3367 cmd = mtod(m, struct iwn_tx_cmd *); 3368 error = bus_dmamap_load(sc->sc_dmat, data->map, cmd, totlen, 3369 NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3370 if (error != 0) { 3371 m_freem(m); 3372 return error; 3373 } 3374 data->m = m; 3375 paddr = data->map->dm_segs[0].ds_addr; 3376 } else { 3377 cmd = &ring->cmd[ring->cur]; 3378 paddr = data->cmd_paddr; 3379 } 3380 3381 cmd->code = code; 3382 cmd->flags = 0; 3383 cmd->qid = ring->qid; 3384 cmd->idx = ring->cur; 3385 /* 3386 * Coverity:[OUT_OF_BOUNDS] 3387 * false positive since, allocated in mbuf if it does not fit 3388 */ 3389 memcpy(cmd->data, buf, size); 3390 3391 desc->nsegs = 1; 3392 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 3393 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); 3394 3395 if (size > sizeof cmd->data) { 3396 bus_dmamap_sync(sc->sc_dmat, data->map, 0, totlen, 3397 BUS_DMASYNC_PREWRITE); 3398 } else { 3399 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map, 3400 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr, 3401 totlen, BUS_DMASYNC_PREWRITE); 3402 } 3403 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 3404 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr, 3405 sizeof (*desc), BUS_DMASYNC_PREWRITE); 3406 3407 #ifdef notyet 3408 /* Update TX scheduler. */ 3409 ops->update_sched(sc, ring->qid, ring->cur, 0, 0); 3410 #endif 3411 DPRINTFN(4, ("iwn_cmd %d size=%d %s\n", code, size, async ? " (async)" : "")); 3412 3413 /* Kick command ring. */ 3414 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3415 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3416 3417 return async ? 0 : tsleep(desc, PCATCH, "iwncmd", hz); 3418 } 3419 3420 static int 3421 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3422 { 3423 struct iwn4965_node_info hnode; 3424 char *src, *dst; 3425 3426 /* 3427 * We use the node structure for 5000 Series internally (it is 3428 * a superset of the one for 4965AGN). We thus copy the common 3429 * fields before sending the command. 3430 */ 3431 src = (char *)node; 3432 dst = (char *)&hnode; 3433 memcpy(dst, src, 48); 3434 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ 3435 memcpy(dst + 48, src + 72, 20); 3436 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); 3437 } 3438 3439 static int 3440 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3441 { 3442 /* Direct mapping. 
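 * The 5000 firmware consumes struct iwn_node_info as-is, so no
 * repacking is needed (contrast with iwn4965_add_node() above).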
*/ 3443 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); 3444 } 3445 3446 static int 3447 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni) 3448 { 3449 struct iwn_node *wn = (void *)ni; 3450 struct ieee80211_rateset *rs = &ni->ni_rates; 3451 struct iwn_cmd_link_quality linkq; 3452 const struct iwn_rate *rinfo; 3453 uint8_t txant; 3454 int i, txrate; 3455 3456 /* Use the first valid TX antenna. */ 3457 txant = IWN_LSB(sc->txchainmask); 3458 3459 memset(&linkq, 0, sizeof linkq); 3460 linkq.id = wn->id; 3461 linkq.antmsk_1stream = txant; 3462 linkq.antmsk_2stream = IWN_ANT_AB; 3463 linkq.ampdu_max = 31; 3464 linkq.ampdu_threshold = 3; 3465 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3466 3467 /* Start at highest available bit-rate. */ 3468 txrate = rs->rs_nrates - 1; 3469 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) { 3470 rinfo = &iwn_rates[wn->ridx[txrate]]; 3471 linkq.retry[i].plcp = rinfo->plcp; 3472 linkq.retry[i].rflags = rinfo->flags; 3473 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant); 3474 /* Next retry at immediate lower bit-rate. */ 3475 if (txrate > 0) 3476 txrate--; 3477 } 3478 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1); 3479 } 3480 3481 /* 3482 * Broadcast node is used to send group-addressed and management frames. 3483 */ 3484 static int 3485 iwn_add_broadcast_node(struct iwn_softc *sc, int async) 3486 { 3487 struct iwn_ops *ops = &sc->ops; 3488 struct iwn_node_info node; 3489 struct iwn_cmd_link_quality linkq; 3490 const struct iwn_rate *rinfo; 3491 uint8_t txant; 3492 int i, error; 3493 3494 memset(&node, 0, sizeof node); 3495 IEEE80211_ADDR_COPY(node.macaddr, etherbroadcastaddr); 3496 node.id = sc->broadcast_id; 3497 DPRINTF(("adding broadcast node\n")); 3498 if ((error = ops->add_node(sc, &node, async)) != 0) 3499 return error; 3500 3501 /* Use the first valid TX antenna. */ 3502 txant = IWN_LSB(sc->txchainmask); 3503 3504 memset(&linkq, 0, sizeof linkq); 3505 linkq.id = sc->broadcast_id; 3506 linkq.antmsk_1stream = txant; 3507 linkq.antmsk_2stream = IWN_ANT_AB; 3508 linkq.ampdu_max = 64; 3509 linkq.ampdu_threshold = 3; 3510 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3511 3512 /* Use lowest mandatory bit-rate. */ 3513 rinfo = (sc->sc_ic.ic_curmode != IEEE80211_MODE_11A) ? 3514 &iwn_rates[IWN_RIDX_CCK1] : &iwn_rates[IWN_RIDX_OFDM6]; 3515 linkq.retry[0].plcp = rinfo->plcp; 3516 linkq.retry[0].rflags = rinfo->flags; 3517 linkq.retry[0].rflags |= IWN_RFLAG_ANT(txant); 3518 /* Use same bit-rate for all TX retries. */ 3519 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) { 3520 linkq.retry[i].plcp = linkq.retry[0].plcp; 3521 linkq.retry[i].rflags = linkq.retry[0].rflags; 3522 } 3523 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); 3524 } 3525 3526 static void 3527 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3528 { 3529 struct iwn_cmd_led led; 3530 3531 /* Clear microcode LED ownership. */ 3532 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL); 3533 3534 led.which = which; 3535 led.unit = htole32(10000); /* on/off in unit of 100ms */ 3536 led.off = off; 3537 led.on = on; 3538 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); 3539 } 3540 3541 /* 3542 * Set the critical temperature at which the firmware will stop the radio 3543 * and notify us. 
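 * The threshold is passed in the unit the firmware expects: a raw
 * sensor value for 5150 (derived from temp_off), Kelvin for 4965 and
 * degrees Celsius for the other adapters.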
3544 */ 3545 static int 3546 iwn_set_critical_temp(struct iwn_softc *sc) 3547 { 3548 struct iwn_critical_temp crit; 3549 int32_t temp; 3550 3551 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); 3552 3553 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 3554 temp = (IWN_CTOK(110) - sc->temp_off) * -5; 3555 else if (sc->hw_type == IWN_HW_REV_TYPE_4965) 3556 temp = IWN_CTOK(110); 3557 else 3558 temp = 110; 3559 memset(&crit, 0, sizeof crit); 3560 crit.tempR = htole32(temp); 3561 DPRINTF(("setting critical temperature to %d\n", temp)); 3562 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); 3563 } 3564 3565 static int 3566 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) 3567 { 3568 struct iwn_cmd_timing cmd; 3569 uint64_t val, mod; 3570 3571 memset(&cmd, 0, sizeof cmd); 3572 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3573 cmd.bintval = htole16(ni->ni_intval); 3574 cmd.lintval = htole16(10); 3575 3576 /* Compute remaining time until next beacon. */ 3577 val = (uint64_t)ni->ni_intval * 1024; /* msecs -> usecs */ 3578 mod = le64toh(cmd.tstamp) % val; 3579 cmd.binitval = htole32((uint32_t)(val - mod)); 3580 3581 DPRINTF(("timing bintval=%u, tstamp=%" PRIu64 ", init=%" PRIu32 "\n", 3582 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod))); 3583 3584 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); 3585 } 3586 3587 static void 3588 iwn4965_power_calibration(struct iwn_softc *sc, int temp) 3589 { 3590 /* Adjust TX power if need be (delta >= 3 degC). */ 3591 DPRINTF(("temperature %d->%d\n", sc->temp, temp)); 3592 if (abs(temp - sc->temp) >= 3) { 3593 /* Record temperature of last calibration. */ 3594 sc->temp = temp; 3595 (void)iwn4965_set_txpower(sc, 1); 3596 } 3597 } 3598 3599 /* 3600 * Set TX power for current channel (each rate has its own power settings). 3601 * This function takes into account the regulatory information from EEPROM, 3602 * the current temperature and the current voltage. 3603 */ 3604 static int 3605 iwn4965_set_txpower(struct iwn_softc *sc, int async) 3606 { 3607 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3608 #define fdivround(a, b, n) \ 3609 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3610 /* Linear interpolation. */ 3611 #define interpolate(x, x1, y1, x2, y2, n) \ 3612 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3613 3614 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; 3615 struct ieee80211com *ic = &sc->sc_ic; 3616 struct iwn_ucode_info *uc = &sc->ucode_info; 3617 struct ieee80211_channel *ch; 3618 struct iwn4965_cmd_txpower cmd; 3619 struct iwn4965_eeprom_chan_samples *chans; 3620 const uint8_t *rf_gain, *dsp_gain; 3621 int32_t vdiff, tdiff; 3622 int i, c, grp, maxpwr; 3623 uint8_t chan; 3624 3625 /* Retrieve current channel from last RXON. */ 3626 chan = sc->rxon.chan; 3627 DPRINTF(("setting TX power for channel %d\n", chan)); 3628 ch = &ic->ic_channels[chan]; 3629 3630 memset(&cmd, 0, sizeof cmd); 3631 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1; 3632 cmd.chan = chan; 3633 3634 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 3635 maxpwr = sc->maxpwr5GHz; 3636 rf_gain = iwn4965_rf_gain_5ghz; 3637 dsp_gain = iwn4965_dsp_gain_5ghz; 3638 } else { 3639 maxpwr = sc->maxpwr2GHz; 3640 rf_gain = iwn4965_rf_gain_2ghz; 3641 dsp_gain = iwn4965_dsp_gain_2ghz; 3642 } 3643 3644 /* Compute voltage compensation. 
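 * vdiff is the difference between the voltage reported by the microcode
 * and the EEPROM reference, scaled down by 7.  Positive differences
 * count double, and anything larger than 2 power-index steps is
 * discarded.  The result is subtracted from the gain index below.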
*/ 3645 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7; 3646 if (vdiff > 0) 3647 vdiff *= 2; 3648 if (abs(vdiff) > 2) 3649 vdiff = 0; 3650 DPRINTF(("voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", 3651 vdiff, le32toh(uc->volt), sc->eeprom_voltage)); 3652 3653 /* Get channel attenuation group. */ 3654 if (chan <= 20) /* 1-20 */ 3655 grp = 4; 3656 else if (chan <= 43) /* 34-43 */ 3657 grp = 0; 3658 else if (chan <= 70) /* 44-70 */ 3659 grp = 1; 3660 else if (chan <= 124) /* 71-124 */ 3661 grp = 2; 3662 else /* 125-200 */ 3663 grp = 3; 3664 DPRINTF(("chan %d, attenuation group=%d\n", chan, grp)); 3665 3666 /* Get channel sub-band. */ 3667 for (i = 0; i < IWN_NBANDS; i++) 3668 if (sc->bands[i].lo != 0 && 3669 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) 3670 break; 3671 if (i == IWN_NBANDS) /* Can't happen in real-life. */ 3672 return EINVAL; 3673 chans = sc->bands[i].chans; 3674 DPRINTF(("chan %d sub-band=%d\n", chan, i)); 3675 3676 for (c = 0; c < 2; c++) { 3677 uint8_t power, gain, temp; 3678 int maxchpwr, pwr, ridx, idx; 3679 3680 power = interpolate(chan, 3681 chans[0].num, chans[0].samples[c][1].power, 3682 chans[1].num, chans[1].samples[c][1].power, 1); 3683 gain = interpolate(chan, 3684 chans[0].num, chans[0].samples[c][1].gain, 3685 chans[1].num, chans[1].samples[c][1].gain, 1); 3686 temp = interpolate(chan, 3687 chans[0].num, chans[0].samples[c][1].temp, 3688 chans[1].num, chans[1].samples[c][1].temp, 1); 3689 DPRINTF(("TX chain %d: power=%d gain=%d temp=%d\n", 3690 c, power, gain, temp)); 3691 3692 /* Compute temperature compensation. */ 3693 tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; 3694 DPRINTF(("temperature compensation=%d (current=%d, " 3695 "EEPROM=%d)\n", tdiff, sc->temp, temp)); 3696 3697 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { 3698 /* Convert dBm to half-dBm. */ 3699 maxchpwr = sc->maxpwr[chan] * 2; 3700 if ((ridx / 8) & 1) 3701 maxchpwr -= 6; /* MIMO 2T: -3dB */ 3702 3703 pwr = maxpwr; 3704 3705 /* Adjust TX power based on rate. */ 3706 if ((ridx % 8) == 5) 3707 pwr -= 15; /* OFDM48: -7.5dB */ 3708 else if ((ridx % 8) == 6) 3709 pwr -= 17; /* OFDM54: -8.5dB */ 3710 else if ((ridx % 8) == 7) 3711 pwr -= 20; /* OFDM60: -10dB */ 3712 else 3713 pwr -= 10; /* Others: -5dB */ 3714 3715 /* Do not exceed channel max TX power. */ 3716 if (pwr > maxchpwr) 3717 pwr = maxchpwr; 3718 3719 idx = gain - (pwr - power) - tdiff - vdiff; 3720 if ((ridx / 8) & 1) /* MIMO */ 3721 idx += (int32_t)le32toh(uc->atten[grp][c]); 3722 3723 if (cmd.band == 0) 3724 idx += 9; /* 5GHz */ 3725 if (ridx == IWN_RIDX_MAX) 3726 idx += 5; /* CCK */ 3727 3728 /* Make sure idx stays in a valid range. */ 3729 if (idx < 0) 3730 idx = 0; 3731 else if (idx > IWN4965_MAX_PWR_INDEX) 3732 idx = IWN4965_MAX_PWR_INDEX; 3733 3734 DPRINTF(("TX chain %d, rate idx %d: power=%d\n", 3735 c, ridx, idx)); 3736 cmd.power[ridx].rf_gain[c] = rf_gain[idx]; 3737 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; 3738 } 3739 } 3740 3741 DPRINTF(("setting TX power for chan %d\n", chan)); 3742 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); 3743 3744 #undef interpolate 3745 #undef fdivround 3746 } 3747 3748 static int 3749 iwn5000_set_txpower(struct iwn_softc *sc, int async) 3750 { 3751 struct iwn5000_cmd_txpower cmd; 3752 int cmdid; 3753 3754 /* 3755 * TX power calibration is handled automatically by the firmware 3756 * for 5000 Series. 
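 * We only send the overall power limits and let the firmware derive the
 * per-rate settings; the command ID used below depends on the ucode
 * API version.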
3757 */ 3758 memset(&cmd, 0, sizeof cmd); 3759 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */ 3760 cmd.flags = IWN5000_TXPOWER_NO_CLOSED; 3761 cmd.srv_limit = IWN5000_TXPOWER_AUTO; 3762 DPRINTF(("setting TX power\n")); 3763 if (IWN_UCODE_API(sc->ucode_rev) == 1) 3764 cmdid = IWN_CMD_TXPOWER_DBM_V1; 3765 else 3766 cmdid = IWN_CMD_TXPOWER_DBM; 3767 return iwn_cmd(sc, cmdid, &cmd, sizeof cmd, async); 3768 } 3769 3770 /* 3771 * Retrieve the maximum RSSI (in dBm) among receivers. 3772 */ 3773 static int 3774 iwn4965_get_rssi(const struct iwn_rx_stat *stat) 3775 { 3776 const struct iwn4965_rx_phystat *phy = (const void *)stat->phybuf; 3777 uint8_t mask, agc; 3778 int rssi; 3779 3780 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC; 3781 agc = (le16toh(phy->agc) >> 7) & 0x7f; 3782 3783 rssi = 0; 3784 if (mask & IWN_ANT_A) 3785 rssi = MAX(rssi, phy->rssi[0]); 3786 if (mask & IWN_ANT_B) 3787 rssi = MAX(rssi, phy->rssi[2]); 3788 if (mask & IWN_ANT_C) 3789 rssi = MAX(rssi, phy->rssi[4]); 3790 3791 return rssi - agc - IWN_RSSI_TO_DBM; 3792 } 3793 3794 static int 3795 iwn5000_get_rssi(const struct iwn_rx_stat *stat) 3796 { 3797 const struct iwn5000_rx_phystat *phy = (const void *)stat->phybuf; 3798 uint8_t agc; 3799 int rssi; 3800 3801 agc = (le32toh(phy->agc) >> 9) & 0x7f; 3802 3803 rssi = MAX(le16toh(phy->rssi[0]) & 0xff, 3804 le16toh(phy->rssi[1]) & 0xff); 3805 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi); 3806 3807 return rssi - agc - IWN_RSSI_TO_DBM; 3808 } 3809 3810 /* 3811 * Retrieve the average noise (in dBm) among receivers. 3812 */ 3813 static int 3814 iwn_get_noise(const struct iwn_rx_general_stats *stats) 3815 { 3816 int i, total, nbant, noise; 3817 3818 total = nbant = 0; 3819 for (i = 0; i < 3; i++) { 3820 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0) 3821 continue; 3822 total += noise; 3823 nbant++; 3824 } 3825 /* There should be at least one antenna but check anyway. */ 3826 return (nbant == 0) ? -127 : (total / nbant) - 107; 3827 } 3828 3829 /* 3830 * Compute temperature (in degC) from last received statistics. 3831 */ 3832 static int 3833 iwn4965_get_temperature(struct iwn_softc *sc) 3834 { 3835 struct iwn_ucode_info *uc = &sc->ucode_info; 3836 int32_t r1, r2, r3, r4, temp; 3837 3838 r1 = le32toh(uc->temp[0].chan20MHz); 3839 r2 = le32toh(uc->temp[1].chan20MHz); 3840 r3 = le32toh(uc->temp[2].chan20MHz); 3841 r4 = le32toh(sc->rawtemp); 3842 3843 if (r1 == r3) /* Prevents division by 0 (should not happen). */ 3844 return 0; 3845 3846 /* Sign-extend 23-bit R4 value to 32-bit. */ 3847 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000; 3848 /* Compute temperature in Kelvin. */ 3849 temp = (259 * (r4 - r2)) / (r3 - r1); 3850 temp = (temp * 97) / 100 + 8; 3851 3852 DPRINTF(("temperature %dK/%dC\n", temp, IWN_KTOC(temp))); 3853 return IWN_KTOC(temp); 3854 } 3855 3856 static int 3857 iwn5000_get_temperature(struct iwn_softc *sc) 3858 { 3859 int32_t temp; 3860 3861 /* 3862 * Temperature is not used by the driver for 5000 Series because 3863 * TX power calibration is handled by firmware. We export it to 3864 * users through the sensor framework though. 3865 */ 3866 temp = le32toh(sc->rawtemp); 3867 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 3868 temp = (temp / -5) + sc->temp_off; 3869 temp = IWN_KTOC(temp); 3870 } 3871 return temp; 3872 } 3873 3874 /* 3875 * Initialize sensitivity calibration state machine. 
3876 */ 3877 static int 3878 iwn_init_sensitivity(struct iwn_softc *sc) 3879 { 3880 struct iwn_ops *ops = &sc->ops; 3881 struct iwn_calib_state *calib = &sc->calib; 3882 uint32_t flags; 3883 int error; 3884 3885 /* Reset calibration state machine. */ 3886 memset(calib, 0, sizeof (*calib)); 3887 calib->state = IWN_CALIB_STATE_INIT; 3888 calib->cck_state = IWN_CCK_STATE_HIFA; 3889 /* Set initial correlation values. */ 3890 calib->ofdm_x1 = sc->limits->min_ofdm_x1; 3891 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; 3892 calib->ofdm_x4 = sc->limits->min_ofdm_x4; 3893 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; 3894 calib->cck_x4 = 125; 3895 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; 3896 calib->energy_cck = sc->limits->energy_cck; 3897 3898 /* Write initial sensitivity. */ 3899 if ((error = iwn_send_sensitivity(sc)) != 0) 3900 return error; 3901 3902 /* Write initial gains. */ 3903 if ((error = ops->init_gains(sc)) != 0) 3904 return error; 3905 3906 /* Request statistics at each beacon interval. */ 3907 flags = 0; 3908 DPRINTF(("sending request for statistics\n")); 3909 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 3910 } 3911 3912 /* 3913 * Collect noise and RSSI statistics for the first 20 beacons received 3914 * after association and use them to determine connected antennas and 3915 * to set differential gains. 3916 */ 3917 static void 3918 iwn_collect_noise(struct iwn_softc *sc, 3919 const struct iwn_rx_general_stats *stats) 3920 { 3921 struct iwn_ops *ops = &sc->ops; 3922 struct iwn_calib_state *calib = &sc->calib; 3923 uint32_t val; 3924 int i; 3925 3926 /* Accumulate RSSI and noise for all 3 antennas. */ 3927 for (i = 0; i < 3; i++) { 3928 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff; 3929 calib->noise[i] += le32toh(stats->noise[i]) & 0xff; 3930 } 3931 /* NB: We update differential gains only once after 20 beacons. */ 3932 if (++calib->nbeacons < 20) 3933 return; 3934 3935 /* Determine highest average RSSI. */ 3936 val = MAX(calib->rssi[0], calib->rssi[1]); 3937 val = MAX(calib->rssi[2], val); 3938 3939 /* Determine which antennas are connected. */ 3940 sc->chainmask = sc->rxchainmask; 3941 for (i = 0; i < 3; i++) 3942 if (val - calib->rssi[i] > 15 * 20) 3943 sc->chainmask &= ~(1 << i); 3944 DPRINTF(("RX chains mask: theoretical=0x%x, actual=0x%x\n", 3945 sc->rxchainmask, sc->chainmask)); 3946 3947 /* If none of the TX antennas are connected, keep at least one. */ 3948 if ((sc->chainmask & sc->txchainmask) == 0) 3949 sc->chainmask |= IWN_LSB(sc->txchainmask); 3950 3951 (void)ops->set_gains(sc); 3952 calib->state = IWN_CALIB_STATE_RUN; 3953 3954 #ifdef notyet 3955 /* XXX Disable RX chains with no antennas connected. */ 3956 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask)); 3957 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 3958 #endif 3959 3960 /* Enable power-saving mode if requested by user. */ 3961 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) 3962 (void)iwn_set_pslevel(sc, 0, 3, 1); 3963 } 3964 3965 static int 3966 iwn4965_init_gains(struct iwn_softc *sc) 3967 { 3968 struct iwn_phy_calib_gain cmd; 3969 3970 memset(&cmd, 0, sizeof cmd); 3971 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 3972 /* Differential gains initially set to 0 for all 3 antennas. 
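 * (The memset() above already cleared cmd.gain[]; the same encoding is
 * used later by iwn4965_set_gains(): steps of 1.5 dB with bit 2 as the
 * sign, so e.g. a -3 dB adjustment is sent as 2 | (1 << 2) = 0x6.)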
*/ 3973 DPRINTF(("setting initial differential gains\n")); 3974 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 3975 } 3976 3977 static int 3978 iwn5000_init_gains(struct iwn_softc *sc) 3979 { 3980 struct iwn_phy_calib cmd; 3981 3982 memset(&cmd, 0, sizeof cmd); 3983 cmd.code = sc->reset_noise_gain; 3984 cmd.ngroups = 1; 3985 cmd.isvalid = 1; 3986 DPRINTF(("setting initial differential gains\n")); 3987 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 3988 } 3989 3990 static int 3991 iwn4965_set_gains(struct iwn_softc *sc) 3992 { 3993 struct iwn_calib_state *calib = &sc->calib; 3994 struct iwn_phy_calib_gain cmd; 3995 int i, delta, noise; 3996 3997 /* Get minimal noise among connected antennas. */ 3998 noise = INT_MAX; /* NB: There's at least one antenna. */ 3999 for (i = 0; i < 3; i++) 4000 if (sc->chainmask & (1 << i)) 4001 noise = MIN(calib->noise[i], noise); 4002 4003 memset(&cmd, 0, sizeof cmd); 4004 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 4005 /* Set differential gains for connected antennas. */ 4006 for (i = 0; i < 3; i++) { 4007 if (sc->chainmask & (1 << i)) { 4008 /* Compute attenuation (in unit of 1.5dB). */ 4009 delta = (noise - (int32_t)calib->noise[i]) / 30; 4010 /* NB: delta <= 0 */ 4011 /* Limit to [-4.5dB,0]. */ 4012 cmd.gain[i] = MIN(abs(delta), 3); 4013 if (delta < 0) 4014 cmd.gain[i] |= 1 << 2; /* sign bit */ 4015 } 4016 } 4017 DPRINTF(("setting differential gains Ant A/B/C: %x/%x/%x (%x)\n", 4018 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask)); 4019 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4020 } 4021 4022 static int 4023 iwn5000_set_gains(struct iwn_softc *sc) 4024 { 4025 struct iwn_calib_state *calib = &sc->calib; 4026 struct iwn_phy_calib_gain cmd; 4027 int i, ant, div, delta; 4028 4029 /* We collected 20 beacons and !=6050 need a 1.5 factor. */ 4030 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30; 4031 4032 memset(&cmd, 0, sizeof cmd); 4033 cmd.code = sc->noise_gain; 4034 cmd.ngroups = 1; 4035 cmd.isvalid = 1; 4036 /* Get first available RX antenna as referential. */ 4037 ant = IWN_LSB(sc->rxchainmask); 4038 /* Set differential gains for other antennas. */ 4039 for (i = ant + 1; i < 3; i++) { 4040 if (sc->chainmask & (1 << i)) { 4041 /* The delta is relative to antenna "ant". */ 4042 delta = ((int32_t)calib->noise[ant] - 4043 (int32_t)calib->noise[i]) / div; 4044 /* Limit to [-4.5dB,+4.5dB]. */ 4045 cmd.gain[i - 1] = MIN(abs(delta), 3); 4046 if (delta < 0) 4047 cmd.gain[i - 1] |= 1 << 2; /* sign bit */ 4048 } 4049 } 4050 DPRINTF(("setting differential gains: %x/%x (%x)\n", 4051 cmd.gain[0], cmd.gain[1], sc->chainmask)); 4052 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4053 } 4054 4055 /* 4056 * Tune RF RX sensitivity based on the number of false alarms detected 4057 * during the last beacon period. 
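 * The per-interval counts are scaled by 200 * 1024 (200 TU expressed in
 * microseconds) and compared against the receiver-enable time reported
 * in the statistics: a scaled count above 50 times that time lowers
 * sensitivity, one below 5 times raises it, and anything in between
 * leaves the correlation thresholds untouched.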
4058 */ 4059 static void 4060 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) 4061 { 4062 #define inc(val, inc, max) \ 4063 if ((val) < (max)) { \ 4064 if ((val) < (max) - (inc)) \ 4065 (val) += (inc); \ 4066 else \ 4067 (val) = (max); \ 4068 needs_update = 1; \ 4069 } 4070 #define dec(val, dec, min) \ 4071 if ((val) > (min)) { \ 4072 if ((val) > (min) + (dec)) \ 4073 (val) -= (dec); \ 4074 else \ 4075 (val) = (min); \ 4076 needs_update = 1; \ 4077 } 4078 4079 const struct iwn_sensitivity_limits *limits = sc->limits; 4080 struct iwn_calib_state *calib = &sc->calib; 4081 uint32_t val, rxena, fa; 4082 uint32_t energy[3], energy_min; 4083 uint8_t noise[3], noise_ref; 4084 int i, needs_update = 0; 4085 4086 /* Check that we've been enabled long enough. */ 4087 if ((rxena = le32toh(stats->general.load)) == 0) 4088 return; 4089 4090 /* Compute number of false alarms since last call for OFDM. */ 4091 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; 4092 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm; 4093 fa *= 200 * 1024; /* 200TU */ 4094 4095 /* Save counters values for next call. */ 4096 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp); 4097 calib->fa_ofdm = le32toh(stats->ofdm.fa); 4098 4099 if (fa > 50 * rxena) { 4100 /* High false alarm count, decrease sensitivity. */ 4101 DPRINTFN(2, ("OFDM high false alarm count: %u\n", fa)); 4102 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); 4103 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); 4104 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); 4105 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); 4106 4107 } else if (fa < 5 * rxena) { 4108 /* Low false alarm count, increase sensitivity. */ 4109 DPRINTFN(2, ("OFDM low false alarm count: %u\n", fa)); 4110 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); 4111 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); 4112 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); 4113 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); 4114 } 4115 4116 /* Compute maximum noise among 3 receivers. */ 4117 for (i = 0; i < 3; i++) 4118 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff; 4119 val = MAX(noise[0], noise[1]); 4120 val = MAX(noise[2], val); 4121 /* Insert it into our samples table. */ 4122 calib->noise_samples[calib->cur_noise_sample] = val; 4123 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; 4124 4125 /* Compute maximum noise among last 20 samples. */ 4126 noise_ref = calib->noise_samples[0]; 4127 for (i = 1; i < 20; i++) 4128 noise_ref = MAX(noise_ref, calib->noise_samples[i]); 4129 4130 /* Compute maximum energy among 3 receivers. */ 4131 for (i = 0; i < 3; i++) 4132 energy[i] = le32toh(stats->general.energy[i]); 4133 val = MIN(energy[0], energy[1]); 4134 val = MIN(energy[2], val); 4135 /* Insert it into our samples table. */ 4136 calib->energy_samples[calib->cur_energy_sample] = val; 4137 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; 4138 4139 /* Compute minimum energy among last 10 samples. */ 4140 energy_min = calib->energy_samples[0]; 4141 for (i = 1; i < 10; i++) 4142 energy_min = MAX(energy_min, calib->energy_samples[i]); 4143 energy_min += 6; 4144 4145 /* Compute number of false alarms since last call for CCK. */ 4146 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck; 4147 fa += le32toh(stats->cck.fa) - calib->fa_cck; 4148 fa *= 200 * 1024; /* 200TU */ 4149 4150 /* Save counters values for next call. 
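 * (The firmware counters are cumulative, so the values kept here are
 * subtracted on the next pass: e.g. a cumulative fa count going from
 * 120 to 150 between two statistics reports yields a per-interval count
 * of 30 before scaling; the numbers are only illustrative.)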
*/ 4151 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp); 4152 calib->fa_cck = le32toh(stats->cck.fa); 4153 4154 if (fa > 50 * rxena) { 4155 /* High false alarm count, decrease sensitivity. */ 4156 DPRINTFN(2, ("CCK high false alarm count: %u\n", fa)); 4157 calib->cck_state = IWN_CCK_STATE_HIFA; 4158 calib->low_fa = 0; 4159 4160 if (calib->cck_x4 > 160) { 4161 calib->noise_ref = noise_ref; 4162 if (calib->energy_cck > 2) 4163 dec(calib->energy_cck, 2, energy_min); 4164 } 4165 if (calib->cck_x4 < 160) { 4166 calib->cck_x4 = 161; 4167 needs_update = 1; 4168 } else 4169 inc(calib->cck_x4, 3, limits->max_cck_x4); 4170 4171 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); 4172 4173 } else if (fa < 5 * rxena) { 4174 /* Low false alarm count, increase sensitivity. */ 4175 DPRINTFN(2, ("CCK low false alarm count: %u\n", fa)); 4176 calib->cck_state = IWN_CCK_STATE_LOFA; 4177 calib->low_fa++; 4178 4179 if (calib->cck_state != IWN_CCK_STATE_INIT && 4180 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || 4181 calib->low_fa > 100)) { 4182 inc(calib->energy_cck, 2, limits->min_energy_cck); 4183 dec(calib->cck_x4, 3, limits->min_cck_x4); 4184 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); 4185 } 4186 } else { 4187 /* Not worth to increase or decrease sensitivity. */ 4188 DPRINTFN(2, ("CCK normal false alarm count: %u\n", fa)); 4189 calib->low_fa = 0; 4190 calib->noise_ref = noise_ref; 4191 4192 if (calib->cck_state == IWN_CCK_STATE_HIFA) { 4193 /* Previous interval had many false alarms. */ 4194 dec(calib->energy_cck, 8, energy_min); 4195 } 4196 calib->cck_state = IWN_CCK_STATE_INIT; 4197 } 4198 4199 if (needs_update) 4200 (void)iwn_send_sensitivity(sc); 4201 #undef dec 4202 #undef inc 4203 } 4204 4205 static int 4206 iwn_send_sensitivity(struct iwn_softc *sc) 4207 { 4208 struct iwn_calib_state *calib = &sc->calib; 4209 struct iwn_enhanced_sensitivity_cmd cmd; 4210 int len; 4211 4212 memset(&cmd, 0, sizeof cmd); 4213 len = sizeof (struct iwn_sensitivity_cmd); 4214 cmd.which = IWN_SENSITIVITY_WORKTBL; 4215 /* OFDM modulation. */ 4216 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); 4217 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); 4218 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); 4219 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); 4220 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm); 4221 cmd.energy_ofdm_th = htole16(62); 4222 /* CCK modulation. */ 4223 cmd.corr_cck_x4 = htole16(calib->cck_x4); 4224 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); 4225 cmd.energy_cck = htole16(calib->energy_cck); 4226 /* Barker modulation: use default values. */ 4227 cmd.corr_barker = htole16(190); 4228 cmd.corr_barker_mrc = htole16(sc->limits->barker_mrc); 4229 if (!(sc->sc_flags & IWN_FLAG_ENH_SENS)) 4230 goto send; 4231 /* Enhanced sensitivity settings. */ 4232 len = sizeof (struct iwn_enhanced_sensitivity_cmd); 4233 cmd.ofdm_det_slope_mrc = htole16(668); 4234 cmd.ofdm_det_icept_mrc = htole16(4); 4235 cmd.ofdm_det_slope = htole16(486); 4236 cmd.ofdm_det_icept = htole16(37); 4237 cmd.cck_det_slope_mrc = htole16(853); 4238 cmd.cck_det_icept_mrc = htole16(4); 4239 cmd.cck_det_slope = htole16(476); 4240 cmd.cck_det_icept = htole16(99); 4241 send: 4242 DPRINTFN(2, ("setting sensitivity %d/%d/%d/%d/%d/%d/%d\n", 4243 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4, 4244 calib->ofdm_mrc_x4, calib->cck_x4, calib->cck_mrc_x4, 4245 calib->energy_cck)); 4246 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1); 4247 } 4248 4249 /* 4250 * Set STA mode power saving level (between 0 and 5). 
4251 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 4252 */ 4253 static int 4254 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) 4255 { 4256 struct iwn_pmgt_cmd cmd; 4257 const struct iwn_pmgt *pmgt; 4258 uint32_t maxp, skip_dtim; 4259 pcireg_t reg; 4260 int i; 4261 4262 /* Select which PS parameters to use. */ 4263 if (dtim <= 2) 4264 pmgt = &iwn_pmgt[0][level]; 4265 else if (dtim <= 10) 4266 pmgt = &iwn_pmgt[1][level]; 4267 else 4268 pmgt = &iwn_pmgt[2][level]; 4269 4270 memset(&cmd, 0, sizeof cmd); 4271 if (level != 0) /* not CAM */ 4272 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); 4273 if (level == 5) 4274 cmd.flags |= htole16(IWN_PS_FAST_PD); 4275 /* Retrieve PCIe Active State Power Management (ASPM). */ 4276 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 4277 sc->sc_cap_off + PCIE_LCSR); 4278 if (!(reg & PCIE_LCSR_ASPM_L0S)) /* L0s Entry disabled. */ 4279 cmd.flags |= htole16(IWN_PS_PCI_PMGT); 4280 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); 4281 cmd.txtimeout = htole32(pmgt->txtimeout * 1024); 4282 4283 if (dtim == 0) { 4284 dtim = 1; 4285 skip_dtim = 0; 4286 } else 4287 skip_dtim = pmgt->skip_dtim; 4288 if (skip_dtim != 0) { 4289 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); 4290 maxp = pmgt->intval[4]; 4291 if (maxp == (uint32_t)-1) 4292 maxp = dtim * (skip_dtim + 1); 4293 else if (maxp > dtim) 4294 maxp = (maxp / dtim) * dtim; 4295 } else 4296 maxp = dtim; 4297 for (i = 0; i < 5; i++) 4298 cmd.intval[i] = htole32(MIN(maxp, pmgt->intval[i])); 4299 4300 DPRINTF(("setting power saving level to %d\n", level)); 4301 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 4302 } 4303 4304 int 4305 iwn5000_runtime_calib(struct iwn_softc *sc) 4306 { 4307 struct iwn5000_calib_config cmd; 4308 4309 memset(&cmd, 0, sizeof cmd); 4310 cmd.ucode.once.enable = 0xffffffff; 4311 cmd.ucode.once.start = IWN5000_CALIB_DC; 4312 DPRINTF(("configuring runtime calibration\n")); 4313 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0); 4314 } 4315 4316 static int 4317 iwn_config_bt_coex_bluetooth(struct iwn_softc *sc) 4318 { 4319 struct iwn_bluetooth bluetooth; 4320 4321 memset(&bluetooth, 0, sizeof bluetooth); 4322 bluetooth.flags = IWN_BT_COEX_ENABLE; 4323 bluetooth.lead_time = IWN_BT_LEAD_TIME_DEF; 4324 bluetooth.max_kill = IWN_BT_MAX_KILL_DEF; 4325 4326 DPRINTF(("configuring bluetooth coexistence\n")); 4327 return iwn_cmd(sc, IWN_CMD_BT_COEX, &bluetooth, sizeof bluetooth, 0); 4328 } 4329 4330 static int 4331 iwn_config_bt_coex_prio_table(struct iwn_softc *sc) 4332 { 4333 uint8_t prio_table[16]; 4334 4335 memset(&prio_table, 0, sizeof prio_table); 4336 prio_table[ 0] = 6; /* init calibration 1 */ 4337 prio_table[ 1] = 7; /* init calibration 2 */ 4338 prio_table[ 2] = 2; /* periodic calib low 1 */ 4339 prio_table[ 3] = 3; /* periodic calib low 2 */ 4340 prio_table[ 4] = 4; /* periodic calib high 1 */ 4341 prio_table[ 5] = 5; /* periodic calib high 2 */ 4342 prio_table[ 6] = 6; /* dtim */ 4343 prio_table[ 7] = 8; /* scan52 */ 4344 prio_table[ 8] = 10; /* scan24 */ 4345 4346 DPRINTF(("sending priority lookup table\n")); 4347 return iwn_cmd(sc, IWN_CMD_BT_COEX_PRIO_TABLE, 4348 &prio_table, sizeof prio_table, 0); 4349 } 4350 4351 static int 4352 iwn_config_bt_coex_adv_config(struct iwn_softc *sc, struct iwn_bt_basic *basic, 4353 size_t len) 4354 { 4355 struct iwn_btcoex_prot btprot; 4356 int error; 4357 4358 basic->bt.flags = IWN_BT_COEX_ENABLE; 4359 basic->bt.lead_time = IWN_BT_LEAD_TIME_DEF; 4360 basic->bt.max_kill = 
IWN_BT_MAX_KILL_DEF; 4361 basic->bt.bt3_timer_t7_value = IWN_BT_BT3_T7_DEF; 4362 basic->bt.kill_ack_mask = IWN_BT_KILL_ACK_MASK_DEF; 4363 basic->bt.kill_cts_mask = IWN_BT_KILL_CTS_MASK_DEF; 4364 basic->bt3_prio_sample_time = IWN_BT_BT3_PRIO_SAMPLE_DEF; 4365 basic->bt3_timer_t2_value = IWN_BT_BT3_T2_DEF; 4366 basic->bt3_lookup_table[ 0] = htole32(0xaaaaaaaa); /* Normal */ 4367 basic->bt3_lookup_table[ 1] = htole32(0xaaaaaaaa); 4368 basic->bt3_lookup_table[ 2] = htole32(0xaeaaaaaa); 4369 basic->bt3_lookup_table[ 3] = htole32(0xaaaaaaaa); 4370 basic->bt3_lookup_table[ 4] = htole32(0xcc00ff28); 4371 basic->bt3_lookup_table[ 5] = htole32(0x0000aaaa); 4372 basic->bt3_lookup_table[ 6] = htole32(0xcc00aaaa); 4373 basic->bt3_lookup_table[ 7] = htole32(0x0000aaaa); 4374 basic->bt3_lookup_table[ 8] = htole32(0xc0004000); 4375 basic->bt3_lookup_table[ 9] = htole32(0x00004000); 4376 basic->bt3_lookup_table[10] = htole32(0xf0005000); 4377 basic->bt3_lookup_table[11] = htole32(0xf0005000); 4378 basic->reduce_txpower = 0; /* not implemented */ 4379 basic->valid = IWN_BT_ALL_VALID_MASK; 4380 4381 DPRINTF(("configuring advanced bluetooth coexistence v1\n")); 4382 error = iwn_cmd(sc, IWN_CMD_BT_COEX, basic, len, 0); 4383 if (error != 0) { 4384 aprint_error_dev(sc->sc_dev, 4385 "could not configure advanced bluetooth coexistence\n"); 4386 return error; 4387 } 4388 4389 error = iwn_config_bt_coex_prio_table(sc); 4390 if (error != 0) { 4391 aprint_error_dev(sc->sc_dev, 4392 "could not send BT priority table\n"); 4393 return error; 4394 } 4395 4396 /* Force BT state machine change */ 4397 memset(&btprot, 0, sizeof btprot); 4398 btprot.open = 1; 4399 btprot.type = 1; 4400 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof btprot, 1); 4401 if (error != 0) { 4402 aprint_error_dev(sc->sc_dev, "could not open BT protocol\n"); 4403 return error; 4404 } 4405 4406 btprot.open = 0; 4407 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof btprot, 1); 4408 if (error != 0) { 4409 aprint_error_dev(sc->sc_dev, "could not close BT protocol\n"); 4410 return error; 4411 } 4412 return 0; 4413 } 4414 4415 static int 4416 iwn_config_bt_coex_adv1(struct iwn_softc *sc) 4417 { 4418 struct iwn_bt_adv1 d; 4419 4420 memset(&d, 0, sizeof d); 4421 d.prio_boost = IWN_BT_PRIO_BOOST_DEF; 4422 d.tx_prio_boost = 0; 4423 d.rx_prio_boost = 0; 4424 return iwn_config_bt_coex_adv_config(sc, &d.basic, sizeof d); 4425 } 4426 4427 static int 4428 iwn_config_bt_coex_adv2(struct iwn_softc *sc) 4429 { 4430 struct iwn_bt_adv2 d; 4431 4432 memset(&d, 0, sizeof d); 4433 d.prio_boost = IWN_BT_PRIO_BOOST_DEF; 4434 d.tx_prio_boost = 0; 4435 d.rx_prio_boost = 0; 4436 return iwn_config_bt_coex_adv_config(sc, &d.basic, sizeof d); 4437 } 4438 4439 static int 4440 iwn_config(struct iwn_softc *sc) 4441 { 4442 struct iwn_ops *ops = &sc->ops; 4443 struct ieee80211com *ic = &sc->sc_ic; 4444 struct ifnet *ifp = ic->ic_ifp; 4445 uint32_t txmask; 4446 uint16_t rxchain; 4447 int error; 4448 4449 error = ops->config_bt_coex(sc); 4450 if (error != 0) { 4451 aprint_error_dev(sc->sc_dev, 4452 "could not configure bluetooth coexistence\n"); 4453 return error; 4454 } 4455 4456 /* Set radio temperature sensor offset.
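 * Only some adapters need this: 6005 parts send the single EEPROM offset
 * via iwn6000_temp_offset_calib(), 2000/2030/105/135 parts send a
 * low/high pair plus the burnt voltage reference via
 * iwn2000_temp_offset_calib(), and both fall back to
 * IWN_DEFAULT_TEMP_OFFSET when the EEPROM value is zero.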
*/ 4457 if (sc->hw_type == IWN_HW_REV_TYPE_6005) { 4458 error = iwn6000_temp_offset_calib(sc); 4459 if (error != 0) { 4460 aprint_error_dev(sc->sc_dev, 4461 "could not set temperature offset\n"); 4462 return error; 4463 } 4464 } 4465 4466 if (sc->hw_type == IWN_HW_REV_TYPE_2030 || 4467 sc->hw_type == IWN_HW_REV_TYPE_2000 || 4468 sc->hw_type == IWN_HW_REV_TYPE_135 || 4469 sc->hw_type == IWN_HW_REV_TYPE_105) { 4470 error = iwn2000_temp_offset_calib(sc); 4471 if (error != 0) { 4472 aprint_error_dev(sc->sc_dev, 4473 "could not set temperature offset\n"); 4474 return error; 4475 } 4476 } 4477 4478 if (sc->hw_type == IWN_HW_REV_TYPE_6050 || 4479 sc->hw_type == IWN_HW_REV_TYPE_6005) { 4480 /* Configure runtime DC calibration. */ 4481 error = iwn5000_runtime_calib(sc); 4482 if (error != 0) { 4483 aprint_error_dev(sc->sc_dev, 4484 "could not configure runtime calibration\n"); 4485 return error; 4486 } 4487 } 4488 4489 /* Configure valid TX chains for 5000 Series. */ 4490 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4491 txmask = htole32(sc->txchainmask); 4492 DPRINTF(("configuring valid TX chains 0x%x\n", txmask)); 4493 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask, 4494 sizeof txmask, 0); 4495 if (error != 0) { 4496 aprint_error_dev(sc->sc_dev, 4497 "could not configure valid TX chains\n"); 4498 return error; 4499 } 4500 } 4501 4502 /* Set mode, channel, RX filter and enable RX. */ 4503 memset(&sc->rxon, 0, sizeof (struct iwn_rxon)); 4504 IEEE80211_ADDR_COPY(ic->ic_myaddr, CLLADDR(ifp->if_sadl)); 4505 IEEE80211_ADDR_COPY(sc->rxon.myaddr, ic->ic_myaddr); 4506 IEEE80211_ADDR_COPY(sc->rxon.wlap, ic->ic_myaddr); 4507 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_ibss_chan); 4508 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4509 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_ibss_chan)) 4510 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4511 switch (ic->ic_opmode) { 4512 case IEEE80211_M_STA: 4513 sc->rxon.mode = IWN_MODE_STA; 4514 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST); 4515 break; 4516 case IEEE80211_M_MONITOR: 4517 sc->rxon.mode = IWN_MODE_MONITOR; 4518 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST | 4519 IWN_FILTER_CTL | IWN_FILTER_PROMISC); 4520 break; 4521 default: 4522 /* Should not get there. */ 4523 break; 4524 } 4525 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 4526 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 4527 sc->rxon.ht_single_mask = 0xff; 4528 sc->rxon.ht_dual_mask = 0xff; 4529 sc->rxon.ht_triple_mask = 0xff; 4530 rxchain = 4531 IWN_RXCHAIN_VALID(sc->rxchainmask) | 4532 IWN_RXCHAIN_MIMO_COUNT(2) | 4533 IWN_RXCHAIN_IDLE_COUNT(2); 4534 sc->rxon.rxchain = htole16(rxchain); 4535 DPRINTF(("setting configuration\n")); 4536 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0); 4537 if (error != 0) { 4538 aprint_error_dev(sc->sc_dev, 4539 "RXON command failed\n"); 4540 return error; 4541 } 4542 4543 if ((error = iwn_add_broadcast_node(sc, 0)) != 0) { 4544 aprint_error_dev(sc->sc_dev, 4545 "could not add broadcast node\n"); 4546 return error; 4547 } 4548 4549 /* Configuration has changed, set TX power accordingly. */ 4550 if ((error = ops->set_txpower(sc, 0)) != 0) { 4551 aprint_error_dev(sc->sc_dev, 4552 "could not set TX power\n"); 4553 return error; 4554 } 4555 4556 if ((error = iwn_set_critical_temp(sc)) != 0) { 4557 aprint_error_dev(sc->sc_dev, 4558 "could not set critical temperature\n"); 4559 return error; 4560 } 4561 4562 /* Set power saving level to CAM during initialization. 
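 * (Level 0 leaves IWN_PS_ALLOW_SLEEP clear in the power management
 * command, so the device stays fully awake; iwn_collect_noise() later
 * switches to level 3 if the user enabled IEEE80211_F_PMGTON.)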
*/ 4563 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) { 4564 aprint_error_dev(sc->sc_dev, 4565 "could not set power saving level\n"); 4566 return error; 4567 } 4568 return 0; 4569 } 4570 4571 static uint16_t 4572 iwn_get_active_dwell_time(struct iwn_softc *sc, uint16_t flags, 4573 uint8_t n_probes) 4574 { 4575 /* No channel? Default to 2GHz settings */ 4576 if (flags & IEEE80211_CHAN_2GHZ) 4577 return IWN_ACTIVE_DWELL_TIME_2GHZ + 4578 IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1); 4579 4580 /* 5GHz dwell time */ 4581 return IWN_ACTIVE_DWELL_TIME_5GHZ + 4582 IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1); 4583 } 4584 4585 /* 4586 * Limit the total dwell time to 85% of the beacon interval. 4587 * 4588 * Returns the dwell time in milliseconds. 4589 */ 4590 static uint16_t 4591 iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time) 4592 { 4593 struct ieee80211com *ic = &sc->sc_ic; 4594 struct ieee80211_node *ni = ic->ic_bss; 4595 int bintval = 0; 4596 4597 /* bintval is in TU (1.024mS) */ 4598 if (ni != NULL) 4599 bintval = ni->ni_intval; 4600 4601 /* 4602 * If it's non-zero, we should calculate the minimum of 4603 * it and the DWELL_BASE. 4604 * 4605 * XXX Yes, the math should take into account that bintval 4606 * is 1.024mS, not 1mS.. 4607 */ 4608 if (bintval > 0) 4609 return MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100)); 4610 4611 /* No association context? Default */ 4612 return IWN_PASSIVE_DWELL_BASE; 4613 } 4614 4615 static uint16_t 4616 iwn_get_passive_dwell_time(struct iwn_softc *sc, uint16_t flags) 4617 { 4618 uint16_t passive; 4619 if (flags & IEEE80211_CHAN_2GHZ) 4620 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ; 4621 else 4622 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ; 4623 4624 /* Clamp to the beacon interval if we're associated */ 4625 return iwn_limit_dwell(sc, passive); 4626 } 4627 4628 static int 4629 iwn_scan(struct iwn_softc *sc, uint16_t flags) 4630 { 4631 struct ieee80211com *ic = &sc->sc_ic; 4632 struct iwn_scan_hdr *hdr; 4633 struct iwn_cmd_data *tx; 4634 struct iwn_scan_essid *essid; 4635 struct iwn_scan_chan *chan; 4636 struct ieee80211_frame *wh; 4637 struct ieee80211_rateset *rs; 4638 struct ieee80211_channel *c; 4639 uint8_t *buf, *frm; 4640 uint16_t rxchain, dwell_active, dwell_passive; 4641 uint8_t txant; 4642 int buflen, error, is_active; 4643 4644 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4645 if (buf == NULL) { 4646 aprint_error_dev(sc->sc_dev, 4647 "could not allocate buffer for scan command\n"); 4648 return ENOMEM; 4649 } 4650 hdr = (struct iwn_scan_hdr *)buf; 4651 /* 4652 * Move to the next channel if no frames are received within 10ms 4653 * after sending the probe request. 4654 */ 4655 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 4656 hdr->quiet_threshold = htole16(1); /* min # of packets */ 4657 4658 /* Select antennas for scanning. */ 4659 rxchain = 4660 IWN_RXCHAIN_VALID(sc->rxchainmask) | 4661 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) | 4662 IWN_RXCHAIN_DRIVER_FORCE; 4663 if ((flags & IEEE80211_CHAN_5GHZ) && 4664 sc->hw_type == IWN_HW_REV_TYPE_4965) { 4665 /* Ant A must be avoided in 5GHz because of an HW bug. */ 4666 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC); 4667 } else /* Use all available RX antennas. 
*/ 4668 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); 4669 hdr->rxchain = htole16(rxchain); 4670 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); 4671 4672 tx = (struct iwn_cmd_data *)(hdr + 1); 4673 tx->flags = htole32(IWN_TX_AUTO_SEQ); 4674 tx->id = sc->broadcast_id; 4675 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4676 4677 if (flags & IEEE80211_CHAN_5GHZ) { 4678 hdr->crc_threshold = 0xffff; 4679 /* Send probe requests at 6Mbps. */ 4680 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp; 4681 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4682 } else { 4683 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); 4684 /* Send probe requests at 1Mbps. */ 4685 tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp; 4686 tx->rflags = IWN_RFLAG_CCK; 4687 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4688 } 4689 /* Use the first valid TX antenna. */ 4690 txant = IWN_LSB(sc->txchainmask); 4691 tx->rflags |= IWN_RFLAG_ANT(txant); 4692 4693 /* 4694 * Only do active scanning if we're announcing a probe request 4695 * for a given SSID (or more, if we ever add it to the driver.) 4696 */ 4697 is_active = 0; 4698 4699 essid = (struct iwn_scan_essid *)(tx + 1); 4700 if (ic->ic_des_esslen != 0) { 4701 essid[0].id = IEEE80211_ELEMID_SSID; 4702 essid[0].len = ic->ic_des_esslen; 4703 memcpy(essid[0].data, ic->ic_des_essid, ic->ic_des_esslen); 4704 4705 is_active = 1; 4706 } 4707 /* 4708 * Build a probe request frame. Most of the following code is a 4709 * copy & paste of what is done in net80211. 4710 */ 4711 wh = (struct ieee80211_frame *)(essid + 20); 4712 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4713 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4714 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4715 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr); 4716 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr); 4717 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr); 4718 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 4719 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 4720 4721 frm = (uint8_t *)(wh + 1); 4722 frm = ieee80211_add_ssid(frm, NULL, 0); 4723 frm = ieee80211_add_rates(frm, rs); 4724 #ifndef IEEE80211_NO_HT 4725 if (ic->ic_flags & IEEE80211_F_HTON) 4726 frm = ieee80211_add_htcaps(frm, ic); 4727 #endif 4728 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4729 frm = ieee80211_add_xrates(frm, rs); 4730 4731 /* Set length of probe request. */ 4732 tx->len = htole16(frm - (uint8_t *)wh); 4733 4734 4735 /* 4736 * If active scanning is requested but a certain channel is 4737 * marked passive, we can do active scanning if we detect 4738 * transmissions. 4739 * 4740 * There is an issue with some firmware versions that triggers 4741 * a sysassert on a "good CRC threshold" of zero (== disabled), 4742 * on a radar channel even though this means that we should NOT 4743 * send probes. 4744 * 4745 * The "good CRC threshold" is the number of frames that we 4746 * need to receive during our dwell time on a channel before 4747 * sending out probes -- setting this to a huge value will 4748 * mean we never reach it, but at the same time work around 4749 * the aforementioned issue. Thus use IWN_GOOD_CRC_TH_NEVER 4750 * here instead of IWN_GOOD_CRC_TH_DISABLED. 4751 * 4752 * This was fixed in later versions along with some other 4753 * scan changes, and the threshold behaves as a flag in those 4754 * versions. 4755 */ 4756 4757 /* 4758 * If we're doing active scanning, set the crc_threshold 4759 * to a suitable value. 
This is different for active versus 4760 * passive scanning depending upon the channel flags; the 4761 * firmware will obey that particular check for us. 4762 */ 4763 if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN) 4764 hdr->crc_threshold = is_active ? 4765 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED; 4766 else 4767 hdr->crc_threshold = is_active ? 4768 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER; 4769 4770 chan = (struct iwn_scan_chan *)frm; 4771 for (c = &ic->ic_channels[1]; 4772 c <= &ic->ic_channels[IEEE80211_CHAN_MAX]; c++) { 4773 if ((c->ic_flags & flags) != flags) 4774 continue; 4775 4776 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 4777 DPRINTFN(2, ("adding channel %d\n", chan->chan)); 4778 chan->flags = 0; 4779 if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) 4780 chan->flags |= htole32(IWN_CHAN_ACTIVE); 4781 if (ic->ic_des_esslen != 0) 4782 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 4783 4784 /* 4785 * Calculate the active/passive dwell times. 4786 */ 4787 4788 dwell_active = iwn_get_active_dwell_time(sc, flags, is_active); 4789 dwell_passive = iwn_get_passive_dwell_time(sc, flags); 4790 4791 /* Make sure they're valid */ 4792 if (dwell_passive <= dwell_active) 4793 dwell_passive = dwell_active + 1; 4794 4795 chan->active = htole16(dwell_active); 4796 chan->passive = htole16(dwell_passive); 4797 4798 chan->dsp_gain = 0x6e; 4799 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4800 chan->rf_gain = 0x3b; 4801 } else { 4802 chan->rf_gain = 0x28; 4803 } 4804 hdr->nchan++; 4805 chan++; 4806 } 4807 4808 buflen = (uint8_t *)chan - buf; 4809 hdr->len = htole16(buflen); 4810 4811 DPRINTF(("sending scan command nchan=%d\n", hdr->nchan)); 4812 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 4813 free(buf, M_DEVBUF); 4814 return error; 4815 } 4816 4817 static int 4818 iwn_auth(struct iwn_softc *sc) 4819 { 4820 struct iwn_ops *ops = &sc->ops; 4821 struct ieee80211com *ic = &sc->sc_ic; 4822 struct ieee80211_node *ni = ic->ic_bss; 4823 int error; 4824 4825 /* Update adapter configuration. */ 4826 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4827 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan); 4828 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4829 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 4830 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4831 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4832 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4833 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4834 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4835 switch (ic->ic_curmode) { 4836 case IEEE80211_MODE_11A: 4837 sc->rxon.cck_mask = 0; 4838 sc->rxon.ofdm_mask = 0x15; 4839 break; 4840 case IEEE80211_MODE_11B: 4841 sc->rxon.cck_mask = 0x03; 4842 sc->rxon.ofdm_mask = 0; 4843 break; 4844 default: /* Assume 802.11b/g. */ 4845 sc->rxon.cck_mask = 0x0f; 4846 sc->rxon.ofdm_mask = 0x15; 4847 } 4848 DPRINTF(("rxon chan %d flags %x cck %x ofdm %x\n", sc->rxon.chan, 4849 sc->rxon.flags, sc->rxon.cck_mask, sc->rxon.ofdm_mask)); 4850 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 4851 if (error != 0) { 4852 aprint_error_dev(sc->sc_dev, 4853 "RXON command failed\n"); 4854 return error; 4855 } 4856 4857 /* Configuration has changed, set TX power accordingly. */ 4858 if ((error = ops->set_txpower(sc, 1)) != 0) { 4859 aprint_error_dev(sc->sc_dev, 4860 "could not set TX power\n"); 4861 return error; 4862 } 4863 /* 4864 * Reconfiguring RXON clears the firmware nodes table so we must 4865 * add the broadcast node again.
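 * The node is re-added below with a non-zero last argument, matching the
 * asynchronous commands used elsewhere in iwn_auth(), whereas
 * iwn_config() passes 0 before the interface is fully up.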
4866 */ 4867 if ((error = iwn_add_broadcast_node(sc, 1)) != 0) { 4868 aprint_error_dev(sc->sc_dev, 4869 "could not add broadcast node\n"); 4870 return error; 4871 } 4872 return 0; 4873 } 4874 4875 static int 4876 iwn_run(struct iwn_softc *sc) 4877 { 4878 struct iwn_ops *ops = &sc->ops; 4879 struct ieee80211com *ic = &sc->sc_ic; 4880 struct ieee80211_node *ni = ic->ic_bss; 4881 struct iwn_node_info node; 4882 int error; 4883 4884 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 4885 /* Link LED blinks while monitoring. */ 4886 iwn_set_led(sc, IWN_LED_LINK, 5, 5); 4887 return 0; 4888 } 4889 if ((error = iwn_set_timing(sc, ni)) != 0) { 4890 aprint_error_dev(sc->sc_dev, 4891 "could not set timing\n"); 4892 return error; 4893 } 4894 4895 /* Update adapter configuration. */ 4896 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd)); 4897 /* Short preamble and slot time are negotiated when associating. */ 4898 sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT); 4899 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4900 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4901 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4902 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4903 sc->rxon.filter |= htole32(IWN_FILTER_BSS); 4904 DPRINTF(("rxon chan %d flags %x\n", sc->rxon.chan, sc->rxon.flags)); 4905 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 4906 if (error != 0) { 4907 aprint_error_dev(sc->sc_dev, 4908 "could not update configuration\n"); 4909 return error; 4910 } 4911 4912 /* Configuration has changed, set TX power accordingly. */ 4913 if ((error = ops->set_txpower(sc, 1)) != 0) { 4914 aprint_error_dev(sc->sc_dev, 4915 "could not set TX power\n"); 4916 return error; 4917 } 4918 4919 /* Fake a join to initialize the TX rate. */ 4920 ((struct iwn_node *)ni)->id = IWN_ID_BSS; 4921 iwn_newassoc(ni, 1); 4922 4923 /* Add BSS node. */ 4924 memset(&node, 0, sizeof node); 4925 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 4926 node.id = IWN_ID_BSS; 4927 #ifdef notyet 4928 node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(3) | 4929 IWN_AMDPU_DENSITY(5)); /* 2us */ 4930 #endif 4931 DPRINTF(("adding BSS node\n")); 4932 error = ops->add_node(sc, &node, 1); 4933 if (error != 0) { 4934 aprint_error_dev(sc->sc_dev, 4935 "could not add BSS node\n"); 4936 return error; 4937 } 4938 DPRINTF(("setting link quality for node %d\n", node.id)); 4939 if ((error = iwn_set_link_quality(sc, ni)) != 0) { 4940 aprint_error_dev(sc->sc_dev, 4941 "could not setup link quality for node %d\n", node.id); 4942 return error; 4943 } 4944 4945 if ((error = iwn_init_sensitivity(sc)) != 0) { 4946 aprint_error_dev(sc->sc_dev, 4947 "could not set sensitivity\n"); 4948 return error; 4949 } 4950 /* Start periodic calibration timer. */ 4951 sc->calib.state = IWN_CALIB_STATE_ASSOC; 4952 sc->calib_cnt = 0; 4953 callout_schedule(&sc->calib_to, hz/2); 4954 4955 /* Link LED always on while associated. */ 4956 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 4957 return 0; 4958 } 4959 4960 #ifdef IWN_HWCRYPTO 4961 /* 4962 * We support CCMP hardware encryption/decryption of unicast frames only. 4963 * HW support for TKIP really sucks. We should let TKIP die anyway. 
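 * Everything else (group keys and any cipher other than CCMP) is punted
 * back to net80211's software crypto via ieee80211_set_key() and
 * ieee80211_delete_key(); only pairwise CCMP keys are pushed into the
 * firmware node table with IWN_FLAG_SET_KEY below.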
4964 */ 4965 static int 4966 iwn_set_key(struct ieee80211com *ic, struct ieee80211_node *ni, 4967 struct ieee80211_key *k) 4968 { 4969 struct iwn_softc *sc = ic->ic_softc; 4970 struct iwn_ops *ops = &sc->ops; 4971 struct iwn_node *wn = (void *)ni; 4972 struct iwn_node_info node; 4973 uint16_t kflags; 4974 4975 if ((k->k_flags & IEEE80211_KEY_GROUP) || 4976 k->k_cipher != IEEE80211_CIPHER_CCMP) 4977 return ieee80211_set_key(ic, ni, k); 4978 4979 kflags = IWN_KFLAG_CCMP | IWN_KFLAG_MAP | IWN_KFLAG_KID(k->k_id); 4980 if (k->k_flags & IEEE80211_KEY_GROUP) 4981 kflags |= IWN_KFLAG_GROUP; 4982 4983 memset(&node, 0, sizeof node); 4984 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ? 4985 sc->broadcast_id : wn->id; 4986 node.control = IWN_NODE_UPDATE; 4987 node.flags = IWN_FLAG_SET_KEY; 4988 node.kflags = htole16(kflags); 4989 node.kid = k->k_id; 4990 memcpy(node.key, k->k_key, k->k_len); 4991 DPRINTF(("set key id=%d for node %d\n", k->k_id, node.id)); 4992 return ops->add_node(sc, &node, 1); 4993 } 4994 4995 static void 4996 iwn_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni, 4997 struct ieee80211_key *k) 4998 { 4999 struct iwn_softc *sc = ic->ic_softc; 5000 struct iwn_ops *ops = &sc->ops; 5001 struct iwn_node *wn = (void *)ni; 5002 struct iwn_node_info node; 5003 5004 if ((k->k_flags & IEEE80211_KEY_GROUP) || 5005 k->k_cipher != IEEE80211_CIPHER_CCMP) { 5006 /* See comment about other ciphers above. */ 5007 ieee80211_delete_key(ic, ni, k); 5008 return; 5009 } 5010 if (ic->ic_state != IEEE80211_S_RUN) 5011 return; /* Nothing to do. */ 5012 memset(&node, 0, sizeof node); 5013 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ? 5014 sc->broadcast_id : wn->id; 5015 node.control = IWN_NODE_UPDATE; 5016 node.flags = IWN_FLAG_SET_KEY; 5017 node.kflags = htole16(IWN_KFLAG_INVALID); 5018 node.kid = 0xff; 5019 DPRINTF(("delete keys for node %d\n", node.id)); 5020 (void)ops->add_node(sc, &node, 1); 5021 } 5022 #endif 5023 5024 /* XXX Added for NetBSD (copied from rev 1.39). */ 5025 5026 static int 5027 iwn_wme_update(struct ieee80211com *ic) 5028 { 5029 #define IWN_EXP2(v) htole16((1 << (v)) - 1) 5030 #define IWN_USEC(v) htole16(IEEE80211_TXOP_TO_US(v)) 5031 struct iwn_softc *sc = ic->ic_ifp->if_softc; 5032 const struct wmeParams *wmep; 5033 struct iwn_edca_params cmd; 5034 int ac; 5035 5036 /* don't override default WME values if WME is not actually enabled */ 5037 if (!(ic->ic_flags & IEEE80211_F_WME)) 5038 return 0; 5039 cmd.flags = 0; 5040 for (ac = 0; ac < WME_NUM_AC; ac++) { 5041 wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 5042 cmd.ac[ac].aifsn = wmep->wmep_aifsn; 5043 cmd.ac[ac].cwmin = IWN_EXP2(wmep->wmep_logcwmin); 5044 cmd.ac[ac].cwmax = IWN_EXP2(wmep->wmep_logcwmax); 5045 cmd.ac[ac].txoplimit = IWN_USEC(wmep->wmep_txopLimit); 5046 5047 DPRINTF(("setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 5048 "txop=%d\n", ac, cmd.ac[ac].aifsn, 5049 cmd.ac[ac].cwmin, 5050 cmd.ac[ac].cwmax, cmd.ac[ac].txoplimit)); 5051 } 5052 return iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 5053 #undef IWN_USEC 5054 #undef IWN_EXP2 5055 } 5056 5057 #ifndef IEEE80211_NO_HT 5058 /* 5059 * This function is called by upper layer when an ADDBA request is received 5060 * from another STA and before the ADDBA response is sent. 
5061 */ 5062 static int 5063 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 5064 uint8_t tid) 5065 { 5066 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid]; 5067 struct iwn_softc *sc = ic->ic_softc; 5068 struct iwn_ops *ops = &sc->ops; 5069 struct iwn_node *wn = (void *)ni; 5070 struct iwn_node_info node; 5071 5072 memset(&node, 0, sizeof node); 5073 node.id = wn->id; 5074 node.control = IWN_NODE_UPDATE; 5075 node.flags = IWN_FLAG_SET_ADDBA; 5076 node.addba_tid = tid; 5077 node.addba_ssn = htole16(ba->ba_winstart); 5078 DPRINTFN(2, ("ADDBA RA=%d TID=%d SSN=%d\n", wn->id, tid, 5079 ba->ba_winstart)); 5080 return ops->add_node(sc, &node, 1); 5081 } 5082 5083 /* 5084 * This function is called by upper layer on teardown of an HT-immediate 5085 * Block Ack agreement (e.g. upon receipt of a DELBA frame). 5086 */ 5087 static void 5088 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 5089 uint8_t tid) 5090 { 5091 struct iwn_softc *sc = ic->ic_softc; 5092 struct iwn_ops *ops = &sc->ops; 5093 struct iwn_node *wn = (void *)ni; 5094 struct iwn_node_info node; 5095 5096 memset(&node, 0, sizeof node); 5097 node.id = wn->id; 5098 node.control = IWN_NODE_UPDATE; 5099 node.flags = IWN_FLAG_SET_DELBA; 5100 node.delba_tid = tid; 5101 DPRINTFN(2, ("DELBA RA=%d TID=%d\n", wn->id, tid)); 5102 (void)ops->add_node(sc, &node, 1); 5103 } 5104 5105 /* 5106 * This function is called by upper layer when an ADDBA response is received 5107 * from another STA. 5108 */ 5109 static int 5110 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 5111 uint8_t tid) 5112 { 5113 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 5114 struct iwn_softc *sc = ic->ic_softc; 5115 struct iwn_ops *ops = &sc->ops; 5116 struct iwn_node *wn = (void *)ni; 5117 struct iwn_node_info node; 5118 int error; 5119 5120 /* Enable TX for the specified RA/TID. */ 5121 wn->disable_tid &= ~(1 << tid); 5122 memset(&node, 0, sizeof node); 5123 node.id = wn->id; 5124 node.control = IWN_NODE_UPDATE; 5125 node.flags = IWN_FLAG_SET_DISABLE_TID; 5126 node.disable_tid = htole16(wn->disable_tid); 5127 error = ops->add_node(sc, &node, 1); 5128 if (error != 0) 5129 return error; 5130 5131 if ((error = iwn_nic_lock(sc)) != 0) 5132 return error; 5133 ops->ampdu_tx_start(sc, ni, tid, ba->ba_winstart); 5134 iwn_nic_unlock(sc); 5135 return 0; 5136 } 5137 5138 static void 5139 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 5140 uint8_t tid) 5141 { 5142 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 5143 struct iwn_softc *sc = ic->ic_softc; 5144 struct iwn_ops *ops = &sc->ops; 5145 5146 if (iwn_nic_lock(sc) != 0) 5147 return; 5148 ops->ampdu_tx_stop(sc, tid, ba->ba_winstart); 5149 iwn_nic_unlock(sc); 5150 } 5151 5152 static void 5153 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5154 uint8_t tid, uint16_t ssn) 5155 { 5156 struct iwn_node *wn = (void *)ni; 5157 int qid = 7 + tid; 5158 5159 /* Stop TX scheduler while we're changing its configuration. */ 5160 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5161 IWN4965_TXQ_STATUS_CHGACT); 5162 5163 /* Assign RA/TID translation to the queue. */ 5164 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), 5165 wn->id << 4 | tid); 5166 5167 /* Enable chain-building mode for the queue. */ 5168 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); 5169 5170 /* Set starting sequence number from the ADDBA request.
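 * (The write below packs the queue index and the low byte of the
 * sequence number into a single register value, qid << 8 | (ssn & 0xff):
 * for TID 2 on a 4965, qid is 7 + 2 = 9, so a hypothetical SSN of 0x123
 * would be written as 0x923.)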
*/ 5171 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5172 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 5173 5174 /* Set scheduler window size. */ 5175 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), 5176 IWN_SCHED_WINSZ); 5177 /* Set scheduler frame limit. */ 5178 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5179 IWN_SCHED_LIMIT << 16); 5180 5181 /* Enable interrupts for the queue. */ 5182 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 5183 5184 /* Mark the queue as active. */ 5185 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5186 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | 5187 iwn_tid2fifo[tid] << 1); 5188 } 5189 5190 static void 5191 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 5192 { 5193 int qid = 7 + tid; 5194 5195 /* Stop TX scheduler while we're changing its configuration. */ 5196 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5197 IWN4965_TXQ_STATUS_CHGACT); 5198 5199 /* Set starting sequence number from the ADDBA request. */ 5200 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5201 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 5202 5203 /* Disable interrupts for the queue. */ 5204 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 5205 5206 /* Mark the queue as inactive. */ 5207 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5208 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 5209 } 5210 5211 static void 5212 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5213 uint8_t tid, uint16_t ssn) 5214 { 5215 struct iwn_node *wn = (void *)ni; 5216 int qid = 10 + tid; 5217 5218 /* Stop TX scheduler while we're changing its configuration. */ 5219 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5220 IWN5000_TXQ_STATUS_CHGACT); 5221 5222 /* Assign RA/TID translation to the queue. */ 5223 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 5224 wn->id << 4 | tid); 5225 5226 /* Enable chain-building mode for the queue. */ 5227 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 5228 5229 /* Enable aggregation for the queue. */ 5230 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 5231 5232 /* Set starting sequence number from the ADDBA request. */ 5233 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5234 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 5235 5236 /* Set scheduler window size and frame limit. */ 5237 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5238 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5239 5240 /* Enable interrupts for the queue. */ 5241 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 5242 5243 /* Mark the queue as active. */ 5244 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5245 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 5246 } 5247 5248 static void 5249 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 5250 { 5251 int qid = 10 + tid; 5252 5253 /* Stop TX scheduler while we're changing its configuration. */ 5254 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5255 IWN5000_TXQ_STATUS_CHGACT); 5256 5257 /* Disable aggregation for the queue. */ 5258 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 5259 5260 /* Set starting sequence number from the ADDBA request. */ 5261 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5262 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 5263 5264 /* Disable interrupts for the queue. 
*/ 5265 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 5266 5267 /* Mark the queue as inactive. */ 5268 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5269 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); 5270 } 5271 #endif /* !IEEE80211_NO_HT */ 5272 5273 /* 5274 * Query calibration tables from the initialization firmware. We do this 5275 * only once at first boot. Called from a process context. 5276 */ 5277 static int 5278 iwn5000_query_calibration(struct iwn_softc *sc) 5279 { 5280 struct iwn5000_calib_config cmd; 5281 int error; 5282 5283 memset(&cmd, 0, sizeof cmd); 5284 cmd.ucode.once.enable = 0xffffffff; 5285 cmd.ucode.once.start = 0xffffffff; 5286 cmd.ucode.once.send = 0xffffffff; 5287 cmd.ucode.flags = 0xffffffff; 5288 DPRINTF(("sending calibration query\n")); 5289 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); 5290 if (error != 0) 5291 return error; 5292 5293 /* Wait at most two seconds for calibration to complete. */ 5294 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) 5295 error = tsleep(sc, PCATCH, "iwncal", 2 * hz); 5296 return error; 5297 } 5298 5299 /* 5300 * Send calibration results to the runtime firmware. These results were 5301 * obtained on first boot from the initialization firmware. 5302 */ 5303 static int 5304 iwn5000_send_calibration(struct iwn_softc *sc) 5305 { 5306 int idx, error; 5307 5308 for (idx = 0; idx < 5; idx++) { 5309 if (sc->calibcmd[idx].buf == NULL) 5310 continue; /* No results available. */ 5311 DPRINTF(("send calibration result idx=%d len=%d\n", 5312 idx, sc->calibcmd[idx].len)); 5313 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf, 5314 sc->calibcmd[idx].len, 0); 5315 if (error != 0) { 5316 aprint_error_dev(sc->sc_dev, 5317 "could not send calibration result\n"); 5318 return error; 5319 } 5320 } 5321 return 0; 5322 } 5323 5324 static int 5325 iwn5000_send_wimax_coex(struct iwn_softc *sc) 5326 { 5327 struct iwn5000_wimax_coex wimax; 5328 5329 #ifdef notyet 5330 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 5331 /* Enable WiMAX coexistence for combo adapters. */ 5332 wimax.flags = 5333 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 5334 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 5335 IWN_WIMAX_COEX_STA_TABLE_VALID | 5336 IWN_WIMAX_COEX_ENABLE; 5337 memcpy(wimax.events, iwn6050_wimax_events, 5338 sizeof iwn6050_wimax_events); 5339 } else 5340 #endif 5341 { 5342 /* Disable WiMAX coexistence. 
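 * (The 6050 combo path above is still under #ifdef notyet, so for now
 * every adapter gets a zeroed command, i.e. coexistence disabled.)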
*/ 5343 wimax.flags = 0; 5344 memset(wimax.events, 0, sizeof wimax.events); 5345 } 5346 DPRINTF(("Configuring WiMAX coexistence\n")); 5347 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 5348 } 5349 5350 static int 5351 iwn6000_temp_offset_calib(struct iwn_softc *sc) 5352 { 5353 struct iwn6000_phy_calib_temp_offset cmd; 5354 5355 memset(&cmd, 0, sizeof cmd); 5356 cmd.code = IWN6000_PHY_CALIB_TEMP_OFFSET; 5357 cmd.ngroups = 1; 5358 cmd.isvalid = 1; 5359 if (sc->eeprom_temp != 0) 5360 cmd.offset = htole16(sc->eeprom_temp); 5361 else 5362 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET); 5363 DPRINTF(("setting radio sensor offset to %d\n", le16toh(cmd.offset))); 5364 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 5365 } 5366 5367 static int 5368 iwn2000_temp_offset_calib(struct iwn_softc *sc) 5369 { 5370 struct iwn2000_phy_calib_temp_offset cmd; 5371 5372 memset(&cmd, 0, sizeof cmd); 5373 cmd.code = IWN2000_PHY_CALIB_TEMP_OFFSET; 5374 cmd.ngroups = 1; 5375 cmd.isvalid = 1; 5376 if (sc->eeprom_rawtemp != 0) { 5377 cmd.offset_low = htole16(sc->eeprom_rawtemp); 5378 cmd.offset_high = htole16(sc->eeprom_temp); 5379 } else { 5380 cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET); 5381 cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET); 5382 } 5383 cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage); 5384 DPRINTF(("setting radio sensor offset to %d:%d, voltage to %d\n", 5385 le16toh(cmd.offset_low), le16toh(cmd.offset_high), 5386 le16toh(cmd.burnt_voltage_ref))); 5387 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 5388 } 5389 5390 /* 5391 * This function is called after the runtime firmware notifies us of its 5392 * readiness (called in a process context). 5393 */ 5394 static int 5395 iwn4965_post_alive(struct iwn_softc *sc) 5396 { 5397 int error, qid; 5398 5399 if ((error = iwn_nic_lock(sc)) != 0) 5400 return error; 5401 5402 /* Clear TX scheduler state in SRAM. */ 5403 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5404 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, 5405 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); 5406 5407 /* Set physical address of TX scheduler rings (1KB aligned). */ 5408 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5409 5410 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5411 5412 /* Disable chain mode for all our 16 queues. */ 5413 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); 5414 5415 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { 5416 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); 5417 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5418 5419 /* Set scheduler window size. */ 5420 iwn_mem_write(sc, sc->sched_base + 5421 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); 5422 /* Set scheduler frame limit. */ 5423 iwn_mem_write(sc, sc->sched_base + 5424 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5425 IWN_SCHED_LIMIT << 16); 5426 } 5427 5428 /* Enable interrupts for all our 16 queues. */ 5429 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); 5430 /* Identify TX FIFO rings (0-7). */ 5431 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); 5432 5433 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. 
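 * The qid2fifo[] table below maps each of the seven rings to its
 * hardware FIFO (e.g. queue 0 -> FIFO 3 and queue 3 -> FIFO 0); on the
 * 4965 the FIFO number is shifted left by one in the queue status
 * register, unlike the 5000 variant further down.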
*/ 5434 for (qid = 0; qid < 7; qid++) { 5435 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 5436 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5437 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 5438 } 5439 iwn_nic_unlock(sc); 5440 return 0; 5441 } 5442 5443 /* 5444 * This function is called after the initialization or runtime firmware 5445 * notifies us of its readiness (called in a process context). 5446 */ 5447 static int 5448 iwn5000_post_alive(struct iwn_softc *sc) 5449 { 5450 int error, qid; 5451 5452 /* Switch to using ICT interrupt mode. */ 5453 iwn5000_ict_reset(sc); 5454 5455 if ((error = iwn_nic_lock(sc)) != 0) 5456 return error; 5457 5458 /* Clear TX scheduler state in SRAM. */ 5459 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5460 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 5461 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); 5462 5463 /* Set physical address of TX scheduler rings (1KB aligned). */ 5464 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5465 5466 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5467 5468 /* Enable chain mode for all queues, except command queue. */ 5469 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); 5470 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 5471 5472 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 5473 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 5474 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5475 5476 iwn_mem_write(sc, sc->sched_base + 5477 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 5478 /* Set scheduler window size and frame limit. */ 5479 iwn_mem_write(sc, sc->sched_base + 5480 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5481 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5482 } 5483 5484 /* Enable interrupts for all our 20 queues. */ 5485 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 5486 /* Identify TX FIFO rings (0-7). */ 5487 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 5488 5489 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 5490 for (qid = 0; qid < 7; qid++) { 5491 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 5492 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5493 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 5494 } 5495 iwn_nic_unlock(sc); 5496 5497 /* Configure WiMAX coexistence for combo adapters. */ 5498 error = iwn5000_send_wimax_coex(sc); 5499 if (error != 0) { 5500 aprint_error_dev(sc->sc_dev, 5501 "could not configure WiMAX coexistence\n"); 5502 return error; 5503 } 5504 if (sc->hw_type != IWN_HW_REV_TYPE_5150) { 5505 struct iwn5000_phy_calib_crystal cmd; 5506 5507 /* Perform crystal calibration. */ 5508 memset(&cmd, 0, sizeof cmd); 5509 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 5510 cmd.ngroups = 1; 5511 cmd.isvalid = 1; 5512 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff; 5513 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff; 5514 DPRINTF(("sending crystal calibration %d, %d\n", 5515 cmd.cap_pin[0], cmd.cap_pin[1])); 5516 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 5517 if (error != 0) { 5518 aprint_error_dev(sc->sc_dev, 5519 "crystal calibration failed\n"); 5520 return error; 5521 } 5522 } 5523 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { 5524 /* Query calibration from the initialization firmware. 
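 * This path runs on the first boot only: the results produced by the
 * initialization image are the ones later replayed from sc->calibcmd[]
 * by iwn5000_send_calibration(); once they are in hand the hardware is
 * stopped and restarted with the runtime image via the iwn_hw_init()
 * call below.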
*/ 5525 if ((error = iwn5000_query_calibration(sc)) != 0) { 5526 aprint_error_dev(sc->sc_dev, 5527 "could not query calibration\n"); 5528 return error; 5529 } 5530 /* 5531 * We have the calibration results now, reboot with the 5532 * runtime firmware (call ourselves recursively!) 5533 */ 5534 iwn_hw_stop(sc); 5535 error = iwn_hw_init(sc); 5536 } else { 5537 /* Send calibration results to runtime firmware. */ 5538 error = iwn5000_send_calibration(sc); 5539 } 5540 return error; 5541 } 5542 5543 /* 5544 * The firmware boot code is small and is intended to be copied directly into 5545 * the NIC internal memory (no DMA transfer). 5546 */ 5547 static int 5548 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) 5549 { 5550 int error, ntries; 5551 5552 size /= sizeof (uint32_t); 5553 5554 if ((error = iwn_nic_lock(sc)) != 0) 5555 return error; 5556 5557 /* Copy microcode image into NIC memory. */ 5558 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, 5559 (const uint32_t *)ucode, size); 5560 5561 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); 5562 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); 5563 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); 5564 5565 /* Start boot load now. */ 5566 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); 5567 5568 /* Wait for transfer to complete. */ 5569 for (ntries = 0; ntries < 1000; ntries++) { 5570 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & 5571 IWN_BSM_WR_CTRL_START)) 5572 break; 5573 DELAY(10); 5574 } 5575 if (ntries == 1000) { 5576 aprint_error_dev(sc->sc_dev, 5577 "could not load boot firmware\n"); 5578 iwn_nic_unlock(sc); 5579 return ETIMEDOUT; 5580 } 5581 5582 /* Enable boot after power up. */ 5583 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); 5584 5585 iwn_nic_unlock(sc); 5586 return 0; 5587 } 5588 5589 static int 5590 iwn4965_load_firmware(struct iwn_softc *sc) 5591 { 5592 struct iwn_fw_info *fw = &sc->fw; 5593 struct iwn_dma_info *dma = &sc->fw_dma; 5594 int error; 5595 5596 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 5597 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 5598 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->init.datasz, 5599 BUS_DMASYNC_PREWRITE); 5600 memcpy((char *)dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5601 fw->init.text, fw->init.textsz); 5602 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ, 5603 fw->init.textsz, BUS_DMASYNC_PREWRITE); 5604 5605 /* Tell adapter where to find initialization sections. */ 5606 if ((error = iwn_nic_lock(sc)) != 0) 5607 return error; 5608 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5609 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); 5610 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5611 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5612 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 5613 iwn_nic_unlock(sc); 5614 5615 /* Load firmware boot code. */ 5616 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 5617 if (error != 0) { 5618 aprint_error_dev(sc->sc_dev, 5619 "could not load boot firmware\n"); 5620 return error; 5621 } 5622 /* Now press "execute". */ 5623 IWN_WRITE(sc, IWN_RESET, 0); 5624 5625 /* Wait at most one second for first alive notification. */ 5626 if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) { 5627 aprint_error_dev(sc->sc_dev, 5628 "timeout waiting for adapter to initialize\n"); 5629 return error; 5630 } 5631 5632 /* Retrieve current temperature for initial TX power calibration. 
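The raw measurement reported by the initialization firmware is kept in sc->rawtemp and converted by iwn4965_get_temperature() so TX power can be computed as soon as the runtime image is up.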
*/ 5633 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; 5634 sc->temp = iwn4965_get_temperature(sc); 5635 5636 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 5637 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 5638 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->main.datasz, 5639 BUS_DMASYNC_PREWRITE); 5640 memcpy((char *)dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5641 fw->main.text, fw->main.textsz); 5642 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ, 5643 fw->main.textsz, BUS_DMASYNC_PREWRITE); 5644 5645 /* Tell adapter where to find runtime sections. */ 5646 if ((error = iwn_nic_lock(sc)) != 0) 5647 return error; 5648 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5649 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); 5650 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5651 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5652 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, 5653 IWN_FW_UPDATED | fw->main.textsz); 5654 iwn_nic_unlock(sc); 5655 5656 return 0; 5657 } 5658 5659 static int 5660 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, 5661 const uint8_t *section, int size) 5662 { 5663 struct iwn_dma_info *dma = &sc->fw_dma; 5664 int error; 5665 5666 /* Copy firmware section into pre-allocated DMA-safe memory. */ 5667 memcpy(dma->vaddr, section, size); 5668 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, size, BUS_DMASYNC_PREWRITE); 5669 5670 if ((error = iwn_nic_lock(sc)) != 0) 5671 return error; 5672 5673 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5674 IWN_FH_TX_CONFIG_DMA_PAUSE); 5675 5676 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); 5677 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), 5678 IWN_LOADDR(dma->paddr)); 5679 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), 5680 IWN_HIADDR(dma->paddr) << 28 | size); 5681 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), 5682 IWN_FH_TXBUF_STATUS_TBNUM(1) | 5683 IWN_FH_TXBUF_STATUS_TBIDX(1) | 5684 IWN_FH_TXBUF_STATUS_TFBD_VALID); 5685 5686 /* Kick Flow Handler to start DMA transfer. */ 5687 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5688 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); 5689 5690 iwn_nic_unlock(sc); 5691 5692 /* Wait at most five seconds for FH DMA transfer to complete. */ 5693 return tsleep(sc, PCATCH, "iwninit", 5 * hz); 5694 } 5695 5696 static int 5697 iwn5000_load_firmware(struct iwn_softc *sc) 5698 { 5699 struct iwn_fw_part *fw; 5700 int error; 5701 5702 /* Load the initialization firmware on first boot only. */ 5703 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ? 5704 &sc->fw.main : &sc->fw.init; 5705 5706 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, 5707 fw->text, fw->textsz); 5708 if (error != 0) { 5709 aprint_error_dev(sc->sc_dev, 5710 "could not load firmware %s section\n", ".text"); 5711 return error; 5712 } 5713 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, 5714 fw->data, fw->datasz); 5715 if (error != 0) { 5716 aprint_error_dev(sc->sc_dev, 5717 "could not load firmware %s section\n", ".data"); 5718 return error; 5719 } 5720 5721 /* Now press "execute". */ 5722 IWN_WRITE(sc, IWN_RESET, 0); 5723 return 0; 5724 } 5725 5726 /* 5727 * Extract text and data sections from a legacy firmware image. 
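 * The header carries a 32-bit revision word, an optional build number (present for API version 3 and later) and the sizes of the five sections, which then follow back to back in the order main.text, main.data, init.text, init.data and boot.text.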
5728 */ 5729 static int 5730 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) 5731 { 5732 const uint32_t *ptr; 5733 size_t hdrlen = 24; 5734 uint32_t rev; 5735 5736 ptr = (const uint32_t *)fw->data; 5737 rev = le32toh(*ptr++); 5738 5739 sc->ucode_rev = rev; 5740 5741 /* Check firmware API version. */ 5742 if (IWN_FW_API(rev) <= 1) { 5743 aprint_error_dev(sc->sc_dev, 5744 "bad firmware, need API version >=2\n"); 5745 return EINVAL; 5746 } 5747 if (IWN_FW_API(rev) >= 3) { 5748 /* Skip build number (version 2 header). */ 5749 hdrlen += 4; 5750 ptr++; 5751 } 5752 if (fw->size < hdrlen) { 5753 aprint_error_dev(sc->sc_dev, 5754 "firmware too short: %zd bytes\n", fw->size); 5755 return EINVAL; 5756 } 5757 fw->main.textsz = le32toh(*ptr++); 5758 fw->main.datasz = le32toh(*ptr++); 5759 fw->init.textsz = le32toh(*ptr++); 5760 fw->init.datasz = le32toh(*ptr++); 5761 fw->boot.textsz = le32toh(*ptr++); 5762 5763 /* Check that all firmware sections fit. */ 5764 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + 5765 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5766 aprint_error_dev(sc->sc_dev, 5767 "firmware too short: %zd bytes\n", fw->size); 5768 return EINVAL; 5769 } 5770 5771 /* Get pointers to firmware sections. */ 5772 fw->main.text = (const uint8_t *)ptr; 5773 fw->main.data = fw->main.text + fw->main.textsz; 5774 fw->init.text = fw->main.data + fw->main.datasz; 5775 fw->init.data = fw->init.text + fw->init.textsz; 5776 fw->boot.text = fw->init.data + fw->init.datasz; 5777 return 0; 5778 } 5779 5780 /* 5781 * Extract text and data sections from a TLV firmware image. 5782 */ 5783 static int 5784 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 5785 uint16_t alt) 5786 { 5787 const struct iwn_fw_tlv_hdr *hdr; 5788 const struct iwn_fw_tlv *tlv; 5789 const uint8_t *ptr, *end; 5790 uint64_t altmask; 5791 uint32_t len; 5792 5793 if (fw->size < sizeof (*hdr)) { 5794 aprint_error_dev(sc->sc_dev, 5795 "firmware too short: %zd bytes\n", fw->size); 5796 return EINVAL; 5797 } 5798 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 5799 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 5800 aprint_error_dev(sc->sc_dev, 5801 "bad firmware signature 0x%08x\n", le32toh(hdr->signature)); 5802 return EINVAL; 5803 } 5804 DPRINTF(("FW: \"%.64s\", build 0x%x\n", hdr->descr, 5805 le32toh(hdr->build))); 5806 sc->ucode_rev = le32toh(hdr->rev); 5807 5808 /* 5809 * Select the closest supported alternative that is less than 5810 * or equal to the specified one. 5811 */ 5812 altmask = le64toh(hdr->altmask); 5813 while (alt > 0 && !(altmask & (1ULL << alt))) 5814 alt--; /* Downgrade. */ 5815 DPRINTF(("using alternative %d\n", alt)); 5816 5817 ptr = (const uint8_t *)(hdr + 1); 5818 end = (const uint8_t *)(fw->data + fw->size); 5819 5820 /* Parse type-length-value fields. */ 5821 while (ptr + sizeof (*tlv) <= end) { 5822 tlv = (const struct iwn_fw_tlv *)ptr; 5823 len = le32toh(tlv->len); 5824 5825 ptr += sizeof (*tlv); 5826 if (ptr + len > end) { 5827 aprint_error_dev(sc->sc_dev, 5828 "firmware too short: %zd bytes\n", fw->size); 5829 return EINVAL; 5830 } 5831 /* Skip other alternatives. 
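A TLV whose alt field is zero applies to every alternative; any other value must match the alternative selected above, otherwise the entry is ignored.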
*/ 5832 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 5833 goto next; 5834 5835 switch (le16toh(tlv->type)) { 5836 case IWN_FW_TLV_MAIN_TEXT: 5837 fw->main.text = ptr; 5838 fw->main.textsz = len; 5839 break; 5840 case IWN_FW_TLV_MAIN_DATA: 5841 fw->main.data = ptr; 5842 fw->main.datasz = len; 5843 break; 5844 case IWN_FW_TLV_INIT_TEXT: 5845 fw->init.text = ptr; 5846 fw->init.textsz = len; 5847 break; 5848 case IWN_FW_TLV_INIT_DATA: 5849 fw->init.data = ptr; 5850 fw->init.datasz = len; 5851 break; 5852 case IWN_FW_TLV_BOOT_TEXT: 5853 fw->boot.text = ptr; 5854 fw->boot.textsz = len; 5855 break; 5856 case IWN_FW_TLV_ENH_SENS: 5857 if (len != 0) { 5858 aprint_error_dev(sc->sc_dev, 5859 "TLV type %d has invalid size %u\n", 5860 le16toh(tlv->type), len); 5861 goto next; 5862 } 5863 sc->sc_flags |= IWN_FLAG_ENH_SENS; 5864 break; 5865 case IWN_FW_TLV_PHY_CALIB: 5866 if (len != sizeof(uint32_t)) { 5867 aprint_error_dev(sc->sc_dev, 5868 "TLV type %d has invalid size %u\n", 5869 le16toh(tlv->type), len); 5870 goto next; 5871 } 5872 if (le32toh(*ptr) <= IWN5000_PHY_CALIB_MAX) { 5873 sc->reset_noise_gain = le32toh(*ptr); 5874 sc->noise_gain = le32toh(*ptr) + 1; 5875 } 5876 break; 5877 case IWN_FW_TLV_FLAGS: 5878 if (len < sizeof(uint32_t)) 5879 break; 5880 if (len % sizeof(uint32_t)) 5881 break; 5882 sc->tlv_feature_flags = le32toh(*ptr); 5883 DPRINTF(("feature: 0x%08x\n", sc->tlv_feature_flags)); 5884 break; 5885 default: 5886 DPRINTF(("TLV type %d not handled\n", 5887 le16toh(tlv->type))); 5888 break; 5889 } 5890 next: /* TLV fields are 32-bit aligned. */ 5891 ptr += (len + 3) & ~3; 5892 } 5893 return 0; 5894 } 5895 5896 static int 5897 iwn_read_firmware(struct iwn_softc *sc) 5898 { 5899 struct iwn_fw_info *fw = &sc->fw; 5900 firmware_handle_t fwh; 5901 int error; 5902 5903 /* 5904 * Some PHY calibration commands are firmware-dependent; these 5905 * are the default values that will be overridden if 5906 * necessary. 5907 */ 5908 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 5909 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN; 5910 5911 /* Initialize for error returns */ 5912 fw->data = NULL; 5913 fw->size = 0; 5914 5915 /* Open firmware image. */ 5916 if ((error = firmware_open("if_iwn", sc->fwname, &fwh)) != 0) { 5917 aprint_error_dev(sc->sc_dev, 5918 "could not get firmware handle %s\n", sc->fwname); 5919 return error; 5920 } 5921 fw->size = firmware_get_size(fwh); 5922 if (fw->size < sizeof (uint32_t)) { 5923 aprint_error_dev(sc->sc_dev, 5924 "firmware too short: %zd bytes\n", fw->size); 5925 firmware_close(fwh); 5926 return EINVAL; 5927 } 5928 5929 /* Read the firmware. */ 5930 fw->data = firmware_malloc(fw->size); 5931 if (fw->data == NULL) { 5932 aprint_error_dev(sc->sc_dev, 5933 "not enough memory to stock firmware %s\n", sc->fwname); 5934 firmware_close(fwh); 5935 return ENOMEM; 5936 } 5937 error = firmware_read(fwh, 0, fw->data, fw->size); 5938 firmware_close(fwh); 5939 if (error != 0) { 5940 aprint_error_dev(sc->sc_dev, 5941 "could not read firmware %s\n", sc->fwname); 5942 goto out; 5943 } 5944 5945 /* Retrieve text and data sections. */ 5946 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ 5947 error = iwn_read_firmware_leg(sc, fw); 5948 else 5949 error = iwn_read_firmware_tlv(sc, fw, 1); 5950 if (error != 0) { 5951 aprint_error_dev(sc->sc_dev, 5952 "could not read firmware sections\n"); 5953 goto out; 5954 } 5955 5956 /* Make sure text and data sections fit in hardware memory. 
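The limits are per-adapter values (sc->fw_text_maxsz and sc->fw_data_maxsz, filled in at attach time), and the boot code must additionally be a multiple of four bytes because the 4965 boot loader copies it as 32-bit words.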
*/ 5957 if (fw->main.textsz > sc->fw_text_maxsz || 5958 fw->main.datasz > sc->fw_data_maxsz || 5959 fw->init.textsz > sc->fw_text_maxsz || 5960 fw->init.datasz > sc->fw_data_maxsz || 5961 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 5962 (fw->boot.textsz & 3) != 0) { 5963 aprint_error_dev(sc->sc_dev, 5964 "firmware sections too large\n"); 5965 goto out; 5966 } 5967 5968 /* We can proceed with loading the firmware. */ 5969 return 0; 5970 out: 5971 firmware_free(fw->data, fw->size); 5972 fw->data = NULL; 5973 fw->size = 0; 5974 return error ? error : EINVAL; 5975 } 5976 5977 static int 5978 iwn_clock_wait(struct iwn_softc *sc) 5979 { 5980 int ntries; 5981 5982 /* Set "initialization complete" bit. */ 5983 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 5984 5985 /* Wait for clock stabilization. */ 5986 for (ntries = 0; ntries < 2500; ntries++) { 5987 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) 5988 return 0; 5989 DELAY(10); 5990 } 5991 aprint_error_dev(sc->sc_dev, 5992 "timeout waiting for clock stabilization\n"); 5993 return ETIMEDOUT; 5994 } 5995 5996 static int 5997 iwn_apm_init(struct iwn_softc *sc) 5998 { 5999 pcireg_t reg; 6000 int error; 6001 6002 /* Disable L0s exit timer (NMI bug workaround). */ 6003 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 6004 /* Don't wait for ICH L0s (ICH bug workaround). */ 6005 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 6006 6007 /* Set FH wait threshold to max (HW bug under stress workaround). */ 6008 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); 6009 6010 /* Enable HAP INTA to move adapter from L1a to L0s. */ 6011 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); 6012 6013 /* Retrieve PCIe Active State Power Management (ASPM). */ 6014 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 6015 sc->sc_cap_off + PCIE_LCSR); 6016 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 6017 if (reg & PCIE_LCSR_ASPM_L1) /* L1 Entry enabled. */ 6018 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6019 else 6020 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6021 6022 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 6023 sc->hw_type <= IWN_HW_REV_TYPE_1000) 6024 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT); 6025 6026 /* Wait for clock stabilization before accessing prph. */ 6027 if ((error = iwn_clock_wait(sc)) != 0) 6028 return error; 6029 6030 if ((error = iwn_nic_lock(sc)) != 0) 6031 return error; 6032 if (sc->hw_type == IWN_HW_REV_TYPE_4965) { 6033 /* Enable DMA and BSM (Bootstrap State Machine). */ 6034 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6035 IWN_APMG_CLK_CTRL_DMA_CLK_RQT | 6036 IWN_APMG_CLK_CTRL_BSM_CLK_RQT); 6037 } else { 6038 /* Enable DMA. */ 6039 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6040 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 6041 } 6042 DELAY(20); 6043 /* Disable L1-Active. */ 6044 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 6045 iwn_nic_unlock(sc); 6046 6047 return 0; 6048 } 6049 6050 static void 6051 iwn_apm_stop_master(struct iwn_softc *sc) 6052 { 6053 int ntries; 6054 6055 /* Stop busmaster DMA activity. */ 6056 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); 6057 for (ntries = 0; ntries < 100; ntries++) { 6058 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) 6059 return; 6060 DELAY(10); 6061 } 6062 aprint_error_dev(sc->sc_dev, "timeout waiting for master\n"); 6063 } 6064 6065 static void 6066 iwn_apm_stop(struct iwn_softc *sc) 6067 { 6068 iwn_apm_stop_master(sc); 6069 6070 /* Reset the entire device. 
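The software reset bit is asserted and, after the short delay, the bit set by iwn_clock_wait() is dropped again so that a later iwn_apm_init() starts from a clean state.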
*/ 6071 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); 6072 DELAY(10); 6073 /* Clear "initialization complete" bit. */ 6074 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 6075 } 6076 6077 static int 6078 iwn4965_nic_config(struct iwn_softc *sc) 6079 { 6080 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { 6081 /* 6082 * I don't believe this to be correct but this is what the 6083 * vendor driver is doing. Probably the bits should not be 6084 * shifted in IWN_RFCFG_*. 6085 */ 6086 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6087 IWN_RFCFG_TYPE(sc->rfcfg) | 6088 IWN_RFCFG_STEP(sc->rfcfg) | 6089 IWN_RFCFG_DASH(sc->rfcfg)); 6090 } 6091 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6092 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 6093 return 0; 6094 } 6095 6096 static int 6097 iwn5000_nic_config(struct iwn_softc *sc) 6098 { 6099 uint32_t tmp; 6100 int error; 6101 6102 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { 6103 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6104 IWN_RFCFG_TYPE(sc->rfcfg) | 6105 IWN_RFCFG_STEP(sc->rfcfg) | 6106 IWN_RFCFG_DASH(sc->rfcfg)); 6107 } 6108 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6109 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 6110 6111 if ((error = iwn_nic_lock(sc)) != 0) 6112 return error; 6113 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); 6114 6115 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 6116 /* 6117 * Select first Switching Voltage Regulator (1.32V) to 6118 * solve a stability issue related to noisy DC2DC line 6119 * in the silicon of 1000 Series. 6120 */ 6121 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR); 6122 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK; 6123 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32; 6124 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp); 6125 } 6126 iwn_nic_unlock(sc); 6127 6128 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) { 6129 /* Use internal power amplifier only. */ 6130 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA); 6131 } 6132 if ((sc->hw_type == IWN_HW_REV_TYPE_6050 || 6133 sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) { 6134 /* Indicate that ROM calibration version is >=6. */ 6135 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6); 6136 } 6137 if (sc->hw_type == IWN_HW_REV_TYPE_6005) 6138 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2); 6139 if (sc->hw_type == IWN_HW_REV_TYPE_2030 || 6140 sc->hw_type == IWN_HW_REV_TYPE_2000 || 6141 sc->hw_type == IWN_HW_REV_TYPE_135 || 6142 sc->hw_type == IWN_HW_REV_TYPE_105) 6143 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_IQ_INVERT); 6144 return 0; 6145 } 6146 6147 /* 6148 * Take NIC ownership over Intel Active Management Technology (AMT). 6149 */ 6150 static int 6151 iwn_hw_prepare(struct iwn_softc *sc) 6152 { 6153 int ntries; 6154 6155 /* Check if hardware is ready. */ 6156 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 6157 for (ntries = 0; ntries < 5; ntries++) { 6158 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 6159 IWN_HW_IF_CONFIG_NIC_READY) 6160 return 0; 6161 DELAY(10); 6162 } 6163 6164 /* Hardware not ready, force into ready state. */ 6165 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE); 6166 for (ntries = 0; ntries < 15000; ntries++) { 6167 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) & 6168 IWN_HW_IF_CONFIG_PREPARE_DONE)) 6169 break; 6170 DELAY(10); 6171 } 6172 if (ntries == 15000) 6173 return ETIMEDOUT; 6174 6175 /* Hardware should be ready now. 
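Ownership has been requested from the AMT firmware through the PREPARE handshake above, so repeat the readiness poll one last time before giving up.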
*/ 6176 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 6177 for (ntries = 0; ntries < 5; ntries++) { 6178 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 6179 IWN_HW_IF_CONFIG_NIC_READY) 6180 return 0; 6181 DELAY(10); 6182 } 6183 return ETIMEDOUT; 6184 } 6185 6186 static int 6187 iwn_hw_init(struct iwn_softc *sc) 6188 { 6189 struct iwn_ops *ops = &sc->ops; 6190 int error, chnl, qid; 6191 6192 /* Clear pending interrupts. */ 6193 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6194 6195 if ((error = iwn_apm_init(sc)) != 0) { 6196 aprint_error_dev(sc->sc_dev, 6197 "could not power ON adapter\n"); 6198 return error; 6199 } 6200 6201 /* Select VMAIN power source. */ 6202 if ((error = iwn_nic_lock(sc)) != 0) 6203 return error; 6204 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK); 6205 iwn_nic_unlock(sc); 6206 6207 /* Perform adapter-specific initialization. */ 6208 if ((error = ops->nic_config(sc)) != 0) 6209 return error; 6210 6211 /* Initialize RX ring. */ 6212 if ((error = iwn_nic_lock(sc)) != 0) 6213 return error; 6214 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 6215 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0); 6216 /* Set physical address of RX ring (256-byte aligned). */ 6217 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8); 6218 /* Set physical address of RX status (16-byte aligned). */ 6219 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4); 6220 /* Enable RX. */ 6221 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 6222 IWN_FH_RX_CONFIG_ENA | 6223 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */ 6224 IWN_FH_RX_CONFIG_IRQ_DST_HOST | 6225 IWN_FH_RX_CONFIG_SINGLE_FRAME | 6226 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) | 6227 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG)); 6228 iwn_nic_unlock(sc); 6229 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7); 6230 6231 if ((error = iwn_nic_lock(sc)) != 0) 6232 return error; 6233 6234 /* Initialize TX scheduler. */ 6235 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 6236 6237 /* Set physical address of "keep warm" page (16-byte aligned). */ 6238 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4); 6239 6240 /* Initialize TX rings. */ 6241 for (qid = 0; qid < sc->ntxqs; qid++) { 6242 struct iwn_tx_ring *txq = &sc->txq[qid]; 6243 6244 /* Set physical address of TX ring (256-byte aligned). */ 6245 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid), 6246 txq->desc_dma.paddr >> 8); 6247 } 6248 iwn_nic_unlock(sc); 6249 6250 /* Enable DMA channels. */ 6251 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 6252 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 6253 IWN_FH_TX_CONFIG_DMA_ENA | 6254 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA); 6255 } 6256 6257 /* Clear "radio off" and "commands blocked" bits. */ 6258 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6259 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED); 6260 6261 /* Clear pending interrupts. */ 6262 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6263 /* Enable interrupt coalescing. */ 6264 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8); 6265 /* Enable interrupts. */ 6266 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 6267 6268 /* _Really_ make sure "radio off" bit is cleared! */ 6269 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6270 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6271 6272 /* Enable shadow registers. 
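Shadowing lets the periodic write pointer updates reach the device without first waking it from power save; it is only enabled on 6000 series and later adapters, hence the hw_type check below.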
*/ 6273 if (sc->hw_type >= IWN_HW_REV_TYPE_6000) 6274 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff); 6275 6276 if ((error = ops->load_firmware(sc)) != 0) { 6277 aprint_error_dev(sc->sc_dev, 6278 "could not load firmware\n"); 6279 return error; 6280 } 6281 /* Wait at most one second for firmware alive notification. */ 6282 if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) { 6283 aprint_error_dev(sc->sc_dev, 6284 "timeout waiting for adapter to initialize\n"); 6285 return error; 6286 } 6287 /* Do post-firmware initialization. */ 6288 return ops->post_alive(sc); 6289 } 6290 6291 static void 6292 iwn_hw_stop(struct iwn_softc *sc) 6293 { 6294 int chnl, qid, ntries; 6295 6296 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); 6297 6298 /* Disable interrupts. */ 6299 IWN_WRITE(sc, IWN_INT_MASK, 0); 6300 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6301 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff); 6302 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 6303 6304 /* Make sure we no longer hold the NIC lock. */ 6305 iwn_nic_unlock(sc); 6306 6307 /* Stop TX scheduler. */ 6308 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 6309 6310 /* Stop all DMA channels. */ 6311 if (iwn_nic_lock(sc) == 0) { 6312 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 6313 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); 6314 for (ntries = 0; ntries < 200; ntries++) { 6315 if (IWN_READ(sc, IWN_FH_TX_STATUS) & 6316 IWN_FH_TX_STATUS_IDLE(chnl)) 6317 break; 6318 DELAY(10); 6319 } 6320 } 6321 iwn_nic_unlock(sc); 6322 } 6323 6324 /* Stop RX ring. */ 6325 iwn_reset_rx_ring(sc, &sc->rxq); 6326 6327 /* Reset all TX rings. */ 6328 for (qid = 0; qid < sc->ntxqs; qid++) 6329 iwn_reset_tx_ring(sc, &sc->txq[qid]); 6330 6331 if (iwn_nic_lock(sc) == 0) { 6332 iwn_prph_write(sc, IWN_APMG_CLK_DIS, 6333 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 6334 iwn_nic_unlock(sc); 6335 } 6336 DELAY(5); 6337 /* Power OFF adapter. */ 6338 iwn_apm_stop(sc); 6339 } 6340 6341 static int 6342 iwn_init(struct ifnet *ifp) 6343 { 6344 struct iwn_softc *sc = ifp->if_softc; 6345 struct ieee80211com *ic = &sc->sc_ic; 6346 int error; 6347 6348 mutex_enter(&sc->sc_mtx); 6349 if (sc->sc_flags & IWN_FLAG_HW_INITED) 6350 goto out; 6351 if ((error = iwn_hw_prepare(sc)) != 0) { 6352 aprint_error_dev(sc->sc_dev, 6353 "hardware not ready\n"); 6354 goto fail; 6355 } 6356 6357 /* Check that the radio is not disabled by hardware switch. */ 6358 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { 6359 aprint_error_dev(sc->sc_dev, 6360 "radio is disabled by hardware switch\n"); 6361 error = EPERM; /* :-) */ 6362 goto fail; 6363 } 6364 6365 /* Read firmware images from the filesystem. */ 6366 if ((error = iwn_read_firmware(sc)) != 0) { 6367 aprint_error_dev(sc->sc_dev, 6368 "could not read firmware\n"); 6369 goto fail; 6370 } 6371 6372 /* Initialize interrupt mask to default value. */ 6373 sc->int_mask = IWN_INT_MASK_DEF; 6374 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 6375 6376 /* Initialize hardware and upload firmware. */ 6377 KASSERT(sc->fw.data != NULL && sc->fw.size > 0); 6378 error = iwn_hw_init(sc); 6379 firmware_free(sc->fw.data, sc->fw.size); 6380 sc->fw.data = NULL; 6381 sc->fw.size = 0; 6382 if (error != 0) { 6383 aprint_error_dev(sc->sc_dev, 6384 "could not initialize hardware\n"); 6385 goto fail; 6386 } 6387 6388 /* Configure adapter now that it is ready. 
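iwn_config() pushes the static configuration (RXON settings, TX power, power saving and the like) to the firmware; once it succeeds the interface is marked running and, unless we are in monitor mode, a scan is started below.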
*/ 6389 if ((error = iwn_config(sc)) != 0) { 6390 aprint_error_dev(sc->sc_dev, 6391 "could not configure device\n"); 6392 goto fail; 6393 } 6394 6395 sc->sc_beacon_wait = 0; 6396 6397 ifp->if_flags &= ~IFF_OACTIVE; 6398 ifp->if_flags |= IFF_RUNNING; 6399 6400 if (ic->ic_opmode != IEEE80211_M_MONITOR) 6401 ieee80211_begin_scan(ic, 0); 6402 else 6403 ieee80211_new_state(ic, IEEE80211_S_RUN, -1); 6404 6405 sc->sc_flags |= IWN_FLAG_HW_INITED; 6406 out: 6407 mutex_exit(&sc->sc_mtx); 6408 return 0; 6409 6410 fail: mutex_exit(&sc->sc_mtx); 6411 iwn_stop(ifp, 1); 6412 return error; 6413 } 6414 6415 static void 6416 iwn_stop(struct ifnet *ifp, int disable) 6417 { 6418 struct iwn_softc *sc = ifp->if_softc; 6419 struct ieee80211com *ic = &sc->sc_ic; 6420 6421 if (!disable) 6422 mutex_enter(&sc->sc_mtx); 6423 sc->sc_flags &= ~IWN_FLAG_HW_INITED; 6424 ifp->if_timer = sc->sc_tx_timer = 0; 6425 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 6426 6427 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 6428 6429 /* Power OFF hardware. */ 6430 iwn_hw_stop(sc); 6431 6432 if (!disable) 6433 mutex_exit(&sc->sc_mtx); 6434 } 6435 6436 /* 6437 * XXX MCLGETI alternative 6438 * 6439 * With IWN_USE_RBUF defined it uses the rbuf cache for receive buffers 6440 * as long as there are available free buffers then it uses MEXTMALLOC., 6441 * Without IWN_USE_RBUF defined it uses MEXTMALLOC exclusively. 6442 * The MCLGET4K code is used for testing an alternative mbuf cache. 6443 */ 6444 6445 static struct mbuf * 6446 MCLGETIalt(struct iwn_softc *sc, int how, 6447 struct ifnet *ifp __unused, u_int size) 6448 { 6449 struct mbuf *m; 6450 #ifdef IWN_USE_RBUF 6451 struct iwn_rbuf *rbuf; 6452 #endif 6453 6454 MGETHDR(m, how, MT_DATA); 6455 if (m == NULL) 6456 return NULL; 6457 6458 #ifdef IWN_USE_RBUF 6459 if (sc->rxq.nb_free_entries > 0 && 6460 (rbuf = iwn_alloc_rbuf(sc)) != NULL) { 6461 /* Attach buffer to mbuf header. */ 6462 MEXTADD(m, rbuf->vaddr, size, 0, iwn_free_rbuf, rbuf); 6463 m->m_flags |= M_EXT_RW; 6464 } 6465 else { 6466 MEXTMALLOC(m, size, how); 6467 if ((m->m_flags & M_EXT) == 0) { 6468 m_freem(m); 6469 return NULL; 6470 } 6471 } 6472 6473 #else 6474 #ifdef MCLGET4K 6475 if (size == 4096) 6476 MCLGET4K(m, how); 6477 else 6478 panic("size must be 4k"); 6479 #else 6480 MEXTMALLOC(m, size, how); 6481 #endif 6482 if ((m->m_flags & M_EXT) == 0) { 6483 m_freem(m); 6484 return NULL; 6485 } 6486 #endif 6487 6488 return m; 6489 } 6490 6491 #ifdef IWN_USE_RBUF 6492 static struct iwn_rbuf * 6493 iwn_alloc_rbuf(struct iwn_softc *sc) 6494 { 6495 struct iwn_rbuf *rbuf; 6496 mutex_enter(&sc->rxq.freelist_mtx); 6497 6498 rbuf = SLIST_FIRST(&sc->rxq.freelist); 6499 if (rbuf != NULL) { 6500 SLIST_REMOVE_HEAD(&sc->rxq.freelist, next); 6501 sc->rxq.nb_free_entries --; 6502 } 6503 mutex_exit(&sc->rxq.freelist_mtx); 6504 return rbuf; 6505 } 6506 6507 /* 6508 * This is called automatically by the network stack when the mbuf to which 6509 * our RX buffer is attached is freed. 6510 */ 6511 static void 6512 iwn_free_rbuf(struct mbuf* m, void *buf, size_t size, void *arg) 6513 { 6514 struct iwn_rbuf *rbuf = arg; 6515 struct iwn_softc *sc = rbuf->sc; 6516 6517 /* Put the RX buffer back in the free list. 
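Note that nb_free_entries is bumped outside freelist_mtx here while iwn_alloc_rbuf() updates it under the lock; the counter is only a hint for MCLGETIalt(), which re-checks the list under the lock, so a stale value at worst causes a fallback to MEXTMALLOC.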
*/ 6518 mutex_enter(&sc->rxq.freelist_mtx); 6519 SLIST_INSERT_HEAD(&sc->rxq.freelist, rbuf, next); 6520 mutex_exit(&sc->rxq.freelist_mtx); 6521 6522 sc->rxq.nb_free_entries ++; 6523 if (__predict_true(m != NULL)) 6524 pool_cache_put(mb_cache, m); 6525 } 6526 6527 static int 6528 iwn_alloc_rpool(struct iwn_softc *sc) 6529 { 6530 struct iwn_rx_ring *ring = &sc->rxq; 6531 struct iwn_rbuf *rbuf; 6532 int i, error; 6533 6534 mutex_init(&ring->freelist_mtx, MUTEX_DEFAULT, IPL_NET); 6535 6536 /* Allocate a big chunk of DMA'able memory... */ 6537 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->buf_dma, NULL, 6538 IWN_RBUF_COUNT * IWN_RBUF_SIZE, PAGE_SIZE); 6539 if (error != 0) { 6540 aprint_error_dev(sc->sc_dev, 6541 "could not allocate RX buffers DMA memory\n"); 6542 return error; 6543 } 6544 /* ...and split it into chunks of IWN_RBUF_SIZE bytes. */ 6545 SLIST_INIT(&ring->freelist); 6546 for (i = 0; i < IWN_RBUF_COUNT; i++) { 6547 rbuf = &ring->rbuf[i]; 6548 6549 rbuf->sc = sc; /* Backpointer for callbacks. */ 6550 rbuf->vaddr = (void *)((vaddr_t)ring->buf_dma.vaddr + i * IWN_RBUF_SIZE); 6551 rbuf->paddr = ring->buf_dma.paddr + i * IWN_RBUF_SIZE; 6552 6553 SLIST_INSERT_HEAD(&ring->freelist, rbuf, next); 6554 } 6555 ring->nb_free_entries = IWN_RBUF_COUNT; 6556 return 0; 6557 } 6558 6559 static void 6560 iwn_free_rpool(struct iwn_softc *sc) 6561 { 6562 iwn_dma_contig_free(&sc->rxq.buf_dma); 6563 } 6564 #endif 6565 6566 /* 6567 * XXX: Hack to set the current channel to the value advertised in beacons or 6568 * probe responses. Only used during AP detection. 6569 * XXX: Duplicated from if_iwi.c 6570 */ 6571 static void 6572 iwn_fix_channel(struct ieee80211com *ic, struct mbuf *m, 6573 struct iwn_rx_stat *stat) 6574 { 6575 struct iwn_softc *sc = ic->ic_ifp->if_softc; 6576 struct ieee80211_frame *wh; 6577 uint8_t subtype; 6578 uint8_t *frm, *efrm; 6579 6580 wh = mtod(m, struct ieee80211_frame *); 6581 6582 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT) 6583 return; 6584 6585 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 6586 6587 if (subtype != IEEE80211_FC0_SUBTYPE_BEACON && 6588 subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP) 6589 return; 6590 6591 if (sc->sc_flags & IWN_FLAG_SCANNING_5GHZ) { 6592 int chan = le16toh(stat->chan); 6593 if (chan < __arraycount(ic->ic_channels)) 6594 ic->ic_curchan = &ic->ic_channels[chan]; 6595 return; 6596 } 6597 6598 frm = (uint8_t *)(wh + 1); 6599 efrm = mtod(m, uint8_t *) + m->m_len; 6600 6601 frm += 12; /* skip tstamp, bintval and capinfo fields */ 6602 while (frm + 2 < efrm) { 6603 if (*frm == IEEE80211_ELEMID_DSPARMS) { 6604 #if IEEE80211_CHAN_MAX < 255 6605 if (frm[2] <= IEEE80211_CHAN_MAX) 6606 #endif 6607 ic->ic_curchan = &ic->ic_channels[frm[2]]; 6608 } 6609 6610 frm += frm[1] + 2; 6611 } 6612 } 6613 6614 #ifdef notyetMODULE 6615 6616 MODULE(MODULE_CLASS_DRIVER, if_iwn, "pci"); 6617 6618 #ifdef _MODULE 6619 #include "ioconf.c" 6620 #endif 6621 6622 static int 6623 if_iwn_modcmd(modcmd_t cmd, void *data) 6624 { 6625 int error = 0; 6626 6627 switch (cmd) { 6628 case MODULE_CMD_INIT: 6629 #ifdef _MODULE 6630 error = config_init_component(cfdriver_ioconf_if_iwn, 6631 cfattach_ioconf_if_iwn, cfdata_ioconf_if_iwn); 6632 #endif 6633 return error; 6634 case MODULE_CMD_FINI: 6635 #ifdef _MODULE 6636 error = config_fini_component(cfdriver_ioconf_if_iwn, 6637 cfattach_ioconf_if_iwn, cfdata_ioconf_if_iwn); 6638 #endif 6639 return error; 6640 case MODULE_CMD_AUTOUNLOAD: 6641 #ifdef _MODULE 6642 /* XXX This is not optional! 
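Falling through here returns 0, which would let the framework unload the driver even while instances are attached; this case presumably ought to check for attached devices and return EBUSY instead.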
*/ 6643 #endif 6644 return error; 6645 default: 6646 return ENOTTY; 6647 } 6648 } 6649 #endif 6650