/*	$NetBSD: if_iwn.c,v 1.92 2019/10/10 22:34:42 bad Exp $	*/
/*	$OpenBSD: if_iwn.c,v 1.135 2014/09/10 07:22:09 dcoppa Exp $	*/

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
 * adapters.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_iwn.c,v 1.92 2019/10/10 22:34:42 bad Exp $");

#define IWN_USE_RBUF	/* Use local storage for RX */
#undef IWN_HWCRYPTO	/* XXX does not even compile yet */

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#ifdef notyetMODULE
#include <sys/module.h>
#endif
#include <sys/mutex.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/callout.h>

#include <dev/sysmon/sysmonvar.h>

#include <sys/bus.h>
#include <machine/endian.h>
#include <sys/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <net/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/firmload.h>

#include <dev/pci/if_iwnreg.h>
#include <dev/pci/if_iwnvar.h>

static const pci_product_id_t iwn_devices[] = {
	PCI_PRODUCT_INTEL_WIFI_LINK_1030_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_1030_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_4965_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_4965_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_4965_3,
	PCI_PRODUCT_INTEL_WIFI_LINK_4965_4,
	PCI_PRODUCT_INTEL_WIFI_LINK_5100_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_5100_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_5150_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_5150_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_5300_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_5300_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_5350_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_5350_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_1000_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_1000_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_6000_3X3_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_6000_3X3_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_6050_2X2_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_6050_2X2_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_6005_2X2_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_6005_2X2_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_6230_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_6230_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_6235,
	PCI_PRODUCT_INTEL_WIFI_LINK_6235_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_100_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_100_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_130_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_130_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_2230_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_2230_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_2200_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_2200_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_135_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_135_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_105_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_105_2,
};

static int	iwn_match(device_t, struct cfdata *, void *);
static void	iwn_attach(device_t, device_t, void *);
static int	iwn4965_attach(struct iwn_softc *, pci_product_id_t);
static int	iwn5000_attach(struct iwn_softc *, pci_product_id_t);
static void	iwn_radiotap_attach(struct iwn_softc *);
static int	iwn_detach(device_t, int);
#if 0
static void	iwn_power(int, void *);
#endif
static bool	iwn_resume(device_t, const pmf_qual_t *);
static int	iwn_nic_lock(struct iwn_softc *);
static int	iwn_eeprom_lock(struct iwn_softc *);
static int	iwn_init_otprom(struct iwn_softc *);
static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
static int	iwn_dma_contig_alloc(bus_dma_tag_t, struct iwn_dma_info *,
		    void **, bus_size_t, bus_size_t);
static void	iwn_dma_contig_free(struct iwn_dma_info *);
static int	iwn_alloc_sched(struct iwn_softc *);
static void	iwn_free_sched(struct iwn_softc *);
static int	iwn_alloc_kw(struct iwn_softc *);
static void	iwn_free_kw(struct iwn_softc *);
static int	iwn_alloc_ict(struct iwn_softc *);
static void	iwn_free_ict(struct iwn_softc *);
static int	iwn_alloc_fwmem(struct iwn_softc *);
static void	iwn_free_fwmem(struct iwn_softc *);
static int	iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void	iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void	iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static int	iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
		    int);
static void	iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void	iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void	iwn5000_ict_reset(struct iwn_softc *);
static int	iwn_read_eeprom(struct iwn_softc *);
static void	iwn4965_read_eeprom(struct iwn_softc *);

#ifdef IWN_DEBUG
static void	iwn4965_print_power_group(struct iwn_softc *, int);
#endif
static void	iwn5000_read_eeprom(struct iwn_softc *);
static void	iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
static void	iwn_read_eeprom_enhinfo(struct iwn_softc *);
static struct ieee80211_node *iwn_node_alloc(struct ieee80211_node_table *);
static void	iwn_newassoc(struct ieee80211_node *, int);
static int	iwn_media_change(struct ifnet *);
static int	iwn_newstate(struct ieee80211com *, enum ieee80211_state, int);
static void	iwn_iter_func(void *, struct ieee80211_node *);
static void	iwn_calib_timeout(void *);
static void	iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
#ifndef IEEE80211_NO_HT
static void	iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
#endif
static void	iwn5000_rx_calib_results(struct iwn_softc *,
		    struct iwn_rx_desc *, struct iwn_rx_data *);
static void	iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
		    uint8_t);
static void	iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
static void	iwn_notif_intr(struct iwn_softc *);
static void	iwn_wakeup_intr(struct iwn_softc *);
static void	iwn_fatal_intr(struct iwn_softc *);
static int	iwn_intr(void *);
static void	iwn_softintr(void *);
static void	iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
static void	iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
#ifdef notyet
static void	iwn5000_reset_sched(struct iwn_softc *, int, int);
#endif
static int	iwn_tx(struct iwn_softc *, struct mbuf *,
		    struct ieee80211_node *, int);
static void	iwn_start(struct ifnet *);
static void	iwn_watchdog(struct ifnet *);
static int	iwn_ioctl(struct ifnet *, u_long, void *);
static int	iwn_cmd(struct iwn_softc *, int, const void *, int, int);
static int	iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
static int	iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
static int	iwn_set_link_quality(struct iwn_softc *,
		    struct ieee80211_node *);
static int	iwn_add_broadcast_node(struct iwn_softc *, int);
static void	iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
static int	iwn_set_critical_temp(struct iwn_softc *);
static int	iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
static void	iwn4965_power_calibration(struct iwn_softc *, int);
static int	iwn4965_set_txpower(struct iwn_softc *, int);
static int	iwn5000_set_txpower(struct iwn_softc *, int);
static int	iwn4965_get_rssi(const struct iwn_rx_stat *);
static int	iwn5000_get_rssi(const struct iwn_rx_stat *);
static int	iwn_get_noise(const struct iwn_rx_general_stats *);
static int	iwn4965_get_temperature(struct iwn_softc *);
static int	iwn5000_get_temperature(struct iwn_softc *);
static int	iwn_init_sensitivity(struct iwn_softc *);
static void	iwn_collect_noise(struct iwn_softc *,
		    const struct iwn_rx_general_stats *);
static int	iwn4965_init_gains(struct iwn_softc *);
static int	iwn5000_init_gains(struct iwn_softc *);
static int	iwn4965_set_gains(struct iwn_softc *);
static int	iwn5000_set_gains(struct iwn_softc *);
static void	iwn_tune_sensitivity(struct iwn_softc *,
		    const struct iwn_rx_stats *);
static int	iwn_send_sensitivity(struct iwn_softc *);
static int	iwn_set_pslevel(struct iwn_softc *, int, int, int);
static int	iwn5000_runtime_calib(struct iwn_softc *);

static int	iwn_config_bt_coex_bluetooth(struct iwn_softc *);
static int	iwn_config_bt_coex_prio_table(struct iwn_softc *);
static int	iwn_config_bt_coex_adv1(struct iwn_softc *);
static int	iwn_config_bt_coex_adv2(struct iwn_softc *);

static int	iwn_config(struct iwn_softc *);
static uint16_t	iwn_get_active_dwell_time(struct iwn_softc *, uint16_t,
		    uint8_t);
static uint16_t	iwn_limit_dwell(struct iwn_softc *, uint16_t);
static uint16_t	iwn_get_passive_dwell_time(struct iwn_softc *, uint16_t);
static int	iwn_scan(struct iwn_softc *, uint16_t);
static int	iwn_auth(struct iwn_softc *);
static int	iwn_run(struct iwn_softc *);
#ifdef IWN_HWCRYPTO
static int	iwn_set_key(struct ieee80211com *, struct ieee80211_node *,
		    struct ieee80211_key *);
static void	iwn_delete_key(struct ieee80211com *, struct ieee80211_node *,
		    struct ieee80211_key *);
#endif
static int	iwn_wme_update(struct ieee80211com *);
#ifndef IEEE80211_NO_HT
static int	iwn_ampdu_rx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwn_ampdu_rx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static int	iwn_ampdu_tx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwn_ampdu_tx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwn4965_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, uint8_t, uint16_t);
static void	iwn4965_ampdu_tx_stop(struct iwn_softc *,
		    uint8_t, uint16_t);
static void	iwn5000_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, uint8_t, uint16_t);
static void	iwn5000_ampdu_tx_stop(struct iwn_softc *,
		    uint8_t, uint16_t);
#endif
static int	iwn5000_query_calibration(struct iwn_softc *);
static int	iwn5000_send_calibration(struct iwn_softc *);
static int	iwn5000_send_wimax_coex(struct iwn_softc *);
static int	iwn6000_temp_offset_calib(struct iwn_softc *);
static int	iwn2000_temp_offset_calib(struct iwn_softc *);
static int	iwn4965_post_alive(struct iwn_softc *);
static int	iwn5000_post_alive(struct iwn_softc *);
static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
		    int);
static int	iwn4965_load_firmware(struct iwn_softc *);
static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
		    const uint8_t *, int);
static int	iwn5000_load_firmware(struct iwn_softc *);
static int	iwn_read_firmware_leg(struct iwn_softc *,
		    struct iwn_fw_info *);
static int	iwn_read_firmware_tlv(struct iwn_softc *,
		    struct iwn_fw_info *, uint16_t);
static int	iwn_read_firmware(struct iwn_softc *);
static int	iwn_clock_wait(struct iwn_softc *);
static int	iwn_apm_init(struct iwn_softc *);
static void	iwn_apm_stop_master(struct iwn_softc *);
static void	iwn_apm_stop(struct iwn_softc *);
static int	iwn4965_nic_config(struct iwn_softc *);
static int	iwn5000_nic_config(struct iwn_softc *);
static int	iwn_hw_prepare(struct iwn_softc *);
static int	iwn_hw_init(struct iwn_softc *);
static void	iwn_hw_stop(struct iwn_softc *);
static int	iwn_init(struct ifnet *);
static void	iwn_stop(struct ifnet *, int);

/* XXX MCLGETI alternative */
static struct mbuf *MCLGETIalt(struct iwn_softc *, int,
		    struct ifnet *, u_int);
#ifdef IWN_USE_RBUF
static struct iwn_rbuf *iwn_alloc_rbuf(struct iwn_softc *);
static void	iwn_free_rbuf(struct mbuf *, void *, size_t, void *);
static int	iwn_alloc_rpool(struct iwn_softc *);
static void	iwn_free_rpool(struct iwn_softc *);
#endif

static void	iwn_fix_channel(struct ieee80211com *, struct mbuf *,
		    struct iwn_rx_stat *);

#ifdef IWN_DEBUG
#define DPRINTF(x)	do { if (iwn_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwn_debug >= (n)) printf x; } while (0)
int iwn_debug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#endif

CFATTACH_DECL_NEW(iwn,
    sizeof(struct iwn_softc), iwn_match, iwn_attach,
    iwn_detach, NULL);

static int
iwn_match(device_t parent, cfdata_t match __unused, void *aux)
{
	struct pci_attach_args *pa = aux;
	size_t i;

	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
		return 0;

	for (i = 0; i < __arraycount(iwn_devices); i++)
		if (PCI_PRODUCT(pa->pa_id) == iwn_devices[i])
			return 1;

	return 0;
}

static void
iwn_attach(device_t parent __unused, device_t self, void *aux)
{
	struct iwn_softc *sc = device_private(self);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct pci_attach_args *pa = aux;
	const char *intrstr;
	pcireg_t memtype, reg;
	int i, error;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	callout_init(&sc->calib_to, 0);
	callout_setfunc(&sc->calib_to, iwn_calib_timeout, sc);

	pci_aprint_devinfo(pa, NULL);

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
	if (error == 0) {
		aprint_error_dev(self,
		    "PCIe capability structure not found!\n");
		return;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	if (reg & 0xff00)
		pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* Enable bus-mastering. */
	/* XXX verify the bus-mastering is really needed (not in OpenBSD) */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IWN_PCI_BAR0);
	error = pci_mapreg_map(pa, IWN_PCI_BAR0, memtype, 0, &sc->sc_st,
	    &sc->sc_sh, NULL, &sc->sc_sz);
	if (error != 0) {
		aprint_error_dev(self, "can't map mem space\n");
		return;
	}

	sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwn_softintr, sc);
	if (sc->sc_soft_ih == NULL) {
		aprint_error_dev(self, "can't establish soft interrupt\n");
		goto unmap;
	}

	/* Install interrupt handler. */
	error = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
	if (error) {
		aprint_error_dev(self, "can't allocate interrupt\n");
		goto failsi;
	}
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX)
		CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
	else
		SET(reg, PCI_COMMAND_INTERRUPT_DISABLE);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
	    IPL_NET, iwn_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto failia;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* Read hardware revision and attach.
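	 * The 4965 has a dedicated attach routine; every other supported
	 * adapter (the 1000/2000/5000/6000 families listed in iwn_devices)
	 * goes through iwn5000_attach() below.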
	 */
	sc->hw_type =
	    (IWN_READ(sc, IWN_HW_REV) & IWN_HW_REV_TYPE_MASK)
	    >> IWN_HW_REV_TYPE_SHIFT;
	if (sc->hw_type == IWN_HW_REV_TYPE_4965)
		error = iwn4965_attach(sc, PCI_PRODUCT(pa->pa_id));
	else
		error = iwn5000_attach(sc, PCI_PRODUCT(pa->pa_id));
	if (error != 0) {
		aprint_error_dev(self, "could not attach device\n");
		goto failih;
	}

	if ((error = iwn_hw_prepare(sc)) != 0) {
		aprint_error_dev(self, "hardware not ready\n");
		goto failih;
	}

	/* Read MAC address, channels, etc from EEPROM. */
	if ((error = iwn_read_eeprom(sc)) != 0) {
		aprint_error_dev(self, "could not read EEPROM\n");
		goto failih;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwn_alloc_fwmem(sc)) != 0) {
		aprint_error_dev(self,
		    "could not allocate memory for firmware\n");
		goto failih;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwn_alloc_kw(sc)) != 0) {
		aprint_error_dev(self, "could not allocate keep warm page\n");
		goto fail1;
	}

	/* Allocate ICT table for 5000 Series. */
	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
	    (error = iwn_alloc_ict(sc)) != 0) {
		aprint_error_dev(self, "could not allocate ICT table\n");
		goto fail2;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwn_alloc_sched(sc)) != 0) {
		aprint_error_dev(self,
		    "could not allocate TX scheduler rings\n");
		goto fail3;
	}

#ifdef IWN_USE_RBUF
	/* Allocate RX buffers. */
	if ((error = iwn_alloc_rpool(sc)) != 0) {
		aprint_error_dev(self, "could not allocate RX buffers\n");
		goto fail3;
	}
#endif

	/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
	for (i = 0; i < sc->ntxqs; i++) {
		if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
			aprint_error_dev(self,
			    "could not allocate TX ring %d\n", i);
			goto fail4;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		aprint_error_dev(self, "could not allocate RX ring\n");
		goto fail4;
	}

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);

	/* Count the number of available chains. */
	sc->ntxchains =
	    ((sc->txchainmask >> 2) & 1) +
	    ((sc->txchainmask >> 1) & 1) +
	    ((sc->txchainmask >> 0) & 1);
	sc->nrxchains =
	    ((sc->rxchainmask >> 2) & 1) +
	    ((sc->rxchainmask >> 1) & 1) +
	    ((sc->rxchainmask >> 0) & 1);
	aprint_normal_dev(self, "MIMO %dT%dR, %.4s, address %s\n",
	    sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
	    ether_sprintf(ic->ic_myaddr));

	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/*
	 * Set device capabilities.
	 * XXX OpenBSD has IEEE80211_C_WEP, IEEE80211_C_RSN, and
	 * IEEE80211_C_PMGT too.
	 */
	ic->ic_caps =
	    IEEE80211_C_IBSS |		/* IBSS mode support */
	    IEEE80211_C_WPA |		/* 802.11i */
	    IEEE80211_C_MONITOR |	/* monitor mode supported */
	    IEEE80211_C_TXPMGT |	/* tx power management */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
	    IEEE80211_C_WME;		/* 802.11e */

#ifndef IEEE80211_NO_HT
	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
		/* Set HT capabilities.
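		 * A-MSDU sizes up to 7935 bytes are only advertised when
		 * the driver is built with 8k RX buffers
		 * (IWN_RBUF_SIZE == 8192).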
		 */
		ic->ic_htcaps =
#if IWN_RBUF_SIZE == 8192
		    IEEE80211_HTCAP_AMSDU7935 |
#endif
		    IEEE80211_HTCAP_CBW20_40 |
		    IEEE80211_HTCAP_SGI20 |
		    IEEE80211_HTCAP_SGI40;
		if (sc->hw_type != IWN_HW_REV_TYPE_4965)
			ic->ic_htcaps |= IEEE80211_HTCAP_GF;
		if (sc->hw_type == IWN_HW_REV_TYPE_6050)
			ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
		else
			ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
	}
#endif	/* !IEEE80211_NO_HT */

	/* Set supported legacy rates. */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
	if (sc->sc_flags & IWN_FLAG_HAS_5GHZ) {
		ic->ic_sup_rates[IEEE80211_MODE_11A] =
		    ieee80211_std_rateset_11a;
	}
#ifndef IEEE80211_NO_HT
	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
		/* Set supported HT rates. */
		ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
		if (sc->nrxchains > 1)
			ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
		if (sc->nrxchains > 2)
			ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
	}
#endif

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[0];

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwn_init;
	ifp->if_ioctl = iwn_ioctl;
	ifp->if_start = iwn_start;
	ifp->if_stop = iwn_stop;
	ifp->if_watchdog = iwn_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	error = if_initialize(ifp);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
		    error);
		goto fail5;
	}
	ieee80211_ifattach(ic);
	/* Use common softint-based if_input */
	ifp->if_percpuq = if_percpuq_create(ifp);
	if_register(ifp);

	ic->ic_node_alloc = iwn_node_alloc;
	ic->ic_newassoc = iwn_newassoc;
#ifdef IWN_HWCRYPTO
	ic->ic_crypto.cs_key_set = iwn_set_key;
	ic->ic_crypto.cs_key_delete = iwn_delete_key;
#endif
	ic->ic_wme.wme_update = iwn_wme_update;
#ifndef IEEE80211_NO_HT
	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
	ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
	ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
#endif

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwn_newstate;
	ieee80211_media_init(ic, iwn_media_change, ieee80211_media_status);

	sc->amrr.amrr_min_success_threshold = 1;
	sc->amrr.amrr_max_success_threshold = 15;

	iwn_radiotap_attach(sc);

	/*
	 * XXX for NetBSD, OpenBSD timeout_set replaced by
	 * callout_init and callout_setfunc, above.
	 */

	if (pmf_device_register(self, NULL, iwn_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* XXX NetBSD add call to ieee80211_announce for dmesg. */
	ieee80211_announce(ic);

	sc->sc_flags |= IWN_FLAG_ATTACHED;
	return;

	/* Free allocated memory if something failed during attachment.
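	 * The labels below unwind the allocations in reverse order, so a
	 * failure at any step only releases what was set up before it.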
	 */
fail5:	iwn_free_rx_ring(sc, &sc->rxq);
fail4:	while (--i >= 0)
		iwn_free_tx_ring(sc, &sc->txq[i]);
#ifdef IWN_USE_RBUF
	iwn_free_rpool(sc);
#endif
	iwn_free_sched(sc);
fail3:	if (sc->ict != NULL)
		iwn_free_ict(sc);
fail2:	iwn_free_kw(sc);
fail1:	iwn_free_fwmem(sc);
failih:	pci_intr_disestablish(sc->sc_pct, sc->sc_ih);
	sc->sc_ih = NULL;
failia:	pci_intr_release(sc->sc_pct, sc->sc_pihp, 1);
	sc->sc_pihp = NULL;
failsi:	softint_disestablish(sc->sc_soft_ih);
	sc->sc_soft_ih = NULL;
unmap:	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
}

int
iwn4965_attach(struct iwn_softc *sc, pci_product_id_t pid)
{
	struct iwn_ops *ops = &sc->ops;

	ops->load_firmware = iwn4965_load_firmware;
	ops->read_eeprom = iwn4965_read_eeprom;
	ops->post_alive = iwn4965_post_alive;
	ops->nic_config = iwn4965_nic_config;
	ops->config_bt_coex = iwn_config_bt_coex_bluetooth;
	ops->update_sched = iwn4965_update_sched;
	ops->get_temperature = iwn4965_get_temperature;
	ops->get_rssi = iwn4965_get_rssi;
	ops->set_txpower = iwn4965_set_txpower;
	ops->init_gains = iwn4965_init_gains;
	ops->set_gains = iwn4965_set_gains;
	ops->add_node = iwn4965_add_node;
	ops->tx_done = iwn4965_tx_done;
#ifndef IEEE80211_NO_HT
	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
#endif
	sc->ntxqs = IWN4965_NTXQUEUES;
	sc->ndmachnls = IWN4965_NDMACHNLS;
	sc->broadcast_id = IWN4965_ID_BROADCAST;
	sc->rxonsz = IWN4965_RXONSZ;
	sc->schedsz = IWN4965_SCHEDSZ;
	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
	sc->fwsz = IWN4965_FWSZ;
	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
	sc->limits = &iwn4965_sensitivity_limits;
	sc->fwname = "iwlwifi-4965-2.ucode";
	/* Override chains masks, ROM is known to be broken. */
	sc->txchainmask = IWN_ANT_AB;
	sc->rxchainmask = IWN_ANT_ABC;

	return 0;
}

int
iwn5000_attach(struct iwn_softc *sc, pci_product_id_t pid)
{
	struct iwn_ops *ops = &sc->ops;

	ops->load_firmware = iwn5000_load_firmware;
	ops->read_eeprom = iwn5000_read_eeprom;
	ops->post_alive = iwn5000_post_alive;
	ops->nic_config = iwn5000_nic_config;
	ops->config_bt_coex = iwn_config_bt_coex_bluetooth;
	ops->update_sched = iwn5000_update_sched;
	ops->get_temperature = iwn5000_get_temperature;
	ops->get_rssi = iwn5000_get_rssi;
	ops->set_txpower = iwn5000_set_txpower;
	ops->init_gains = iwn5000_init_gains;
	ops->set_gains = iwn5000_set_gains;
	ops->add_node = iwn5000_add_node;
	ops->tx_done = iwn5000_tx_done;
#ifndef IEEE80211_NO_HT
	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
#endif
	sc->ntxqs = IWN5000_NTXQUEUES;
	sc->ndmachnls = IWN5000_NDMACHNLS;
	sc->broadcast_id = IWN5000_ID_BROADCAST;
	sc->rxonsz = IWN5000_RXONSZ;
	sc->schedsz = IWN5000_SCHEDSZ;
	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
	sc->fwsz = IWN5000_FWSZ;
	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;

	switch (sc->hw_type) {
	case IWN_HW_REV_TYPE_5100:
		sc->limits = &iwn5000_sensitivity_limits;
		sc->fwname = "iwlwifi-5000-2.ucode";
		/* Override chains masks, ROM is known to be broken.
		 */
		sc->txchainmask = IWN_ANT_B;
		sc->rxchainmask = IWN_ANT_AB;
		break;
	case IWN_HW_REV_TYPE_5150:
		sc->limits = &iwn5150_sensitivity_limits;
		sc->fwname = "iwlwifi-5150-2.ucode";
		break;
	case IWN_HW_REV_TYPE_5300:
	case IWN_HW_REV_TYPE_5350:
		sc->limits = &iwn5000_sensitivity_limits;
		sc->fwname = "iwlwifi-5000-2.ucode";
		break;
	case IWN_HW_REV_TYPE_1000:
		sc->limits = &iwn1000_sensitivity_limits;
		if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_100_1 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_100_2)
			sc->fwname = "iwlwifi-100-5.ucode";
		else
			sc->fwname = "iwlwifi-1000-3.ucode";
		break;
	case IWN_HW_REV_TYPE_6000:
		sc->limits = &iwn6000_sensitivity_limits;
		sc->fwname = "iwlwifi-6000-4.ucode";
		if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_1 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_2) {
			sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
			/* Override chains masks, ROM is known to be broken. */
			sc->txchainmask = IWN_ANT_BC;
			sc->rxchainmask = IWN_ANT_BC;
		}
		break;
	case IWN_HW_REV_TYPE_6050:
		sc->limits = &iwn6000_sensitivity_limits;
		sc->fwname = "iwlwifi-6050-5.ucode";
		break;
	case IWN_HW_REV_TYPE_6005:
		sc->limits = &iwn6000_sensitivity_limits;
		/* Type 6030 cards return IWN_HW_REV_TYPE_6005 */
		if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_1030_1 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_1030_2 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6230_1 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6230_2 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6235 ||
		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6235_2) {
			sc->fwname = "iwlwifi-6000g2b-6.ucode";
			ops->config_bt_coex = iwn_config_bt_coex_adv1;
		}
		/*
		 * This covers:
		 * PCI_PRODUCT_INTEL_WIFI_LINK_6005_2X2_1
		 * PCI_PRODUCT_INTEL_WIFI_LINK_6005_2X2_2
		 */
		else
			sc->fwname = "iwlwifi-6000g2a-5.ucode";
		break;
	case IWN_HW_REV_TYPE_2030:
		sc->limits = &iwn2030_sensitivity_limits;
		sc->fwname = "iwlwifi-2030-6.ucode";
		ops->config_bt_coex = iwn_config_bt_coex_adv2;
		break;
	case IWN_HW_REV_TYPE_2000:
		sc->limits = &iwn2000_sensitivity_limits;
		sc->fwname = "iwlwifi-2000-6.ucode";
		break;
	case IWN_HW_REV_TYPE_135:
		sc->limits = &iwn2000_sensitivity_limits;
		sc->fwname = "iwlwifi-135-6.ucode";
		ops->config_bt_coex = iwn_config_bt_coex_adv2;
		break;
	case IWN_HW_REV_TYPE_105:
		sc->limits = &iwn2000_sensitivity_limits;
		sc->fwname = "iwlwifi-105-6.ucode";
		break;
	default:
		aprint_normal(": adapter type %d not supported\n", sc->hw_type);
		return ENOTSUP;
	}
	return 0;
}

/*
 * Attach the interface to 802.11 radiotap.
 */
static void
iwn_radiotap_attach(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ic.ic_ifp;

	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
	    &sc->sc_drvbpf);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWN_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWN_TX_RADIOTAP_PRESENT);
}

static int
iwn_detach(device_t self, int flags __unused)
{
	struct iwn_softc *sc = device_private(self);
	struct ifnet *ifp = sc->sc_ic.ic_ifp;
	int qid;

	if (!(sc->sc_flags & IWN_FLAG_ATTACHED))
		return 0;

	callout_stop(&sc->calib_to);

	/* Uninstall interrupt handler. */
	if (sc->sc_ih != NULL)
		pci_intr_disestablish(sc->sc_pct, sc->sc_ih);
	if (sc->sc_pihp != NULL)
		pci_intr_release(sc->sc_pct, sc->sc_pihp, 1);
	if (sc->sc_soft_ih != NULL)
		softint_disestablish(sc->sc_soft_ih);

	/* Free DMA resources. */
	iwn_free_rx_ring(sc, &sc->rxq);
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_free_tx_ring(sc, &sc->txq[qid]);
#ifdef IWN_USE_RBUF
	iwn_free_rpool(sc);
#endif
	iwn_free_sched(sc);
	iwn_free_kw(sc);
	if (sc->ict != NULL)
		iwn_free_ict(sc);
	iwn_free_fwmem(sc);

	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);

	ieee80211_ifdetach(&sc->sc_ic);
	if_detach(ifp);

	return 0;
}

#if 0
/*
 * XXX Investigate if clearing the PCI retry timeout could eliminate
 * the repeated scan calls. Also the calls to if_init and if_start
 * are similar to the effect of adding the call to ifioctl_common.
 */
static void
iwn_power(int why, void *arg)
{
	struct iwn_softc *sc = arg;
	struct ifnet *ifp;
	pcireg_t reg;
	int s;

	if (why != PWR_RESUME)
		return;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	if (reg & 0xff00)
		pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	s = splnet();
	ifp = &sc->sc_ic.ic_if;
	if (ifp->if_flags & IFF_UP) {
		ifp->if_init(ifp);
		if (ifp->if_flags & IFF_RUNNING)
			ifp->if_start(ifp);
	}
	splx(s);
}
#endif

static bool
iwn_resume(device_t dv, const pmf_qual_t *qual)
{
	return true;
}

static int
iwn_nic_lock(struct iwn_softc *sc)
{
	int ntries;

	/* Request exclusive access to NIC. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);

	/* Spin until we actually get the lock.
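	 * Poll for MAC_ACCESS_ENA with the SLEEP bit clear, up to 1000
	 * times with a 10us delay each (roughly 10ms total).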
	 */
	for (ntries = 0; ntries < 1000; ntries++) {
		if ((IWN_READ(sc, IWN_GP_CNTRL) &
		    (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}

static __inline void
iwn_nic_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
}

static __inline uint32_t
iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_PRPH_RDATA);
}

static __inline void
iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
}

static __inline void
iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
}

static __inline void
iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
}

static __inline void
iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
    const uint32_t *data, int count)
{
	for (; count > 0; count--, data++, addr += 4)
		iwn_prph_write(sc, addr, *data);
}

static __inline uint32_t
iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_MEM_RDATA);
}

static __inline void
iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_MEM_WDATA, data);
}

#ifndef IEEE80211_NO_HT
static __inline void
iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
{
	uint32_t tmp;

	tmp = iwn_mem_read(sc, addr & ~3);
	if (addr & 3)
		tmp = (tmp & 0x0000ffff) | data << 16;
	else
		tmp = (tmp & 0xffff0000) | data;
	iwn_mem_write(sc, addr & ~3, tmp);
}
#endif

static __inline void
iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
    int count)
{
	for (; count > 0; count--, addr += 4)
		*data++ = iwn_mem_read(sc, addr);
}

static __inline void
iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
    int count)
{
	for (; count > 0; count--, addr += 4)
		iwn_mem_write(sc, addr, val);
}

static int
iwn_eeprom_lock(struct iwn_softc *sc)
{
	int i, ntries;

	for (i = 0; i < 100; i++) {
		/* Request exclusive access to EEPROM. */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);

		/* Spin until we actually get the lock. */
		for (ntries = 0; ntries < 100; ntries++) {
			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
				return 0;
			DELAY(10);
		}
	}
	return ETIMEDOUT;
}

static __inline void
iwn_eeprom_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
}

/*
 * Initialize access by host to One Time Programmable ROM.
 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
 */
static int
iwn_init_otprom(struct iwn_softc *sc)
{
	uint16_t prev = 0, base, next;
	int count, error;

	/* Wait for clock stabilization before accessing prph. */
	if ((error = iwn_clock_wait(sc)) != 0)
		return error;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	DELAY(5);
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	iwn_nic_unlock(sc);

	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
	if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
		    IWN_RESET_LINK_PWR_MGMT_DIS);
	}
	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
	/* Clear ECC status. */
	IWN_SETBITS(sc, IWN_OTP_GP,
	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);

	/*
	 * Find the block before last block (contains the EEPROM image)
	 * for HW without OTP shadow RAM.
	 */
	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
		/* Switch to absolute addressing mode. */
		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
		base = 0;
		for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
			error = iwn_read_prom_data(sc, base, &next, 2);
			if (error != 0)
				return error;
			if (next == 0)	/* End of linked-list. */
				break;
			prev = base;
			base = le16toh(next);
		}
		if (count == 0 || count == IWN1000_OTP_NBLOCKS)
			return EIO;
		/* Skip "next" word. */
		sc->prom_base = prev + 1;
	}
	return 0;
}

static int
iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
{
	uint8_t *out = data;
	uint32_t val, tmp;
	int ntries;

	addr += sc->prom_base;
	for (; count > 0; count -= 2, addr++) {
		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
		for (ntries = 0; ntries < 10; ntries++) {
			val = IWN_READ(sc, IWN_EEPROM);
			if (val & IWN_EEPROM_READ_VALID)
				break;
			DELAY(5);
		}
		if (ntries == 10) {
			aprint_error_dev(sc->sc_dev,
			    "timeout reading ROM at 0x%x\n", addr);
			return ETIMEDOUT;
		}
		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
			/* OTPROM, check for ECC errors. */
			tmp = IWN_READ(sc, IWN_OTP_GP);
			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
				aprint_error_dev(sc->sc_dev,
				    "OTPROM ECC error at 0x%x\n", addr);
				return EIO;
			}
			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
				/* Correctable ECC error, clear bit.
				 */
				IWN_SETBITS(sc, IWN_OTP_GP,
				    IWN_OTP_GP_ECC_CORR_STTS);
			}
		}
		*out++ = val >> 16;
		if (count > 1)
			*out++ = val >> 24;
	}
	return 0;
}

static int
iwn_dma_contig_alloc(bus_dma_tag_t tag, struct iwn_dma_info *dma, void **kvap,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, error;

	dma->tag = tag;
	dma->size = size;

	error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);	/* XXX OpenBSD adds BUS_DMA_ZERO */
	if (error != 0)
		goto fail;

	error = bus_dmamem_map(tag, &dma->seg, 1, size, &dma->vaddr,
	    BUS_DMA_NOWAIT);	/* XXX OpenBSD adds BUS_DMA_COHERENT */
	if (error != 0)
		goto fail;

	error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	/* XXX Presumably needed because of missing BUS_DMA_ZERO, above. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);

	dma->paddr = dma->map->dm_segs[0].ds_addr;
	if (kvap != NULL)
		*kvap = dma->vaddr;

	return 0;

fail:	iwn_dma_contig_free(dma);
	return error;
}

static void
iwn_dma_contig_free(struct iwn_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}

static int
iwn_alloc_sched(struct iwn_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    (void **)&sc->sched, sc->schedsz, 1024);
}

static void
iwn_free_sched(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->sched_dma);
}

static int
iwn_alloc_kw(struct iwn_softc *sc)
{
	/* "Keep Warm" page must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, NULL, 4096,
	    4096);
}

static void
iwn_free_kw(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->kw_dma);
}

static int
iwn_alloc_ict(struct iwn_softc *sc)
{
	/* ICT table must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    (void **)&sc->ict, IWN_ICT_SIZE, 4096);
}

static void
iwn_free_ict(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->ict_dma);
}

static int
iwn_alloc_fwmem(struct iwn_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, NULL,
	    sc->fwsz, 16);
}

static void
iwn_free_fwmem(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->fw_dma);
}

static int
iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned).
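	 * Each descriptor is a single 32-bit word holding the DMA address
	 * of an RX buffer shifted right by 8 bits, which is why the
	 * buffers themselves must be 256-byte aligned.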
	 */
	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
	    (void **)&ring->desc, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}

	/* Allocate RX status area (16-byte aligned). */
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    (void **)&ring->stat, sizeof (struct iwn_rx_status), 16);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];

		error = bus_dmamap_create(sc->sc_dmat, IWN_RBUF_SIZE, 1,
		    IWN_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		data->m = MCLGETIalt(sc, M_DONTWAIT, NULL, IWN_RBUF_SIZE);
		if (data->m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate RX mbuf\n");
			error = ENOBUFS;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), IWN_RBUF_SIZE, NULL,
		    BUS_DMA_NOWAIT | BUS_DMA_READ);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not map mbuf (error %d)\n", error);
			goto fail;
		}

		/* Set physical address of RX buffer (256-byte aligned). */
		ring->desc[i] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	}

	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0, size,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	iwn_free_rx_ring(sc, ring);
	return error;
}

static void
iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	int ntries;

	if (iwn_nic_lock(sc) == 0) {
		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
			    IWN_FH_RX_STATUS_IDLE)
				break;
			DELAY(10);
		}
		iwn_nic_unlock(sc);
	}
	ring->cur = 0;
	sc->last_rx_valid = 0;
}

static void
iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	int i;

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

static int
iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned).
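	 * Descriptors are allocated for every ring, but TX command buffers
	 * are only allocated for rings 0-4 (the 4 EDCA rings plus the
	 * command ring); see the early return below.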
	 */
	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
	    (void **)&ring->desc, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	/*
	 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 * XXX Do we really need to allocate descriptors for other rings?
	 */
	if (qid > 4)
		return 0;

	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma,
	    (void **)&ring->cmd, size, 4);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + 12;
		paddr += sizeof (struct iwn_tx_cmd);

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    IWN_MAX_SCATTER - 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	return 0;

fail:	iwn_free_tx_ring(sc, ring);
	return error;
}

static void
iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

static void
iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

static void
iwn5000_ict_reset(struct iwn_softc *sc)
{
	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);

	/* Reset ICT table. */
	memset(sc->ict, 0, IWN_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWN_ICT_SIZE,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	DPRINTF(("enabling ICT\n"));
	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);

	/* Enable periodic RX interrupt. */
	sc->int_mask |= IWN_INT_RX_PERIODIC;
	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWN_FLAG_USE_ICT;

	/* Re-enable interrupts.
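	 * Acknowledge any interrupts that may be pending (as in the
	 * "Clear pending interrupts" step during attach) before unmasking
	 * the ones selected in sc->int_mask.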
	 */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
}

static int
iwn_read_eeprom(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t val;
	int error;

	/* Check whether adapter has an EEPROM or an OTPROM. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
	DPRINTF(("%s found\n", (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ?
	    "OTPROM" : "EEPROM"));

	/* Adapter has to be powered on for EEPROM access to work. */
	if ((error = iwn_apm_init(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not power ON adapter\n");
		return error;
	}

	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
		aprint_error_dev(sc->sc_dev,
		    "bad ROM signature\n");
		return EIO;
	}
	if ((error = iwn_eeprom_lock(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not lock ROM (error=%d)\n", error);
		return error;
	}
	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
		if ((error = iwn_init_otprom(sc)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not initialize OTPROM\n");
			return error;
		}
	}

	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
	DPRINTF(("SKU capabilities=0x%04x\n", le16toh(val)));
	/* Check if HT support is bonded out. */
	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
		sc->sc_flags |= IWN_FLAG_HAS_11N;

	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
	sc->rfcfg = le16toh(val);
	DPRINTF(("radio config=0x%04x\n", sc->rfcfg));
	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
	if (sc->txchainmask == 0)
		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
	if (sc->rxchainmask == 0)
		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);

	/* Read MAC address. */
	iwn_read_prom_data(sc, IWN_EEPROM_MAC, ic->ic_myaddr, ETHER_ADDR_LEN);

	/* Read adapter-specific information from EEPROM. */
	ops->read_eeprom(sc);

	iwn_apm_stop(sc);	/* Power OFF adapter. */

	iwn_eeprom_unlock(sc);
	return 0;
}

static void
iwn4965_read_eeprom(struct iwn_softc *sc)
{
	uint32_t addr;
	uint16_t val;
	int i;

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < 5; i++) {
		addr = iwn4965_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
	sc->maxpwr2GHz = val & 0xff;
	sc->maxpwr5GHz = val >> 8;
	/* Check that EEPROM values are within valid range. */
	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
		sc->maxpwr5GHz = 38;
	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
		sc->maxpwr2GHz = 38;
	DPRINTF(("maxpwr 2GHz=%d 5GHz=%d\n", sc->maxpwr2GHz, sc->maxpwr5GHz));

	/* Read samples for each TX power group. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
	    sizeof sc->bands);

	/* Read voltage at which samples were taken.
	 */
	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
	sc->eeprom_voltage = (int16_t)le16toh(val);
	DPRINTF(("voltage=%d (in 0.3V)\n", sc->eeprom_voltage));

#ifdef IWN_DEBUG
	/* Print samples. */
	if (iwn_debug > 0) {
		for (i = 0; i < IWN_NBANDS; i++)
			iwn4965_print_power_group(sc, i);
	}
#endif
}

#ifdef IWN_DEBUG
static void
iwn4965_print_power_group(struct iwn_softc *sc, int i)
{
	struct iwn4965_eeprom_band *band = &sc->bands[i];
	struct iwn4965_eeprom_chan_samples *chans = band->chans;
	int j, c;

	aprint_normal("===band %d===\n", i);
	aprint_normal("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
	aprint_normal("chan1 num=%d\n", chans[0].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			aprint_normal("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[0].samples[c][j].temp,
			    chans[0].samples[c][j].gain,
			    chans[0].samples[c][j].power,
			    chans[0].samples[c][j].pa_det);
		}
	}
	aprint_normal("chan2 num=%d\n", chans[1].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			aprint_normal("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[1].samples[c][j].temp,
			    chans[1].samples[c][j].gain,
			    chans[1].samples[c][j].power,
			    chans[1].samples[c][j].pa_det);
		}
	}
}
#endif

static void
iwn5000_read_eeprom(struct iwn_softc *sc)
{
	struct iwn5000_eeprom_calib_hdr hdr;
	int32_t volt;
	uint32_t base, addr;
	uint16_t val;
	int i;

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
	    sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < 5; i++) {
		addr = base + iwn5000_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read enhanced TX power information for 6000 Series. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
		iwn_read_eeprom_enhinfo(sc);

	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
	DPRINTF(("calib version=%u pa type=%u voltage=%u\n",
	    hdr.version, hdr.pa_type, le16toh(hdr.volt)));
	sc->calib_ver = hdr.version;

	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
	    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
	    sc->hw_type == IWN_HW_REV_TYPE_135 ||
	    sc->hw_type == IWN_HW_REV_TYPE_105) {
		sc->eeprom_voltage = le16toh(hdr.volt);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp = le16toh(val);
		iwn_read_prom_data(sc, base + IWN2000_EEPROM_RAWTEMP, &val, 2);
		sc->eeprom_rawtemp = le16toh(val);
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* Compute temperature offset. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp = le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		volt = le16toh(val);
		sc->temp_off = sc->eeprom_temp - (volt / -5);
		DPRINTF(("temp=%d volt=%d offset=%dK\n",
		    sc->eeprom_temp, volt, sc->temp_off));
	} else {
		/* Read crystal calibration.
		 */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
		    &sc->eeprom_crystal, sizeof (uint32_t));
		DPRINTF(("crystal calibration 0x%08x\n",
		    le32toh(sc->eeprom_crystal)));
	}
}

static void
iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
{
	struct ieee80211com *ic = &sc->sc_ic;
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND];
	uint8_t chan;
	int i;

	iwn_read_prom_data(sc, addr, channels,
	    band->nchan * sizeof (struct iwn_eeprom_chan));

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID))
			continue;

		chan = band->chan[i];

		if (n == 0) {	/* 2GHz band */
			ic->ic_channels[chan].ic_freq =
			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ);
			ic->ic_channels[chan].ic_flags =
			    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
			    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;

		} else {	/* 5GHz band */
			/*
			 * Some adapters support channels 7, 8, 11 and 12
			 * both in the 2GHz and 4.9GHz bands.
			 * Because of limitations in our net80211 layer,
			 * we don't support them in the 4.9GHz band.
			 */
			if (chan <= 14)
				continue;

			ic->ic_channels[chan].ic_freq =
			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ);
			ic->ic_channels[chan].ic_flags = IEEE80211_CHAN_A;
			/* We have at least one valid 5GHz channel. */
			sc->sc_flags |= IWN_FLAG_HAS_5GHZ;
		}

		/* Is active scan allowed on this channel? */
		if (!(channels[i].flags & IWN_EEPROM_CHAN_ACTIVE)) {
			ic->ic_channels[chan].ic_flags |=
			    IEEE80211_CHAN_PASSIVE;
		}

		/* Save maximum allowed TX power for this channel. */
		sc->maxpwr[chan] = channels[i].maxpwr;

		DPRINTF(("adding chan %d flags=0x%x maxpwr=%d\n",
		    chan, channels[i].flags, sc->maxpwr[chan]));
	}
}

static void
iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
{
	struct iwn_eeprom_enhinfo enhinfo[35];
	uint16_t val, base;
	int8_t maxpwr;
	uint8_t flags;
	int i;

	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
	    enhinfo, sizeof enhinfo);

	memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
	for (i = 0; i < __arraycount(enhinfo); i++) {
		flags = enhinfo[i].flags;
		if (!(flags & IWN_ENHINFO_VALID))
			continue;	/* Skip invalid entries. */

		maxpwr = 0;
		if (sc->txchainmask & IWN_ANT_A)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
		if (sc->txchainmask & IWN_ANT_B)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
		if (sc->txchainmask & IWN_ANT_C)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
		if (sc->ntxchains == 2)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
		else if (sc->ntxchains == 3)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
		maxpwr /= 2;	/* Convert half-dBm to dBm.
*/ 1811 1812 DPRINTF(("enhinfo %d, maxpwr=%d\n", i, maxpwr)); 1813 sc->enh_maxpwr[i] = maxpwr; 1814 } 1815 } 1816 1817 static struct ieee80211_node * 1818 iwn_node_alloc(struct ieee80211_node_table *ic __unused) 1819 { 1820 return malloc(sizeof (struct iwn_node), M_80211_NODE, M_NOWAIT | M_ZERO); 1821 } 1822 1823 static void 1824 iwn_newassoc(struct ieee80211_node *ni, int isnew) 1825 { 1826 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 1827 struct iwn_node *wn = (void *)ni; 1828 uint8_t rate; 1829 int ridx, i; 1830 1831 ieee80211_amrr_node_init(&sc->amrr, &wn->amn); 1832 /* Start at lowest available bit-rate, AMRR will raise. */ 1833 ni->ni_txrate = 0; 1834 1835 for (i = 0; i < ni->ni_rates.rs_nrates; i++) { 1836 rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL; 1837 /* Map 802.11 rate to HW rate index. */ 1838 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) 1839 if (iwn_rates[ridx].rate == rate) 1840 break; 1841 wn->ridx[i] = ridx; 1842 } 1843 } 1844 1845 static int 1846 iwn_media_change(struct ifnet *ifp) 1847 { 1848 struct iwn_softc *sc = ifp->if_softc; 1849 struct ieee80211com *ic = &sc->sc_ic; 1850 uint8_t rate, ridx; 1851 int error; 1852 1853 error = ieee80211_media_change(ifp); 1854 if (error != ENETRESET) 1855 return error; 1856 1857 if (ic->ic_fixed_rate != -1) { 1858 rate = ic->ic_sup_rates[ic->ic_curmode]. 1859 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL; 1860 /* Map 802.11 rate to HW rate index. */ 1861 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) 1862 if (iwn_rates[ridx].rate == rate) 1863 break; 1864 sc->fixed_ridx = ridx; 1865 } 1866 1867 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 1868 (IFF_UP | IFF_RUNNING)) { 1869 iwn_stop(ifp, 0); 1870 error = iwn_init(ifp); 1871 } 1872 return error; 1873 } 1874 1875 static int 1876 iwn_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 1877 { 1878 struct ifnet *ifp = ic->ic_ifp; 1879 struct iwn_softc *sc = ifp->if_softc; 1880 int error; 1881 1882 callout_stop(&sc->calib_to); 1883 1884 switch (nstate) { 1885 case IEEE80211_S_SCAN: 1886 /* XXX Do not abort a running scan. */ 1887 if (sc->sc_flags & IWN_FLAG_SCANNING) { 1888 if (ic->ic_state != nstate) 1889 aprint_debug_dev(sc->sc_dev, "scan request(%d) " 1890 "while scanning(%d) ignored\n", nstate, 1891 ic->ic_state); 1892 break; 1893 } 1894 1895 /* XXX Not sure if call and flags are needed. */ 1896 ieee80211_node_table_reset(&ic->ic_scan); 1897 ic->ic_flags |= IEEE80211_F_SCAN | IEEE80211_F_ASCAN; 1898 sc->sc_flags |= IWN_FLAG_SCANNING_2GHZ; 1899 1900 /* Make the link LED blink while we're scanning. */ 1901 iwn_set_led(sc, IWN_LED_LINK, 10, 10); 1902 1903 if ((error = iwn_scan(sc, IEEE80211_CHAN_2GHZ)) != 0) { 1904 aprint_error_dev(sc->sc_dev, 1905 "could not initiate scan\n"); 1906 return error; 1907 } 1908 ic->ic_state = nstate; 1909 return 0; 1910 1911 case IEEE80211_S_ASSOC: 1912 if (ic->ic_state != IEEE80211_S_RUN) 1913 break; 1914 /* FALLTHROUGH */ 1915 case IEEE80211_S_AUTH: 1916 /* Reset state to handle reassociations correctly. */ 1917 sc->rxon.associd = 0; 1918 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS); 1919 sc->calib.state = IWN_CALIB_STATE_INIT; 1920 1921 /* Wait until we hear a beacon before we transmit */ 1922 if (IEEE80211_IS_CHAN_PASSIVE(ic->ic_curchan)) 1923 sc->sc_beacon_wait = 1; 1924 1925 if ((error = iwn_auth(sc)) != 0) { 1926 aprint_error_dev(sc->sc_dev, 1927 "could not move to auth state\n"); 1928 return error; 1929 } 1930 break; 1931 1932 case IEEE80211_S_RUN: 1933 /* 1934 * RUN -> RUN transition; Just restart timers. 
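 * Only the statistics counter is reset here, so the periodic TX power
 * calibration in iwn_calib_timeout() starts a fresh 60 second window;
 * the firmware association state is left untouched.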
1935 */ 1936 if (ic->ic_state == IEEE80211_S_RUN) { 1937 sc->calib_cnt = 0; 1938 break; 1939 } 1940 1941 /* Wait until we hear a beacon before we transmit */ 1942 if (IEEE80211_IS_CHAN_PASSIVE(ic->ic_curchan)) 1943 sc->sc_beacon_wait = 1; 1944 1945 if ((error = iwn_run(sc)) != 0) { 1946 aprint_error_dev(sc->sc_dev, 1947 "could not move to run state\n"); 1948 return error; 1949 } 1950 break; 1951 1952 case IEEE80211_S_INIT: 1953 sc->sc_flags &= ~IWN_FLAG_SCANNING; 1954 sc->calib.state = IWN_CALIB_STATE_INIT; 1955 /* 1956 * Purge the xmit queue so we don't have old frames 1957 * during a new association attempt. 1958 */ 1959 sc->sc_beacon_wait = 0; 1960 ifp->if_flags &= ~IFF_OACTIVE; 1961 iwn_start(ifp); 1962 break; 1963 } 1964 1965 return sc->sc_newstate(ic, nstate, arg); 1966 } 1967 1968 static void 1969 iwn_iter_func(void *arg, struct ieee80211_node *ni) 1970 { 1971 struct iwn_softc *sc = arg; 1972 struct iwn_node *wn = (struct iwn_node *)ni; 1973 1974 ieee80211_amrr_choose(&sc->amrr, ni, &wn->amn); 1975 } 1976 1977 static void 1978 iwn_calib_timeout(void *arg) 1979 { 1980 struct iwn_softc *sc = arg; 1981 struct ieee80211com *ic = &sc->sc_ic; 1982 int s; 1983 1984 s = splnet(); 1985 if (ic->ic_fixed_rate == -1) { 1986 if (ic->ic_opmode == IEEE80211_M_STA) 1987 iwn_iter_func(sc, ic->ic_bss); 1988 else 1989 ieee80211_iterate_nodes(&ic->ic_sta, iwn_iter_func, sc); 1990 } 1991 /* Force automatic TX power calibration every 60 secs. */ 1992 if (++sc->calib_cnt >= 120) { 1993 uint32_t flags = 0; 1994 1995 DPRINTF(("sending request for statistics\n")); 1996 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, 1997 sizeof flags, 1); 1998 sc->calib_cnt = 0; 1999 } 2000 splx(s); 2001 2002 /* Automatic rate control triggered every 500ms. */ 2003 callout_schedule(&sc->calib_to, mstohz(500)); 2004 } 2005 2006 /* 2007 * Process an RX_PHY firmware notification. This is usually immediately 2008 * followed by an MPDU_RX_DONE notification. 2009 */ 2010 static void 2011 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2012 struct iwn_rx_data *data) 2013 { 2014 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); 2015 2016 DPRINTFN(2, ("received PHY stats\n")); 2017 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2018 sizeof (*stat), BUS_DMASYNC_POSTREAD); 2019 2020 /* Save RX statistics, they will be used on MPDU_RX_DONE. */ 2021 memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); 2022 sc->last_rx_valid = 1; 2023 } 2024 2025 /* 2026 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 2027 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 2028 */ 2029 static void 2030 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2031 struct iwn_rx_data *data) 2032 { 2033 struct iwn_ops *ops = &sc->ops; 2034 struct ieee80211com *ic = &sc->sc_ic; 2035 struct ifnet *ifp = ic->ic_ifp; 2036 struct iwn_rx_ring *ring = &sc->rxq; 2037 struct ieee80211_frame *wh; 2038 struct ieee80211_node *ni; 2039 struct mbuf *m, *m1; 2040 struct iwn_rx_stat *stat; 2041 char *head; 2042 uint32_t flags; 2043 int error, len, rssi, s; 2044 2045 if (desc->type == IWN_MPDU_RX_DONE) { 2046 /* Check for prior RX_PHY notification. 
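 * An MPDU_RX_DONE notification carries only the frame itself; the PHY
 * statistics (channel, timestamp, RSSI, ...) arrived in the preceding
 * RX_PHY notification and were stashed in sc->last_rx_stat by
 * iwn_rx_phy().  Without them the frame cannot be attributed
 * correctly, so it is dropped.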
*/ 2047 if (!sc->last_rx_valid) { 2048 DPRINTF(("missing RX_PHY\n")); 2049 return; 2050 } 2051 sc->last_rx_valid = 0; 2052 stat = &sc->last_rx_stat; 2053 } else 2054 stat = (struct iwn_rx_stat *)(desc + 1); 2055 2056 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWN_RBUF_SIZE, 2057 BUS_DMASYNC_POSTREAD); 2058 2059 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 2060 aprint_error_dev(sc->sc_dev, 2061 "invalid RX statistic header\n"); 2062 return; 2063 } 2064 if (desc->type == IWN_MPDU_RX_DONE) { 2065 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 2066 head = (char *)(mpdu + 1); 2067 len = le16toh(mpdu->len); 2068 } else { 2069 head = (char *)(stat + 1) + stat->cfg_phy_len; 2070 len = le16toh(stat->len); 2071 } 2072 2073 flags = le32toh(*(uint32_t *)(head + len)); 2074 2075 /* Discard frames with a bad FCS early. */ 2076 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 2077 DPRINTFN(2, ("RX flags error %x\n", flags)); 2078 ifp->if_ierrors++; 2079 return; 2080 } 2081 /* Discard frames that are too short. */ 2082 if (len < sizeof (*wh)) { 2083 DPRINTF(("frame too short: %d\n", len)); 2084 ic->ic_stats.is_rx_tooshort++; 2085 ifp->if_ierrors++; 2086 return; 2087 } 2088 2089 m1 = MCLGETIalt(sc, M_DONTWAIT, NULL, IWN_RBUF_SIZE); 2090 if (m1 == NULL) { 2091 ic->ic_stats.is_rx_nobuf++; 2092 ifp->if_ierrors++; 2093 return; 2094 } 2095 bus_dmamap_unload(sc->sc_dmat, data->map); 2096 2097 error = bus_dmamap_load(sc->sc_dmat, data->map, mtod(m1, void *), 2098 IWN_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_READ); 2099 if (error != 0) { 2100 m_freem(m1); 2101 2102 /* Try to reload the old mbuf. */ 2103 error = bus_dmamap_load(sc->sc_dmat, data->map, 2104 mtod(data->m, void *), IWN_RBUF_SIZE, NULL, 2105 BUS_DMA_NOWAIT | BUS_DMA_READ); 2106 if (error != 0) { 2107 panic("%s: could not load old RX mbuf", 2108 device_xname(sc->sc_dev)); 2109 } 2110 /* Physical address may have changed. */ 2111 ring->desc[ring->cur] = 2112 htole32(data->map->dm_segs[0].ds_addr >> 8); 2113 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 2114 ring->cur * sizeof (uint32_t), sizeof (uint32_t), 2115 BUS_DMASYNC_PREWRITE); 2116 ifp->if_ierrors++; 2117 return; 2118 } 2119 2120 m = data->m; 2121 data->m = m1; 2122 /* Update RX descriptor. */ 2123 ring->desc[ring->cur] = htole32(data->map->dm_segs[0].ds_addr >> 8); 2124 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 2125 ring->cur * sizeof (uint32_t), sizeof (uint32_t), 2126 BUS_DMASYNC_PREWRITE); 2127 2128 /* Finalize mbuf. */ 2129 m_set_rcvif(m, ifp); 2130 m->m_data = head; 2131 m->m_pkthdr.len = m->m_len = len; 2132 2133 s = splnet(); 2134 2135 /* Grab a reference to the source node. */ 2136 wh = mtod(m, struct ieee80211_frame *); 2137 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2138 2139 /* XXX OpenBSD adds decryption here (see also comments in iwn_tx). */ 2140 /* NetBSD does decryption in ieee80211_input. 
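 * The frame is therefore passed up with any protection bits intact
 * and net80211 decapsulates the crypto header itself (or drops the
 * frame if no matching key is configured).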
*/ 2141 2142 rssi = ops->get_rssi(stat); 2143 2144 /* XXX Added for NetBSD: scans never stop without it */ 2145 if (ic->ic_state == IEEE80211_S_SCAN) 2146 iwn_fix_channel(ic, m, stat); 2147 2148 if (sc->sc_drvbpf != NULL) { 2149 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 2150 2151 tap->wr_flags = 0; 2152 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 2153 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2154 tap->wr_chan_freq = 2155 htole16(ic->ic_channels[stat->chan].ic_freq); 2156 tap->wr_chan_flags = 2157 htole16(ic->ic_channels[stat->chan].ic_flags); 2158 tap->wr_dbm_antsignal = (int8_t)rssi; 2159 tap->wr_dbm_antnoise = (int8_t)sc->noise; 2160 tap->wr_tsft = stat->tstamp; 2161 switch (stat->rate) { 2162 /* CCK rates. */ 2163 case 10: tap->wr_rate = 2; break; 2164 case 20: tap->wr_rate = 4; break; 2165 case 55: tap->wr_rate = 11; break; 2166 case 110: tap->wr_rate = 22; break; 2167 /* OFDM rates. */ 2168 case 0xd: tap->wr_rate = 12; break; 2169 case 0xf: tap->wr_rate = 18; break; 2170 case 0x5: tap->wr_rate = 24; break; 2171 case 0x7: tap->wr_rate = 36; break; 2172 case 0x9: tap->wr_rate = 48; break; 2173 case 0xb: tap->wr_rate = 72; break; 2174 case 0x1: tap->wr_rate = 96; break; 2175 case 0x3: tap->wr_rate = 108; break; 2176 /* Unknown rate: should not happen. */ 2177 default: tap->wr_rate = 0; 2178 } 2179 2180 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m, BPF_D_IN); 2181 } 2182 2183 /* 2184 * If it's a beacon and we're waiting, then do the wakeup. 2185 */ 2186 if (sc->sc_beacon_wait) { 2187 uint8_t type, subtype; 2188 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2189 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2190 /* 2191 * This assumes at this point we've received our own 2192 * beacon. 2193 */ 2194 if (type == IEEE80211_FC0_TYPE_MGT && 2195 subtype == IEEE80211_FC0_SUBTYPE_BEACON) { 2196 sc->sc_beacon_wait = 0; 2197 ifp->if_flags &= ~IFF_OACTIVE; 2198 iwn_start(ifp); 2199 } 2200 } 2201 2202 /* Send the frame to the 802.11 layer. */ 2203 ieee80211_input(ic, m, ni, rssi, 0); 2204 2205 /* Node is no longer needed. */ 2206 ieee80211_free_node(ni); 2207 2208 splx(s); 2209 } 2210 2211 #ifndef IEEE80211_NO_HT 2212 /* Process an incoming Compressed BlockAck. */ 2213 static void 2214 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2215 struct iwn_rx_data *data) 2216 { 2217 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 2218 struct iwn_tx_ring *txq; 2219 2220 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), sizeof (*ba), 2221 BUS_DMASYNC_POSTREAD); 2222 2223 txq = &sc->txq[le16toh(ba->qid)]; 2224 /* XXX TBD */ 2225 } 2226 #endif 2227 2228 /* 2229 * Process a CALIBRATION_RESULT notification sent by the initialization 2230 * firmware on response to a CMD_CALIB_CONFIG command (5000 only). 2231 */ 2232 static void 2233 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2234 struct iwn_rx_data *data) 2235 { 2236 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 2237 int len, idx = -1; 2238 2239 /* Runtime firmware should not send such a notification. 
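 * Calibration results are only expected while the init firmware is
 * running; they are saved in sc->calibcmd[] below and replayed to the
 * runtime firmware once it has been loaded.  After IWN_FLAG_CALIB_DONE
 * is set any further result is stale and is ignored.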
*/ 2240 if (sc->sc_flags & IWN_FLAG_CALIB_DONE) 2241 return; 2242 2243 len = (le32toh(desc->len) & 0x3fff) - 4; 2244 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), len, 2245 BUS_DMASYNC_POSTREAD); 2246 2247 switch (calib->code) { 2248 case IWN5000_PHY_CALIB_DC: 2249 if (sc->hw_type == IWN_HW_REV_TYPE_5150 || 2250 sc->hw_type == IWN_HW_REV_TYPE_2030 || 2251 sc->hw_type == IWN_HW_REV_TYPE_2000 || 2252 sc->hw_type == IWN_HW_REV_TYPE_135 || 2253 sc->hw_type == IWN_HW_REV_TYPE_105) 2254 idx = 0; 2255 break; 2256 case IWN5000_PHY_CALIB_LO: 2257 idx = 1; 2258 break; 2259 case IWN5000_PHY_CALIB_TX_IQ: 2260 idx = 2; 2261 break; 2262 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 2263 if (sc->hw_type < IWN_HW_REV_TYPE_6000 && 2264 sc->hw_type != IWN_HW_REV_TYPE_5150) 2265 idx = 3; 2266 break; 2267 case IWN5000_PHY_CALIB_BASE_BAND: 2268 idx = 4; 2269 break; 2270 } 2271 if (idx == -1) /* Ignore other results. */ 2272 return; 2273 2274 /* Save calibration result. */ 2275 if (sc->calibcmd[idx].buf != NULL) 2276 free(sc->calibcmd[idx].buf, M_DEVBUF); 2277 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT); 2278 if (sc->calibcmd[idx].buf == NULL) { 2279 DPRINTF(("not enough memory for calibration result %d\n", 2280 calib->code)); 2281 return; 2282 } 2283 DPRINTF(("saving calibration result code=%d len=%d\n", 2284 calib->code, len)); 2285 sc->calibcmd[idx].len = len; 2286 memcpy(sc->calibcmd[idx].buf, calib, len); 2287 } 2288 2289 /* 2290 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 2291 * The latter is sent by the firmware after each received beacon. 2292 */ 2293 static void 2294 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2295 struct iwn_rx_data *data) 2296 { 2297 struct iwn_ops *ops = &sc->ops; 2298 struct ieee80211com *ic = &sc->sc_ic; 2299 struct iwn_calib_state *calib = &sc->calib; 2300 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 2301 int temp; 2302 2303 /* Ignore statistics received during a scan. */ 2304 if (ic->ic_state != IEEE80211_S_RUN) 2305 return; 2306 2307 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2308 sizeof (*stats), BUS_DMASYNC_POSTREAD); 2309 2310 DPRINTFN(3, ("received statistics (cmd=%d)\n", desc->type)); 2311 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */ 2312 2313 /* Test if temperature has changed. */ 2314 if (stats->general.temp != sc->rawtemp) { 2315 /* Convert "raw" temperature to degC. */ 2316 sc->rawtemp = stats->general.temp; 2317 temp = ops->get_temperature(sc); 2318 DPRINTFN(2, ("temperature=%dC\n", temp)); 2319 2320 /* Update TX power if need be (4965AGN only). */ 2321 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 2322 iwn4965_power_calibration(sc, temp); 2323 } 2324 2325 if (desc->type != IWN_BEACON_STATISTICS) 2326 return; /* Reply to a statistics request. */ 2327 2328 sc->noise = iwn_get_noise(&stats->rx.general); 2329 2330 /* Test that RSSI and noise are present in stats report. */ 2331 if (le32toh(stats->rx.general.flags) != 1) { 2332 DPRINTF(("received statistics without RSSI\n")); 2333 return; 2334 } 2335 2336 /* 2337 * XXX Differential gain calibration makes the 6005 firmware 2338 * crap out, so skip it for now. This effectively disables 2339 * sensitivity tuning as well. 
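 * For the other chips the two calls below implement the normal flow:
 * while the calibration state is ASSOC the RX noise/RSSI figures are
 * accumulated over the first 20 beacons (iwn_collect_noise()), and
 * once the state reaches RUN each beacon statistics notification
 * feeds iwn_tune_sensitivity() to adjust the correlation thresholds.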
2340 */ 2341 if (sc->hw_type == IWN_HW_REV_TYPE_6005) 2342 return; 2343 2344 if (calib->state == IWN_CALIB_STATE_ASSOC) 2345 iwn_collect_noise(sc, &stats->rx.general); 2346 else if (calib->state == IWN_CALIB_STATE_RUN) 2347 iwn_tune_sensitivity(sc, &stats->rx); 2348 } 2349 2350 /* 2351 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 2352 * and 5000 adapters have different incompatible TX status formats. 2353 */ 2354 static void 2355 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2356 struct iwn_rx_data *data) 2357 { 2358 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 2359 2360 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2361 sizeof (*stat), BUS_DMASYNC_POSTREAD); 2362 iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff); 2363 } 2364 2365 static void 2366 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2367 struct iwn_rx_data *data) 2368 { 2369 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 2370 2371 #ifdef notyet 2372 /* Reset TX scheduler slot. */ 2373 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); 2374 #endif 2375 2376 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2377 sizeof (*stat), BUS_DMASYNC_POSTREAD); 2378 iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff); 2379 } 2380 2381 /* 2382 * Adapter-independent backend for TX_DONE firmware notifications. 2383 */ 2384 static void 2385 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt, 2386 uint8_t status) 2387 { 2388 struct ieee80211com *ic = &sc->sc_ic; 2389 struct ifnet *ifp = ic->ic_ifp; 2390 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2391 struct iwn_tx_data *data = &ring->data[desc->idx]; 2392 struct iwn_node *wn = (struct iwn_node *)data->ni; 2393 int s; 2394 2395 s = splnet(); 2396 2397 /* Update rate control statistics. */ 2398 wn->amn.amn_txcnt++; 2399 if (ackfailcnt > 0) 2400 wn->amn.amn_retrycnt++; 2401 2402 if (status != 1 && status != 2) 2403 ifp->if_oerrors++; 2404 else 2405 ifp->if_opackets++; 2406 2407 /* Unmap and free mbuf. */ 2408 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize, 2409 BUS_DMASYNC_POSTWRITE); 2410 bus_dmamap_unload(sc->sc_dmat, data->map); 2411 m_freem(data->m); 2412 data->m = NULL; 2413 ieee80211_free_node(data->ni); 2414 data->ni = NULL; 2415 2416 sc->sc_tx_timer = 0; 2417 if (--ring->queued < IWN_TX_RING_LOMARK) { 2418 sc->qfullmsk &= ~(1 << ring->qid); 2419 if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) { 2420 ifp->if_flags &= ~IFF_OACTIVE; 2421 iwn_start(ifp); 2422 } 2423 } 2424 2425 splx(s); 2426 } 2427 2428 /* 2429 * Process a "command done" firmware notification. This is where we wakeup 2430 * processes waiting for a synchronous command completion. 2431 */ 2432 static void 2433 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 2434 { 2435 struct iwn_tx_ring *ring = &sc->txq[4]; 2436 struct iwn_tx_data *data; 2437 2438 if ((desc->qid & 0xf) != 4) 2439 return; /* Not a command ack. */ 2440 2441 data = &ring->data[desc->idx]; 2442 2443 /* If the command was mapped in an mbuf, free it. */ 2444 if (data->m != NULL) { 2445 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 2446 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2447 bus_dmamap_unload(sc->sc_dmat, data->map); 2448 m_freem(data->m); 2449 data->m = NULL; 2450 } 2451 wakeup(&ring->desc[desc->idx]); 2452 } 2453 2454 /* 2455 * Process an INT_FH_RX or INT_SW_RX interrupt. 
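 * The firmware advances a "closed count" in the RX status area; the
 * driver walks sc->rxq.cur up to that index, dispatching each
 * notification on desc->type, and finally reports its progress by
 * writing the ring write pointer back, rounded down to a multiple of
 * 8.  For example, if the loop stops at hw = 13 the value written is
 * (13 - 1) & ~7 = 8.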
2456 */ 2457 static void 2458 iwn_notif_intr(struct iwn_softc *sc) 2459 { 2460 struct iwn_ops *ops = &sc->ops; 2461 struct ieee80211com *ic = &sc->sc_ic; 2462 struct ifnet *ifp = ic->ic_ifp; 2463 uint16_t hw; 2464 int s; 2465 2466 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map, 2467 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD); 2468 2469 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff; 2470 while (sc->rxq.cur != hw) { 2471 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2472 struct iwn_rx_desc *desc; 2473 2474 bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof (*desc), 2475 BUS_DMASYNC_POSTREAD); 2476 desc = mtod(data->m, struct iwn_rx_desc *); 2477 2478 DPRINTFN(4, ("notification qid=%d idx=%d flags=%x type=%d\n", 2479 desc->qid & 0xf, desc->idx, desc->flags, desc->type)); 2480 2481 if (!(desc->qid & 0x80)) /* Reply to a command. */ 2482 iwn_cmd_done(sc, desc); 2483 2484 switch (desc->type) { 2485 case IWN_RX_PHY: 2486 iwn_rx_phy(sc, desc, data); 2487 break; 2488 2489 case IWN_RX_DONE: /* 4965AGN only. */ 2490 case IWN_MPDU_RX_DONE: 2491 /* An 802.11 frame has been received. */ 2492 iwn_rx_done(sc, desc, data); 2493 break; 2494 #ifndef IEEE80211_NO_HT 2495 case IWN_RX_COMPRESSED_BA: 2496 /* A Compressed BlockAck has been received. */ 2497 iwn_rx_compressed_ba(sc, desc, data); 2498 break; 2499 #endif 2500 case IWN_TX_DONE: 2501 /* An 802.11 frame has been transmitted. */ 2502 ops->tx_done(sc, desc, data); 2503 break; 2504 2505 case IWN_RX_STATISTICS: 2506 case IWN_BEACON_STATISTICS: 2507 iwn_rx_statistics(sc, desc, data); 2508 break; 2509 2510 case IWN_BEACON_MISSED: 2511 { 2512 struct iwn_beacon_missed *miss = 2513 (struct iwn_beacon_missed *)(desc + 1); 2514 2515 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2516 sizeof (*miss), BUS_DMASYNC_POSTREAD); 2517 /* 2518 * If more than 5 consecutive beacons are missed, 2519 * reinitialize the sensitivity state machine. 2520 */ 2521 DPRINTF(("beacons missed %d/%d\n", 2522 le32toh(miss->consecutive), le32toh(miss->total))); 2523 if (ic->ic_state == IEEE80211_S_RUN && 2524 le32toh(miss->consecutive) > 5) 2525 (void)iwn_init_sensitivity(sc); 2526 break; 2527 } 2528 case IWN_UC_READY: 2529 { 2530 struct iwn_ucode_info *uc = 2531 (struct iwn_ucode_info *)(desc + 1); 2532 2533 /* The microcontroller is ready. */ 2534 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2535 sizeof (*uc), BUS_DMASYNC_POSTREAD); 2536 DPRINTF(("microcode alive notification version=%d.%d " 2537 "subtype=%x alive=%x\n", uc->major, uc->minor, 2538 uc->subtype, le32toh(uc->valid))); 2539 2540 if (le32toh(uc->valid) != 1) { 2541 aprint_error_dev(sc->sc_dev, 2542 "microcontroller initialization " 2543 "failed\n"); 2544 break; 2545 } 2546 if (uc->subtype == IWN_UCODE_INIT) { 2547 /* Save microcontroller report. */ 2548 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 2549 } 2550 /* Save the address of the error log in SRAM. */ 2551 sc->errptr = le32toh(uc->errptr); 2552 break; 2553 } 2554 case IWN_STATE_CHANGED: 2555 { 2556 uint32_t *status = (uint32_t *)(desc + 1); 2557 2558 /* Enabled/disabled notification. */ 2559 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2560 sizeof (*status), BUS_DMASYNC_POSTREAD); 2561 DPRINTF(("state changed to %x\n", le32toh(*status))); 2562 2563 if (le32toh(*status) & 1) { 2564 /* The radio button has to be pushed. */ 2565 aprint_error_dev(sc->sc_dev, 2566 "Radio transmitter is off\n"); 2567 /* Turn the interface down. 
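 * The interface stays down until the switch is flipped back and the
 * user brings it up again; iwn_init() checks the rfkill bit once more
 * and refuses to start while the radio is disabled.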
*/ 2568 s = splnet(); 2569 ifp->if_flags &= ~IFF_UP; 2570 iwn_stop(ifp, 1); 2571 splx(s); 2572 return; /* No further processing. */ 2573 } 2574 break; 2575 } 2576 case IWN_START_SCAN: 2577 { 2578 struct iwn_start_scan *scan = 2579 (struct iwn_start_scan *)(desc + 1); 2580 2581 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2582 sizeof (*scan), BUS_DMASYNC_POSTREAD); 2583 DPRINTFN(2, ("scanning channel %d status %x\n", 2584 scan->chan, le32toh(scan->status))); 2585 2586 /* Fix current channel. */ 2587 ic->ic_bss->ni_chan = &ic->ic_channels[scan->chan]; 2588 break; 2589 } 2590 case IWN_STOP_SCAN: 2591 { 2592 struct iwn_stop_scan *scan = 2593 (struct iwn_stop_scan *)(desc + 1); 2594 2595 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2596 sizeof (*scan), BUS_DMASYNC_POSTREAD); 2597 DPRINTF(("scan finished nchan=%d status=%d chan=%d\n", 2598 scan->nchan, scan->status, scan->chan)); 2599 2600 if (scan->status == 1 && scan->chan <= 14 && 2601 (sc->sc_flags & IWN_FLAG_HAS_5GHZ)) { 2602 /* 2603 * We just finished scanning 2GHz channels, 2604 * start scanning 5GHz ones. 2605 */ 2606 sc->sc_flags &= ~IWN_FLAG_SCANNING_2GHZ; 2607 sc->sc_flags |= IWN_FLAG_SCANNING_5GHZ; 2608 if (iwn_scan(sc, IEEE80211_CHAN_5GHZ) == 0) 2609 break; 2610 } 2611 sc->sc_flags &= ~IWN_FLAG_SCANNING; 2612 ieee80211_end_scan(ic); 2613 break; 2614 } 2615 case IWN5000_CALIBRATION_RESULT: 2616 iwn5000_rx_calib_results(sc, desc, data); 2617 break; 2618 2619 case IWN5000_CALIBRATION_DONE: 2620 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 2621 wakeup(sc); 2622 break; 2623 } 2624 2625 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 2626 } 2627 2628 /* Tell the firmware what we have processed. */ 2629 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; 2630 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 2631 } 2632 2633 /* 2634 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2635 * from power-down sleep mode. 2636 */ 2637 static void 2638 iwn_wakeup_intr(struct iwn_softc *sc) 2639 { 2640 int qid; 2641 2642 DPRINTF(("ucode wakeup from power-down sleep\n")); 2643 2644 /* Wakeup RX and TX rings. */ 2645 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 2646 for (qid = 0; qid < sc->ntxqs; qid++) { 2647 struct iwn_tx_ring *ring = &sc->txq[qid]; 2648 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 2649 } 2650 } 2651 2652 /* 2653 * Dump the error log of the firmware when a firmware panic occurs. Although 2654 * we can't debug the firmware because it is neither open source nor free, it 2655 * can help us to identify certain classes of problems. 2656 */ 2657 static void 2658 iwn_fatal_intr(struct iwn_softc *sc) 2659 { 2660 struct iwn_fw_dump dump; 2661 int i; 2662 2663 /* Force a complete recalibration on next init. */ 2664 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 2665 2666 /* Check that the error log address is valid. */ 2667 if (sc->errptr < IWN_FW_DATA_BASE || 2668 sc->errptr + sizeof (dump) > 2669 IWN_FW_DATA_BASE + sc->fw_data_maxsz) { 2670 aprint_error_dev(sc->sc_dev, 2671 "bad firmware error log address 0x%08x\n", sc->errptr); 2672 return; 2673 } 2674 if (iwn_nic_lock(sc) != 0) { 2675 aprint_error_dev(sc->sc_dev, 2676 "could not read firmware error log\n"); 2677 return; 2678 } 2679 /* Read firmware error log from SRAM. 
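 * The log lives in the firmware's data section of SRAM (hence the
 * bounds check against IWN_FW_DATA_BASE above) and is fetched one
 * 32-bit word at a time into the local struct iwn_fw_dump while the
 * NIC lock is held.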
*/ 2680 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 2681 sizeof (dump) / sizeof (uint32_t)); 2682 iwn_nic_unlock(sc); 2683 2684 if (dump.valid == 0) { 2685 aprint_error_dev(sc->sc_dev, 2686 "firmware error log is empty\n"); 2687 return; 2688 } 2689 aprint_error("firmware error log:\n"); 2690 aprint_error(" error type = \"%s\" (0x%08X)\n", 2691 (dump.id < __arraycount(iwn_fw_errmsg)) ? 2692 iwn_fw_errmsg[dump.id] : "UNKNOWN", 2693 dump.id); 2694 aprint_error(" program counter = 0x%08X\n", dump.pc); 2695 aprint_error(" source line = 0x%08X\n", dump.src_line); 2696 aprint_error(" error data = 0x%08X%08X\n", 2697 dump.error_data[0], dump.error_data[1]); 2698 aprint_error(" branch link = 0x%08X%08X\n", 2699 dump.branch_link[0], dump.branch_link[1]); 2700 aprint_error(" interrupt link = 0x%08X%08X\n", 2701 dump.interrupt_link[0], dump.interrupt_link[1]); 2702 aprint_error(" time = %u\n", dump.time[0]); 2703 2704 /* Dump driver status (TX and RX rings) while we're here. */ 2705 aprint_error("driver status:\n"); 2706 for (i = 0; i < sc->ntxqs; i++) { 2707 struct iwn_tx_ring *ring = &sc->txq[i]; 2708 aprint_error(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2709 i, ring->qid, ring->cur, ring->queued); 2710 } 2711 aprint_error(" rx ring: cur=%d\n", sc->rxq.cur); 2712 aprint_error(" 802.11 state %d\n", sc->sc_ic.ic_state); 2713 } 2714 2715 static int 2716 iwn_intr(void *arg) 2717 { 2718 struct iwn_softc *sc = arg; 2719 2720 /* Disable interrupts. */ 2721 IWN_WRITE(sc, IWN_INT_MASK, 0); 2722 2723 softint_schedule(sc->sc_soft_ih); 2724 return 1; 2725 } 2726 2727 static void 2728 iwn_softintr(void *arg) 2729 { 2730 struct iwn_softc *sc = arg; 2731 struct ifnet *ifp = sc->sc_ic.ic_ifp; 2732 uint32_t r1, r2, tmp; 2733 int s; 2734 2735 /* Read interrupts from ICT (fast) or from registers (slow). */ 2736 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2737 bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, 2738 IWN_ICT_SIZE, BUS_DMASYNC_POSTREAD); 2739 tmp = 0; 2740 while (sc->ict[sc->ict_cur] != 0) { 2741 tmp |= sc->ict[sc->ict_cur]; 2742 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 2743 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 2744 } 2745 bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, 2746 IWN_ICT_SIZE, BUS_DMASYNC_PREWRITE); 2747 tmp = le32toh(tmp); 2748 if (tmp == 0xffffffff) /* Shouldn't happen. */ 2749 tmp = 0; 2750 else if (tmp & 0xc0000) /* Workaround a HW bug. */ 2751 tmp |= 0x8000; 2752 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 2753 r2 = 0; /* Unused. */ 2754 } else { 2755 r1 = IWN_READ(sc, IWN_INT); 2756 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 2757 return; /* Hardware gone! */ 2758 r2 = IWN_READ(sc, IWN_FH_INT); 2759 } 2760 if (r1 == 0 && r2 == 0) { 2761 goto out; /* Interrupt not for us. */ 2762 } 2763 2764 /* Acknowledge interrupts. */ 2765 IWN_WRITE(sc, IWN_INT, r1); 2766 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 2767 IWN_WRITE(sc, IWN_FH_INT, r2); 2768 2769 if (r1 & IWN_INT_RF_TOGGLED) { 2770 tmp = IWN_READ(sc, IWN_GP_CNTRL); 2771 aprint_error_dev(sc->sc_dev, 2772 "RF switch: radio %s\n", 2773 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); 2774 } 2775 if (r1 & IWN_INT_CT_REACHED) { 2776 aprint_error_dev(sc->sc_dev, 2777 "critical temperature reached!\n"); 2778 } 2779 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 2780 aprint_error_dev(sc->sc_dev, 2781 "fatal firmware error\n"); 2782 /* Dump firmware error log and stop. 
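 * Nothing is restarted automatically: the interface is marked down
 * and iwn_stop() tears the hardware down.  Because IWN_FLAG_CALIB_DONE
 * was cleared in iwn_fatal_intr(), the next iwn_init() will run a
 * complete calibration cycle again.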
*/ 2783 iwn_fatal_intr(sc); 2784 s = splnet(); 2785 ifp->if_flags &= ~IFF_UP; 2786 iwn_stop(ifp, 1); 2787 splx(s); 2788 return; 2789 } 2790 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 2791 (r2 & IWN_FH_INT_RX)) { 2792 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2793 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 2794 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 2795 IWN_WRITE_1(sc, IWN_INT_PERIODIC, IWN_INT_PERIODIC_DIS); 2796 iwn_notif_intr(sc); 2797 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 2798 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 2799 IWN_INT_PERIODIC_ENA); 2800 } 2801 } else 2802 iwn_notif_intr(sc); 2803 } 2804 2805 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 2806 if (sc->sc_flags & IWN_FLAG_USE_ICT) 2807 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 2808 wakeup(sc); /* FH DMA transfer completed. */ 2809 } 2810 2811 if (r1 & IWN_INT_ALIVE) 2812 wakeup(sc); /* Firmware is alive. */ 2813 2814 if (r1 & IWN_INT_WAKEUP) 2815 iwn_wakeup_intr(sc); 2816 2817 out: 2818 /* Re-enable interrupts. */ 2819 if (ifp->if_flags & IFF_UP) 2820 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 2821 } 2822 2823 /* 2824 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 2825 * 5000 adapters use a slightly different format). 2826 */ 2827 static void 2828 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2829 uint16_t len) 2830 { 2831 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 2832 2833 *w = htole16(len + 8); 2834 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2835 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr, 2836 sizeof (uint16_t), 2837 BUS_DMASYNC_PREWRITE); 2838 if (idx < IWN_SCHED_WINSZ) { 2839 *(w + IWN_TX_RING_COUNT) = *w; 2840 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2841 (char *)(void *)(w + IWN_TX_RING_COUNT) - 2842 (char *)(void *)sc->sched_dma.vaddr, 2843 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2844 } 2845 } 2846 2847 static void 2848 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2849 uint16_t len) 2850 { 2851 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2852 2853 *w = htole16(id << 12 | (len + 8)); 2854 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2855 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr, 2856 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2857 if (idx < IWN_SCHED_WINSZ) { 2858 *(w + IWN_TX_RING_COUNT) = *w; 2859 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2860 (char *)(void *)(w + IWN_TX_RING_COUNT) - 2861 (char *)(void *)sc->sched_dma.vaddr, 2862 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2863 } 2864 } 2865 2866 #ifdef notyet 2867 static void 2868 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 2869 { 2870 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2871 2872 *w = (*w & htole16(0xf000)) | htole16(1); 2873 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2874 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr, 2875 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2876 if (idx < IWN_SCHED_WINSZ) { 2877 *(w + IWN_TX_RING_COUNT) = *w; 2878 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 2879 (char *)(void *)(w + IWN_TX_RING_COUNT) - 2880 (char *)(void *)sc->sched_dma.vaddr, 2881 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 2882 } 2883 } 2884 #endif 2885 2886 static int 2887 iwn_tx(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac) 2888 { 2889 struct ieee80211com *ic = &sc->sc_ic; 2890 struct iwn_node *wn = (void *)ni; 2891 struct iwn_tx_ring *ring; 2892 struct iwn_tx_desc *desc; 2893 struct iwn_tx_data 
*data; 2894 struct iwn_tx_cmd *cmd; 2895 struct iwn_cmd_data *tx; 2896 const struct iwn_rate *rinfo; 2897 struct ieee80211_frame *wh; 2898 struct ieee80211_key *k = NULL; 2899 struct mbuf *m1; 2900 uint32_t flags; 2901 u_int hdrlen; 2902 bus_dma_segment_t *seg; 2903 uint8_t tid, ridx, txant, type; 2904 int i, totlen, error, pad; 2905 2906 const struct chanAccParams *cap; 2907 int noack; 2908 int hdrlen2; 2909 2910 wh = mtod(m, struct ieee80211_frame *); 2911 hdrlen = ieee80211_anyhdrsize(wh); 2912 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2913 2914 hdrlen2 = (ieee80211_has_qos(wh)) ? 2915 sizeof (struct ieee80211_qosframe) : 2916 sizeof (struct ieee80211_frame); 2917 2918 if (hdrlen != hdrlen2) 2919 aprint_error_dev(sc->sc_dev, "hdrlen error (%d != %d)\n", 2920 hdrlen, hdrlen2); 2921 2922 /* XXX OpenBSD sets a different tid when using QOS */ 2923 tid = 0; 2924 if (ieee80211_has_qos(wh)) { 2925 cap = &ic->ic_wme.wme_chanParams; 2926 noack = cap->cap_wmeParams[ac].wmep_noackPolicy; 2927 } 2928 else 2929 noack = 0; 2930 2931 ring = &sc->txq[ac]; 2932 desc = &ring->desc[ring->cur]; 2933 data = &ring->data[ring->cur]; 2934 2935 /* Choose a TX rate index. */ 2936 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 2937 type != IEEE80211_FC0_TYPE_DATA) { 2938 ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ? 2939 IWN_RIDX_OFDM6 : IWN_RIDX_CCK1; 2940 } else if (ic->ic_fixed_rate != -1) { 2941 ridx = sc->fixed_ridx; 2942 } else 2943 ridx = wn->ridx[ni->ni_txrate]; 2944 rinfo = &iwn_rates[ridx]; 2945 2946 /* Encrypt the frame if need be. */ 2947 /* 2948 * XXX For now, NetBSD swaps the encryption and bpf sections 2949 * in order to match old code and other drivers. Tests with 2950 * tcpdump indicates that the order is irrelevant, however, 2951 * as bpf produces unencrypted data for both ordering choices. 2952 */ 2953 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 2954 k = ieee80211_crypto_encap(ic, ni, m); 2955 if (k == NULL) { 2956 m_freem(m); 2957 return ENOBUFS; 2958 } 2959 /* Packet header may have moved, reset our local pointer. */ 2960 wh = mtod(m, struct ieee80211_frame *); 2961 } 2962 totlen = m->m_pkthdr.len; 2963 2964 if (sc->sc_drvbpf != NULL) { 2965 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 2966 2967 tap->wt_flags = 0; 2968 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq); 2969 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags); 2970 tap->wt_rate = rinfo->rate; 2971 tap->wt_hwqueue = ac; 2972 if (wh->i_fc[1] & IEEE80211_FC1_WEP) 2973 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2974 2975 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m, BPF_D_OUT); 2976 } 2977 2978 /* Prepare TX firmware command. */ 2979 cmd = &ring->cmd[ring->cur]; 2980 cmd->code = IWN_CMD_TX_DATA; 2981 cmd->flags = 0; 2982 cmd->qid = ring->qid; 2983 cmd->idx = ring->cur; 2984 2985 tx = (struct iwn_cmd_data *)cmd->data; 2986 /* NB: No need to clear tx, all fields are reinitialized here. */ 2987 tx->scratch = 0; /* clear "scratch" area */ 2988 2989 flags = 0; 2990 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2991 /* Unicast frame, check if an ACK is expected. */ 2992 if (!noack) 2993 flags |= IWN_TX_NEED_ACK; 2994 } 2995 2996 #ifdef notyet 2997 /* XXX NetBSD does not define IEEE80211_FC0_SUBTYPE_BAR */ 2998 if ((wh->i_fc[0] & 2999 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 3000 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) 3001 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */ 3002 #endif 3003 3004 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 3005 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. 
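 * (Fragmented frames are not expected from net80211 in this path.)
 *
 * The protection logic that follows picks one of three outcomes for
 * unicast data: RTS/CTS when the frame plus FCS exceeds the RTS
 * threshold, RTS/CTS or CTS-to-self when 802.11g protection is in
 * effect for OFDM rates, and on non-4965 parts a bare
 * IWN_TX_NEED_PROTECTION, which lets the firmware choose the
 * mechanism itself.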
*/ 3006 3007 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 3008 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3009 /* NB: Group frames are sent using CCK in 802.11b/g. */ 3010 if (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) { 3011 flags |= IWN_TX_NEED_RTS; 3012 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 3013 ridx >= IWN_RIDX_OFDM6) { 3014 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 3015 flags |= IWN_TX_NEED_CTS; 3016 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 3017 flags |= IWN_TX_NEED_RTS; 3018 } 3019 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { 3020 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 3021 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 3022 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); 3023 flags |= IWN_TX_NEED_PROTECTION; 3024 } else 3025 flags |= IWN_TX_FULL_TXOP; 3026 } 3027 } 3028 3029 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 3030 type != IEEE80211_FC0_TYPE_DATA) 3031 tx->id = sc->broadcast_id; 3032 else 3033 tx->id = wn->id; 3034 3035 if (type == IEEE80211_FC0_TYPE_MGT) { 3036 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3037 3038 #ifndef IEEE80211_STA_ONLY 3039 /* Tell HW to set timestamp in probe responses. */ 3040 /* XXX NetBSD rev 1.11 added probe requests here but */ 3041 /* probe requests do not take timestamps (from Bergamini). */ 3042 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3043 flags |= IWN_TX_INSERT_TSTAMP; 3044 #endif 3045 /* XXX NetBSD rev 1.11 and 1.20 added AUTH/DAUTH and RTS/CTS */ 3046 /* changes here. These are not needed (from Bergamini). */ 3047 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3048 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 3049 tx->timeout = htole16(3); 3050 else 3051 tx->timeout = htole16(2); 3052 } else 3053 tx->timeout = htole16(0); 3054 3055 if (hdrlen & 3) { 3056 /* First segment length must be a multiple of 4. */ 3057 flags |= IWN_TX_NEED_PADDING; 3058 pad = 4 - (hdrlen & 3); 3059 } else 3060 pad = 0; 3061 3062 tx->len = htole16(totlen); 3063 tx->tid = tid; 3064 tx->rts_ntries = 60; 3065 tx->data_ntries = 15; 3066 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 3067 tx->plcp = rinfo->plcp; 3068 tx->rflags = rinfo->flags; 3069 if (tx->id == sc->broadcast_id) { 3070 /* Group or management frame. */ 3071 tx->linkq = 0; 3072 /* XXX Alternate between antenna A and B? */ 3073 txant = IWN_LSB(sc->txchainmask); 3074 tx->rflags |= IWN_RFLAG_ANT(txant); 3075 } else { 3076 tx->linkq = ni->ni_rates.rs_nrates - ni->ni_txrate - 1; 3077 flags |= IWN_TX_LINKQ; /* enable MRR */ 3078 } 3079 /* Set physical address of "scratch area". */ 3080 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 3081 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 3082 3083 /* Copy 802.11 header in TX command. */ 3084 /* XXX NetBSD changed this in rev 1.20 */ 3085 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen); 3086 3087 /* Trim 802.11 header. */ 3088 m_adj(m, hdrlen); 3089 tx->security = 0; 3090 tx->flags = htole32(flags); 3091 3092 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 3093 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3094 if (error != 0) { 3095 if (error != EFBIG) { 3096 aprint_error_dev(sc->sc_dev, 3097 "can't map mbuf (error %d)\n", error); 3098 m_freem(m); 3099 return error; 3100 } 3101 /* Too many DMA segments, linearize mbuf. 
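 * The TX descriptor only has room for a small fixed number of DMA
 * segments, one of which is reserved for the command header, so an
 * overly fragmented mbuf chain is copied into a single mbuf (backed
 * by a cluster when the packet does not fit in MHLEN) and the DMA
 * load is retried once on the contiguous copy.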
*/ 3102 MGETHDR(m1, M_DONTWAIT, MT_DATA); 3103 if (m1 == NULL) { 3104 m_freem(m); 3105 return ENOBUFS; 3106 } 3107 if (m->m_pkthdr.len > MHLEN) { 3108 MCLGET(m1, M_DONTWAIT); 3109 if (!(m1->m_flags & M_EXT)) { 3110 m_freem(m); 3111 m_freem(m1); 3112 return ENOBUFS; 3113 } 3114 } 3115 m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *)); 3116 m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len; 3117 m_freem(m); 3118 m = m1; 3119 3120 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 3121 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3122 if (error != 0) { 3123 aprint_error_dev(sc->sc_dev, 3124 "can't map mbuf (error %d)\n", error); 3125 m_freem(m); 3126 return error; 3127 } 3128 } 3129 3130 data->m = m; 3131 data->ni = ni; 3132 3133 DPRINTFN(4, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n", 3134 ring->qid, ring->cur, m->m_pkthdr.len, data->map->dm_nsegs)); 3135 3136 /* Fill TX descriptor. */ 3137 desc->nsegs = 1 + data->map->dm_nsegs; 3138 /* First DMA segment is used by the TX command. */ 3139 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 3140 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 3141 (4 + sizeof (*tx) + hdrlen + pad) << 4); 3142 /* Other DMA segments are for data payload. */ 3143 seg = data->map->dm_segs; 3144 for (i = 1; i <= data->map->dm_nsegs; i++) { 3145 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 3146 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 3147 seg->ds_len << 4); 3148 seg++; 3149 } 3150 3151 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize, 3152 BUS_DMASYNC_PREWRITE); 3153 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map, 3154 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr, 3155 sizeof (*cmd), BUS_DMASYNC_PREWRITE); 3156 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 3157 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr, 3158 sizeof (*desc), BUS_DMASYNC_PREWRITE); 3159 3160 #ifdef notyet 3161 /* Update TX scheduler. */ 3162 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 3163 #endif 3164 3165 /* Kick TX ring. */ 3166 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3167 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3168 3169 /* Mark TX ring as full if we reach a certain threshold. */ 3170 if (++ring->queued > IWN_TX_RING_HIMARK) 3171 sc->qfullmsk |= 1 << ring->qid; 3172 3173 return 0; 3174 } 3175 3176 static void 3177 iwn_start(struct ifnet *ifp) 3178 { 3179 struct iwn_softc *sc = ifp->if_softc; 3180 struct ieee80211com *ic = &sc->sc_ic; 3181 struct ieee80211_node *ni; 3182 struct ether_header *eh; 3183 struct mbuf *m; 3184 int ac; 3185 3186 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 3187 return; 3188 3189 for (;;) { 3190 if (sc->sc_beacon_wait == 1) { 3191 ifp->if_flags |= IFF_OACTIVE; 3192 break; 3193 } 3194 3195 if (sc->qfullmsk != 0) { 3196 ifp->if_flags |= IFF_OACTIVE; 3197 break; 3198 } 3199 /* Send pending management frames first. */ 3200 IF_DEQUEUE(&ic->ic_mgtq, m); 3201 if (m != NULL) { 3202 ni = M_GETCTX(m, struct ieee80211_node *); 3203 ac = 0; 3204 goto sendit; 3205 } 3206 if (ic->ic_state != IEEE80211_S_RUN) 3207 break; 3208 3209 /* Encapsulate and send data frames. 
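 * Management frames dequeued from ic_mgtq above always go out on
 * ring 0; data frames are only pulled from if_snd while associated
 * (IEEE80211_S_RUN), classified into a WME access category and sent
 * on the matching TX ring, except EAPOL frames which are forced onto
 * the best-effort queue.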
*/ 3210 IFQ_DEQUEUE(&ifp->if_snd, m); 3211 if (m == NULL) 3212 break; 3213 if (m->m_len < sizeof (*eh) && 3214 (m = m_pullup(m, sizeof (*eh))) == NULL) { 3215 ifp->if_oerrors++; 3216 continue; 3217 } 3218 eh = mtod(m, struct ether_header *); 3219 ni = ieee80211_find_txnode(ic, eh->ether_dhost); 3220 if (ni == NULL) { 3221 m_freem(m); 3222 ifp->if_oerrors++; 3223 continue; 3224 } 3225 /* classify mbuf so we can find which tx ring to use */ 3226 if (ieee80211_classify(ic, m, ni) != 0) { 3227 m_freem(m); 3228 ieee80211_free_node(ni); 3229 ifp->if_oerrors++; 3230 continue; 3231 } 3232 3233 /* No QoS encapsulation for EAPOL frames. */ 3234 ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ? 3235 M_WME_GETAC(m) : WME_AC_BE; 3236 3237 if (sc->sc_beacon_wait == 0) 3238 bpf_mtap(ifp, m, BPF_D_OUT); 3239 3240 if ((m = ieee80211_encap(ic, m, ni)) == NULL) { 3241 ieee80211_free_node(ni); 3242 ifp->if_oerrors++; 3243 continue; 3244 } 3245 sendit: 3246 if (sc->sc_beacon_wait) 3247 continue; 3248 3249 bpf_mtap3(ic->ic_rawbpf, m, BPF_D_OUT); 3250 3251 if (iwn_tx(sc, m, ni, ac) != 0) { 3252 ieee80211_free_node(ni); 3253 ifp->if_oerrors++; 3254 continue; 3255 } 3256 3257 sc->sc_tx_timer = 5; 3258 ifp->if_timer = 1; 3259 } 3260 3261 if (sc->sc_beacon_wait > 1) 3262 sc->sc_beacon_wait = 0; 3263 } 3264 3265 static void 3266 iwn_watchdog(struct ifnet *ifp) 3267 { 3268 struct iwn_softc *sc = ifp->if_softc; 3269 3270 ifp->if_timer = 0; 3271 3272 if (sc->sc_tx_timer > 0) { 3273 if (--sc->sc_tx_timer == 0) { 3274 aprint_error_dev(sc->sc_dev, 3275 "device timeout\n"); 3276 ifp->if_flags &= ~IFF_UP; 3277 iwn_stop(ifp, 1); 3278 ifp->if_oerrors++; 3279 return; 3280 } 3281 ifp->if_timer = 1; 3282 } 3283 3284 ieee80211_watchdog(&sc->sc_ic); 3285 } 3286 3287 static int 3288 iwn_ioctl(struct ifnet *ifp, u_long cmd, void *data) 3289 { 3290 struct iwn_softc *sc = ifp->if_softc; 3291 struct ieee80211com *ic = &sc->sc_ic; 3292 const struct sockaddr *sa; 3293 int s, error = 0; 3294 3295 s = splnet(); 3296 3297 switch (cmd) { 3298 case SIOCSIFADDR: 3299 ifp->if_flags |= IFF_UP; 3300 /* FALLTHROUGH */ 3301 case SIOCSIFFLAGS: 3302 /* XXX Added as it is in every NetBSD driver */ 3303 if ((error = ifioctl_common(ifp, cmd, data)) != 0) 3304 break; 3305 if (ifp->if_flags & IFF_UP) { 3306 if (!(ifp->if_flags & IFF_RUNNING)) 3307 error = iwn_init(ifp); 3308 } else { 3309 if (ifp->if_flags & IFF_RUNNING) 3310 iwn_stop(ifp, 1); 3311 } 3312 break; 3313 3314 case SIOCADDMULTI: 3315 case SIOCDELMULTI: 3316 sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data); 3317 error = (cmd == SIOCADDMULTI) ? 3318 ether_addmulti(sa, &sc->sc_ec) : 3319 ether_delmulti(sa, &sc->sc_ec); 3320 3321 if (error == ENETRESET) 3322 error = 0; 3323 break; 3324 3325 default: 3326 error = ieee80211_ioctl(ic, cmd, data); 3327 } 3328 3329 if (error == ENETRESET) { 3330 error = 0; 3331 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 3332 (IFF_UP | IFF_RUNNING)) { 3333 iwn_stop(ifp, 0); 3334 error = iwn_init(ifp); 3335 } 3336 } 3337 3338 splx(s); 3339 return error; 3340 } 3341 3342 /* 3343 * Send a command to the firmware. 
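 * Commands travel on TX ring 4.  Payloads that fit in the descriptor
 * (size <= sizeof cmd->data) are written into the pre-mapped command
 * buffer; larger ones are loaded into an mbuf of their own.  With
 * async == 0 the caller sleeps on the descriptor until iwn_cmd_done()
 * wakes it up, with a one second timeout.
 *
 * Typical (illustrative) call, mirroring the calibration timer above:
 *
 *	uint32_t flags = 0;
 *	error = iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
 *	    sizeof flags, 1);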
3344 */ 3345 static int 3346 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) 3347 { 3348 struct iwn_tx_ring *ring = &sc->txq[4]; 3349 struct iwn_tx_desc *desc; 3350 struct iwn_tx_data *data; 3351 struct iwn_tx_cmd *cmd; 3352 struct mbuf *m; 3353 bus_addr_t paddr; 3354 int totlen, error; 3355 3356 desc = &ring->desc[ring->cur]; 3357 data = &ring->data[ring->cur]; 3358 totlen = 4 + size; 3359 3360 if (size > sizeof cmd->data) { 3361 /* Command is too large to fit in a descriptor. */ 3362 if (totlen > MCLBYTES) 3363 return EINVAL; 3364 MGETHDR(m, M_DONTWAIT, MT_DATA); 3365 if (m == NULL) 3366 return ENOMEM; 3367 if (totlen > MHLEN) { 3368 MCLGET(m, M_DONTWAIT); 3369 if (!(m->m_flags & M_EXT)) { 3370 m_freem(m); 3371 return ENOMEM; 3372 } 3373 } 3374 cmd = mtod(m, struct iwn_tx_cmd *); 3375 error = bus_dmamap_load(sc->sc_dmat, data->map, cmd, totlen, 3376 NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3377 if (error != 0) { 3378 m_freem(m); 3379 return error; 3380 } 3381 data->m = m; 3382 paddr = data->map->dm_segs[0].ds_addr; 3383 } else { 3384 cmd = &ring->cmd[ring->cur]; 3385 paddr = data->cmd_paddr; 3386 } 3387 3388 cmd->code = code; 3389 cmd->flags = 0; 3390 cmd->qid = ring->qid; 3391 cmd->idx = ring->cur; 3392 /* 3393 * Coverity:[OUT_OF_BOUNDS] 3394 * false positive since, allocated in mbuf if it does not fit 3395 */ 3396 memcpy(cmd->data, buf, size); 3397 3398 desc->nsegs = 1; 3399 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 3400 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); 3401 3402 if (size > sizeof cmd->data) { 3403 bus_dmamap_sync(sc->sc_dmat, data->map, 0, totlen, 3404 BUS_DMASYNC_PREWRITE); 3405 } else { 3406 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map, 3407 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr, 3408 totlen, BUS_DMASYNC_PREWRITE); 3409 } 3410 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 3411 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr, 3412 sizeof (*desc), BUS_DMASYNC_PREWRITE); 3413 3414 #ifdef notyet 3415 /* Update TX scheduler. */ 3416 ops->update_sched(sc, ring->qid, ring->cur, 0, 0); 3417 #endif 3418 DPRINTFN(4, ("iwn_cmd %d size=%d %s\n", code, size, async ? " (async)" : "")); 3419 3420 /* Kick command ring. */ 3421 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3422 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3423 3424 return async ? 0 : tsleep(desc, PCATCH, "iwncmd", hz); 3425 } 3426 3427 static int 3428 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3429 { 3430 struct iwn4965_node_info hnode; 3431 char *src, *dst; 3432 3433 /* 3434 * We use the node structure for 5000 Series internally (it is 3435 * a superset of the one for 4965AGN). We thus copy the common 3436 * fields before sending the command. 3437 */ 3438 src = (char *)node; 3439 dst = (char *)&hnode; 3440 memcpy(dst, src, 48); 3441 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ 3442 memcpy(dst + 48, src + 72, 20); 3443 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); 3444 } 3445 3446 static int 3447 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3448 { 3449 /* Direct mapping. 
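 * The 5000-series firmware consumes struct iwn_node_info exactly as
 * the driver builds it, whereas the 4965 path above has to repack the
 * common fields into the smaller struct iwn4965_node_info, skipping
 * the TSC and RX/TX MIC fields that only exist in the larger layout.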
*/ 3450 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); 3451 } 3452 3453 static int 3454 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni) 3455 { 3456 struct iwn_node *wn = (void *)ni; 3457 struct ieee80211_rateset *rs = &ni->ni_rates; 3458 struct iwn_cmd_link_quality linkq; 3459 const struct iwn_rate *rinfo; 3460 uint8_t txant; 3461 int i, txrate; 3462 3463 /* Use the first valid TX antenna. */ 3464 txant = IWN_LSB(sc->txchainmask); 3465 3466 memset(&linkq, 0, sizeof linkq); 3467 linkq.id = wn->id; 3468 linkq.antmsk_1stream = txant; 3469 linkq.antmsk_2stream = IWN_ANT_AB; 3470 linkq.ampdu_max = 31; 3471 linkq.ampdu_threshold = 3; 3472 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3473 3474 /* Start at highest available bit-rate. */ 3475 txrate = rs->rs_nrates - 1; 3476 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) { 3477 rinfo = &iwn_rates[wn->ridx[txrate]]; 3478 linkq.retry[i].plcp = rinfo->plcp; 3479 linkq.retry[i].rflags = rinfo->flags; 3480 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant); 3481 /* Next retry at immediate lower bit-rate. */ 3482 if (txrate > 0) 3483 txrate--; 3484 } 3485 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1); 3486 } 3487 3488 /* 3489 * Broadcast node is used to send group-addressed and management frames. 3490 */ 3491 static int 3492 iwn_add_broadcast_node(struct iwn_softc *sc, int async) 3493 { 3494 struct iwn_ops *ops = &sc->ops; 3495 struct iwn_node_info node; 3496 struct iwn_cmd_link_quality linkq; 3497 const struct iwn_rate *rinfo; 3498 uint8_t txant; 3499 int i, error; 3500 3501 memset(&node, 0, sizeof node); 3502 IEEE80211_ADDR_COPY(node.macaddr, etherbroadcastaddr); 3503 node.id = sc->broadcast_id; 3504 DPRINTF(("adding broadcast node\n")); 3505 if ((error = ops->add_node(sc, &node, async)) != 0) 3506 return error; 3507 3508 /* Use the first valid TX antenna. */ 3509 txant = IWN_LSB(sc->txchainmask); 3510 3511 memset(&linkq, 0, sizeof linkq); 3512 linkq.id = sc->broadcast_id; 3513 linkq.antmsk_1stream = txant; 3514 linkq.antmsk_2stream = IWN_ANT_AB; 3515 linkq.ampdu_max = 64; 3516 linkq.ampdu_threshold = 3; 3517 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3518 3519 /* Use lowest mandatory bit-rate. */ 3520 rinfo = (sc->sc_ic.ic_curmode != IEEE80211_MODE_11A) ? 3521 &iwn_rates[IWN_RIDX_CCK1] : &iwn_rates[IWN_RIDX_OFDM6]; 3522 linkq.retry[0].plcp = rinfo->plcp; 3523 linkq.retry[0].rflags = rinfo->flags; 3524 linkq.retry[0].rflags |= IWN_RFLAG_ANT(txant); 3525 /* Use same bit-rate for all TX retries. */ 3526 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) { 3527 linkq.retry[i].plcp = linkq.retry[0].plcp; 3528 linkq.retry[i].rflags = linkq.retry[0].rflags; 3529 } 3530 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); 3531 } 3532 3533 static void 3534 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3535 { 3536 struct iwn_cmd_led led; 3537 3538 /* Clear microcode LED ownership. */ 3539 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL); 3540 3541 led.which = which; 3542 led.unit = htole32(10000); /* on/off in unit of 100ms */ 3543 led.off = off; 3544 led.on = on; 3545 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); 3546 } 3547 3548 /* 3549 * Set the critical temperature at which the firmware will stop the radio 3550 * and notify us. 
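 * The unit expected by the firmware differs per chip: the 4965 takes
 * Kelvin (IWN_CTOK(110), i.e. 110 + 273 = 383), the 5150 takes its
 * raw sensor units derived from the temperature offset computed at
 * EEPROM read time, and the remaining chips take plain degrees
 * Celsius.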
3551 */ 3552 static int 3553 iwn_set_critical_temp(struct iwn_softc *sc) 3554 { 3555 struct iwn_critical_temp crit; 3556 int32_t temp; 3557 3558 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); 3559 3560 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 3561 temp = (IWN_CTOK(110) - sc->temp_off) * -5; 3562 else if (sc->hw_type == IWN_HW_REV_TYPE_4965) 3563 temp = IWN_CTOK(110); 3564 else 3565 temp = 110; 3566 memset(&crit, 0, sizeof crit); 3567 crit.tempR = htole32(temp); 3568 DPRINTF(("setting critical temperature to %d\n", temp)); 3569 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); 3570 } 3571 3572 static int 3573 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) 3574 { 3575 struct iwn_cmd_timing cmd; 3576 uint64_t val, mod; 3577 3578 memset(&cmd, 0, sizeof cmd); 3579 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3580 cmd.bintval = htole16(ni->ni_intval); 3581 cmd.lintval = htole16(10); 3582 3583 /* Compute remaining time until next beacon. */ 3584 val = (uint64_t)ni->ni_intval * 1024; /* msecs -> usecs */ 3585 mod = le64toh(cmd.tstamp) % val; 3586 cmd.binitval = htole32((uint32_t)(val - mod)); 3587 3588 DPRINTF(("timing bintval=%u, tstamp=%" PRIu64 ", init=%" PRIu32 "\n", 3589 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod))); 3590 3591 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); 3592 } 3593 3594 static void 3595 iwn4965_power_calibration(struct iwn_softc *sc, int temp) 3596 { 3597 /* Adjust TX power if need be (delta >= 3 degC). */ 3598 DPRINTF(("temperature %d->%d\n", sc->temp, temp)); 3599 if (abs(temp - sc->temp) >= 3) { 3600 /* Record temperature of last calibration. */ 3601 sc->temp = temp; 3602 (void)iwn4965_set_txpower(sc, 1); 3603 } 3604 } 3605 3606 /* 3607 * Set TX power for current channel (each rate has its own power settings). 3608 * This function takes into account the regulatory information from EEPROM, 3609 * the current temperature and the current voltage. 3610 */ 3611 static int 3612 iwn4965_set_txpower(struct iwn_softc *sc, int async) 3613 { 3614 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3615 #define fdivround(a, b, n) \ 3616 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3617 /* Linear interpolation. */ 3618 #define interpolate(x, x1, y1, x2, y2, n) \ 3619 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3620 3621 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; 3622 struct ieee80211com *ic = &sc->sc_ic; 3623 struct iwn_ucode_info *uc = &sc->ucode_info; 3624 struct ieee80211_channel *ch; 3625 struct iwn4965_cmd_txpower cmd; 3626 struct iwn4965_eeprom_chan_samples *chans; 3627 const uint8_t *rf_gain, *dsp_gain; 3628 int32_t vdiff, tdiff; 3629 int i, c, grp, maxpwr; 3630 uint8_t chan; 3631 3632 /* Retrieve current channel from last RXON. */ 3633 chan = sc->rxon.chan; 3634 DPRINTF(("setting TX power for channel %d\n", chan)); 3635 ch = &ic->ic_channels[chan]; 3636 3637 memset(&cmd, 0, sizeof cmd); 3638 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1; 3639 cmd.chan = chan; 3640 3641 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 3642 maxpwr = sc->maxpwr5GHz; 3643 rf_gain = iwn4965_rf_gain_5ghz; 3644 dsp_gain = iwn4965_dsp_gain_5ghz; 3645 } else { 3646 maxpwr = sc->maxpwr2GHz; 3647 rf_gain = iwn4965_rf_gain_2ghz; 3648 dsp_gain = iwn4965_dsp_gain_2ghz; 3649 } 3650 3651 /* Compute voltage compensation. 
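 * The compensation is the difference between the voltage reported by
 * the microcode and the reference voltage from the EEPROM, divided by
 * seven, doubled when positive and discarded entirely when it exceeds
 * +/-2.  Worked example with made-up readings:
 *
 *	uc->volt = 310, eeprom_voltage = 303
 *	vdiff = (310 - 303) / 7 = 1  ->  doubled to 2  ->  kept (|2| <= 2)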
*/ 3652 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7; 3653 if (vdiff > 0) 3654 vdiff *= 2; 3655 if (abs(vdiff) > 2) 3656 vdiff = 0; 3657 DPRINTF(("voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", 3658 vdiff, le32toh(uc->volt), sc->eeprom_voltage)); 3659 3660 /* Get channel attenuation group. */ 3661 if (chan <= 20) /* 1-20 */ 3662 grp = 4; 3663 else if (chan <= 43) /* 34-43 */ 3664 grp = 0; 3665 else if (chan <= 70) /* 44-70 */ 3666 grp = 1; 3667 else if (chan <= 124) /* 71-124 */ 3668 grp = 2; 3669 else /* 125-200 */ 3670 grp = 3; 3671 DPRINTF(("chan %d, attenuation group=%d\n", chan, grp)); 3672 3673 /* Get channel sub-band. */ 3674 for (i = 0; i < IWN_NBANDS; i++) 3675 if (sc->bands[i].lo != 0 && 3676 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) 3677 break; 3678 if (i == IWN_NBANDS) /* Can't happen in real-life. */ 3679 return EINVAL; 3680 chans = sc->bands[i].chans; 3681 DPRINTF(("chan %d sub-band=%d\n", chan, i)); 3682 3683 for (c = 0; c < 2; c++) { 3684 uint8_t power, gain, temp; 3685 int maxchpwr, pwr, ridx, idx; 3686 3687 power = interpolate(chan, 3688 chans[0].num, chans[0].samples[c][1].power, 3689 chans[1].num, chans[1].samples[c][1].power, 1); 3690 gain = interpolate(chan, 3691 chans[0].num, chans[0].samples[c][1].gain, 3692 chans[1].num, chans[1].samples[c][1].gain, 1); 3693 temp = interpolate(chan, 3694 chans[0].num, chans[0].samples[c][1].temp, 3695 chans[1].num, chans[1].samples[c][1].temp, 1); 3696 DPRINTF(("TX chain %d: power=%d gain=%d temp=%d\n", 3697 c, power, gain, temp)); 3698 3699 /* Compute temperature compensation. */ 3700 tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; 3701 DPRINTF(("temperature compensation=%d (current=%d, " 3702 "EEPROM=%d)\n", tdiff, sc->temp, temp)); 3703 3704 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { 3705 /* Convert dBm to half-dBm. */ 3706 maxchpwr = sc->maxpwr[chan] * 2; 3707 if ((ridx / 8) & 1) 3708 maxchpwr -= 6; /* MIMO 2T: -3dB */ 3709 3710 pwr = maxpwr; 3711 3712 /* Adjust TX power based on rate. */ 3713 if ((ridx % 8) == 5) 3714 pwr -= 15; /* OFDM48: -7.5dB */ 3715 else if ((ridx % 8) == 6) 3716 pwr -= 17; /* OFDM54: -8.5dB */ 3717 else if ((ridx % 8) == 7) 3718 pwr -= 20; /* OFDM60: -10dB */ 3719 else 3720 pwr -= 10; /* Others: -5dB */ 3721 3722 /* Do not exceed channel max TX power. */ 3723 if (pwr > maxchpwr) 3724 pwr = maxchpwr; 3725 3726 idx = gain - (pwr - power) - tdiff - vdiff; 3727 if ((ridx / 8) & 1) /* MIMO */ 3728 idx += (int32_t)le32toh(uc->atten[grp][c]); 3729 3730 if (cmd.band == 0) 3731 idx += 9; /* 5GHz */ 3732 if (ridx == IWN_RIDX_MAX) 3733 idx += 5; /* CCK */ 3734 3735 /* Make sure idx stays in a valid range. */ 3736 if (idx < 0) 3737 idx = 0; 3738 else if (idx > IWN4965_MAX_PWR_INDEX) 3739 idx = IWN4965_MAX_PWR_INDEX; 3740 3741 DPRINTF(("TX chain %d, rate idx %d: power=%d\n", 3742 c, ridx, idx)); 3743 cmd.power[ridx].rf_gain[c] = rf_gain[idx]; 3744 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; 3745 } 3746 } 3747 3748 DPRINTF(("setting TX power for chan %d\n", chan)); 3749 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); 3750 3751 #undef interpolate 3752 #undef fdivround 3753 } 3754 3755 static int 3756 iwn5000_set_txpower(struct iwn_softc *sc, int async) 3757 { 3758 struct iwn5000_cmd_txpower cmd; 3759 int cmdid; 3760 3761 /* 3762 * TX power calibration is handled automatically by the firmware 3763 * for 5000 Series. 
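 * All the driver supplies is a global cap in half-dBm units
 * (2 * IWN5000_TXPOWER_MAX_DBM = 32, i.e. 16 dBm) plus the "no
 * closed" flag; the per-rate and per-chain settings are derived by
 * the firmware itself.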
3764 */ 3765 memset(&cmd, 0, sizeof cmd); 3766 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */ 3767 cmd.flags = IWN5000_TXPOWER_NO_CLOSED; 3768 cmd.srv_limit = IWN5000_TXPOWER_AUTO; 3769 DPRINTF(("setting TX power\n")); 3770 if (IWN_UCODE_API(sc->ucode_rev) == 1) 3771 cmdid = IWN_CMD_TXPOWER_DBM_V1; 3772 else 3773 cmdid = IWN_CMD_TXPOWER_DBM; 3774 return iwn_cmd(sc, cmdid, &cmd, sizeof cmd, async); 3775 } 3776 3777 /* 3778 * Retrieve the maximum RSSI (in dBm) among receivers. 3779 */ 3780 static int 3781 iwn4965_get_rssi(const struct iwn_rx_stat *stat) 3782 { 3783 const struct iwn4965_rx_phystat *phy = (const void *)stat->phybuf; 3784 uint8_t mask, agc; 3785 int rssi; 3786 3787 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC; 3788 agc = (le16toh(phy->agc) >> 7) & 0x7f; 3789 3790 rssi = 0; 3791 if (mask & IWN_ANT_A) 3792 rssi = MAX(rssi, phy->rssi[0]); 3793 if (mask & IWN_ANT_B) 3794 rssi = MAX(rssi, phy->rssi[2]); 3795 if (mask & IWN_ANT_C) 3796 rssi = MAX(rssi, phy->rssi[4]); 3797 3798 return rssi - agc - IWN_RSSI_TO_DBM; 3799 } 3800 3801 static int 3802 iwn5000_get_rssi(const struct iwn_rx_stat *stat) 3803 { 3804 const struct iwn5000_rx_phystat *phy = (const void *)stat->phybuf; 3805 uint8_t agc; 3806 int rssi; 3807 3808 agc = (le32toh(phy->agc) >> 9) & 0x7f; 3809 3810 rssi = MAX(le16toh(phy->rssi[0]) & 0xff, 3811 le16toh(phy->rssi[1]) & 0xff); 3812 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi); 3813 3814 return rssi - agc - IWN_RSSI_TO_DBM; 3815 } 3816 3817 /* 3818 * Retrieve the average noise (in dBm) among receivers. 3819 */ 3820 static int 3821 iwn_get_noise(const struct iwn_rx_general_stats *stats) 3822 { 3823 int i, total, nbant, noise; 3824 3825 total = nbant = 0; 3826 for (i = 0; i < 3; i++) { 3827 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0) 3828 continue; 3829 total += noise; 3830 nbant++; 3831 } 3832 /* There should be at least one antenna but check anyway. */ 3833 return (nbant == 0) ? -127 : (total / nbant) - 107; 3834 } 3835 3836 /* 3837 * Compute temperature (in degC) from last received statistics. 3838 */ 3839 static int 3840 iwn4965_get_temperature(struct iwn_softc *sc) 3841 { 3842 struct iwn_ucode_info *uc = &sc->ucode_info; 3843 int32_t r1, r2, r3, r4, temp; 3844 3845 r1 = le32toh(uc->temp[0].chan20MHz); 3846 r2 = le32toh(uc->temp[1].chan20MHz); 3847 r3 = le32toh(uc->temp[2].chan20MHz); 3848 r4 = le32toh(sc->rawtemp); 3849 3850 if (r1 == r3) /* Prevents division by 0 (should not happen). */ 3851 return 0; 3852 3853 /* Sign-extend 23-bit R4 value to 32-bit. */ 3854 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000; 3855 /* Compute temperature in Kelvin. */ 3856 temp = (259 * (r4 - r2)) / (r3 - r1); 3857 temp = (temp * 97) / 100 + 8; 3858 3859 DPRINTF(("temperature %dK/%dC\n", temp, IWN_KTOC(temp))); 3860 return IWN_KTOC(temp); 3861 } 3862 3863 static int 3864 iwn5000_get_temperature(struct iwn_softc *sc) 3865 { 3866 int32_t temp; 3867 3868 /* 3869 * Temperature is not used by the driver for 5000 Series because 3870 * TX power calibration is handled by firmware. We export it to 3871 * users through the sensor framework though. 3872 */ 3873 temp = le32toh(sc->rawtemp); 3874 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 3875 temp = (temp / -5) + sc->temp_off; 3876 temp = IWN_KTOC(temp); 3877 } 3878 return temp; 3879 } 3880 3881 /* 3882 * Initialize sensitivity calibration state machine. 
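* The state machine starts out in INIT with the per-chip minimum
* correlation values from sc->limits and is advanced to RUN by
* iwn_collect_noise() once the differential gains have been computed
* from the first 20 beacons.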
3883 */ 3884 static int 3885 iwn_init_sensitivity(struct iwn_softc *sc) 3886 { 3887 struct iwn_ops *ops = &sc->ops; 3888 struct iwn_calib_state *calib = &sc->calib; 3889 uint32_t flags; 3890 int error; 3891 3892 /* Reset calibration state machine. */ 3893 memset(calib, 0, sizeof (*calib)); 3894 calib->state = IWN_CALIB_STATE_INIT; 3895 calib->cck_state = IWN_CCK_STATE_HIFA; 3896 /* Set initial correlation values. */ 3897 calib->ofdm_x1 = sc->limits->min_ofdm_x1; 3898 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; 3899 calib->ofdm_x4 = sc->limits->min_ofdm_x4; 3900 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; 3901 calib->cck_x4 = 125; 3902 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; 3903 calib->energy_cck = sc->limits->energy_cck; 3904 3905 /* Write initial sensitivity. */ 3906 if ((error = iwn_send_sensitivity(sc)) != 0) 3907 return error; 3908 3909 /* Write initial gains. */ 3910 if ((error = ops->init_gains(sc)) != 0) 3911 return error; 3912 3913 /* Request statistics at each beacon interval. */ 3914 flags = 0; 3915 DPRINTF(("sending request for statistics\n")); 3916 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 3917 } 3918 3919 /* 3920 * Collect noise and RSSI statistics for the first 20 beacons received 3921 * after association and use them to determine connected antennas and 3922 * to set differential gains. 3923 */ 3924 static void 3925 iwn_collect_noise(struct iwn_softc *sc, 3926 const struct iwn_rx_general_stats *stats) 3927 { 3928 struct iwn_ops *ops = &sc->ops; 3929 struct iwn_calib_state *calib = &sc->calib; 3930 uint32_t val; 3931 int i; 3932 3933 /* Accumulate RSSI and noise for all 3 antennas. */ 3934 for (i = 0; i < 3; i++) { 3935 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff; 3936 calib->noise[i] += le32toh(stats->noise[i]) & 0xff; 3937 } 3938 /* NB: We update differential gains only once after 20 beacons. */ 3939 if (++calib->nbeacons < 20) 3940 return; 3941 3942 /* Determine highest average RSSI. */ 3943 val = MAX(calib->rssi[0], calib->rssi[1]); 3944 val = MAX(calib->rssi[2], val); 3945 3946 /* Determine which antennas are connected. */ 3947 sc->chainmask = sc->rxchainmask; 3948 for (i = 0; i < 3; i++) 3949 if (val - calib->rssi[i] > 15 * 20) 3950 sc->chainmask &= ~(1 << i); 3951 DPRINTF(("RX chains mask: theoretical=0x%x, actual=0x%x\n", 3952 sc->rxchainmask, sc->chainmask)); 3953 3954 /* If none of the TX antennas are connected, keep at least one. */ 3955 if ((sc->chainmask & sc->txchainmask) == 0) 3956 sc->chainmask |= IWN_LSB(sc->txchainmask); 3957 3958 (void)ops->set_gains(sc); 3959 calib->state = IWN_CALIB_STATE_RUN; 3960 3961 #ifdef notyet 3962 /* XXX Disable RX chains with no antennas connected. */ 3963 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask)); 3964 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 3965 #endif 3966 3967 /* Enable power-saving mode if requested by user. */ 3968 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) 3969 (void)iwn_set_pslevel(sc, 0, 3, 1); 3970 } 3971 3972 static int 3973 iwn4965_init_gains(struct iwn_softc *sc) 3974 { 3975 struct iwn_phy_calib_gain cmd; 3976 3977 memset(&cmd, 0, sizeof cmd); 3978 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 3979 /* Differential gains initially set to 0 for all 3 antennas. 
*/ 3980 DPRINTF(("setting initial differential gains\n")); 3981 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 3982 } 3983 3984 static int 3985 iwn5000_init_gains(struct iwn_softc *sc) 3986 { 3987 struct iwn_phy_calib cmd; 3988 3989 memset(&cmd, 0, sizeof cmd); 3990 cmd.code = sc->reset_noise_gain; 3991 cmd.ngroups = 1; 3992 cmd.isvalid = 1; 3993 DPRINTF(("setting initial differential gains\n")); 3994 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 3995 } 3996 3997 static int 3998 iwn4965_set_gains(struct iwn_softc *sc) 3999 { 4000 struct iwn_calib_state *calib = &sc->calib; 4001 struct iwn_phy_calib_gain cmd; 4002 int i, delta, noise; 4003 4004 /* Get minimal noise among connected antennas. */ 4005 noise = INT_MAX; /* NB: There's at least one antenna. */ 4006 for (i = 0; i < 3; i++) 4007 if (sc->chainmask & (1 << i)) 4008 noise = MIN(calib->noise[i], noise); 4009 4010 memset(&cmd, 0, sizeof cmd); 4011 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 4012 /* Set differential gains for connected antennas. */ 4013 for (i = 0; i < 3; i++) { 4014 if (sc->chainmask & (1 << i)) { 4015 /* Compute attenuation (in unit of 1.5dB). */ 4016 delta = (noise - (int32_t)calib->noise[i]) / 30; 4017 /* NB: delta <= 0 */ 4018 /* Limit to [-4.5dB,0]. */ 4019 cmd.gain[i] = MIN(abs(delta), 3); 4020 if (delta < 0) 4021 cmd.gain[i] |= 1 << 2; /* sign bit */ 4022 } 4023 } 4024 DPRINTF(("setting differential gains Ant A/B/C: %x/%x/%x (%x)\n", 4025 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask)); 4026 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4027 } 4028 4029 static int 4030 iwn5000_set_gains(struct iwn_softc *sc) 4031 { 4032 struct iwn_calib_state *calib = &sc->calib; 4033 struct iwn_phy_calib_gain cmd; 4034 int i, ant, div, delta; 4035 4036 /* We collected 20 beacons and !=6050 need a 1.5 factor. */ 4037 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30; 4038 4039 memset(&cmd, 0, sizeof cmd); 4040 cmd.code = sc->noise_gain; 4041 cmd.ngroups = 1; 4042 cmd.isvalid = 1; 4043 /* Get first available RX antenna as referential. */ 4044 ant = IWN_LSB(sc->rxchainmask); 4045 /* Set differential gains for other antennas. */ 4046 for (i = ant + 1; i < 3; i++) { 4047 if (sc->chainmask & (1 << i)) { 4048 /* The delta is relative to antenna "ant". */ 4049 delta = ((int32_t)calib->noise[ant] - 4050 (int32_t)calib->noise[i]) / div; 4051 /* Limit to [-4.5dB,+4.5dB]. */ 4052 cmd.gain[i - 1] = MIN(abs(delta), 3); 4053 if (delta < 0) 4054 cmd.gain[i - 1] |= 1 << 2; /* sign bit */ 4055 } 4056 } 4057 DPRINTF(("setting differential gains: %x/%x (%x)\n", 4058 cmd.gain[0], cmd.gain[1], sc->chainmask)); 4059 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4060 } 4061 4062 /* 4063 * Tune RF RX sensitivity based on the number of false alarms detected 4064 * during the last beacon period. 
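* False-alarm counts are normalised against the time the receiver was
* actually enabled (stats->general.load): a high rate bumps the correlation
* thresholds up (less sensitive), a low rate brings them back down, always
* within the per-chip limits in sc->limits.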
4065 */ 4066 static void 4067 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) 4068 { 4069 #define inc(val, inc, max) \ 4070 if ((val) < (max)) { \ 4071 if ((val) < (max) - (inc)) \ 4072 (val) += (inc); \ 4073 else \ 4074 (val) = (max); \ 4075 needs_update = 1; \ 4076 } 4077 #define dec(val, dec, min) \ 4078 if ((val) > (min)) { \ 4079 if ((val) > (min) + (dec)) \ 4080 (val) -= (dec); \ 4081 else \ 4082 (val) = (min); \ 4083 needs_update = 1; \ 4084 } 4085 4086 const struct iwn_sensitivity_limits *limits = sc->limits; 4087 struct iwn_calib_state *calib = &sc->calib; 4088 uint32_t val, rxena, fa; 4089 uint32_t energy[3], energy_min; 4090 uint8_t noise[3], noise_ref; 4091 int i, needs_update = 0; 4092 4093 /* Check that we've been enabled long enough. */ 4094 if ((rxena = le32toh(stats->general.load)) == 0) 4095 return; 4096 4097 /* Compute number of false alarms since last call for OFDM. */ 4098 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; 4099 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm; 4100 fa *= 200 * 1024; /* 200TU */ 4101 4102 /* Save counters values for next call. */ 4103 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp); 4104 calib->fa_ofdm = le32toh(stats->ofdm.fa); 4105 4106 if (fa > 50 * rxena) { 4107 /* High false alarm count, decrease sensitivity. */ 4108 DPRINTFN(2, ("OFDM high false alarm count: %u\n", fa)); 4109 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); 4110 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); 4111 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); 4112 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); 4113 4114 } else if (fa < 5 * rxena) { 4115 /* Low false alarm count, increase sensitivity. */ 4116 DPRINTFN(2, ("OFDM low false alarm count: %u\n", fa)); 4117 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); 4118 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); 4119 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); 4120 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); 4121 } 4122 4123 /* Compute maximum noise among 3 receivers. */ 4124 for (i = 0; i < 3; i++) 4125 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff; 4126 val = MAX(noise[0], noise[1]); 4127 val = MAX(noise[2], val); 4128 /* Insert it into our samples table. */ 4129 calib->noise_samples[calib->cur_noise_sample] = val; 4130 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; 4131 4132 /* Compute maximum noise among last 20 samples. */ 4133 noise_ref = calib->noise_samples[0]; 4134 for (i = 1; i < 20; i++) 4135 noise_ref = MAX(noise_ref, calib->noise_samples[i]); 4136 4137 /* Compute maximum energy among 3 receivers. */ 4138 for (i = 0; i < 3; i++) 4139 energy[i] = le32toh(stats->general.energy[i]); 4140 val = MIN(energy[0], energy[1]); 4141 val = MIN(energy[2], val); 4142 /* Insert it into our samples table. */ 4143 calib->energy_samples[calib->cur_energy_sample] = val; 4144 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; 4145 4146 /* Compute minimum energy among last 10 samples. */ 4147 energy_min = calib->energy_samples[0]; 4148 for (i = 1; i < 10; i++) 4149 energy_min = MAX(energy_min, calib->energy_samples[i]); 4150 energy_min += 6; 4151 4152 /* Compute number of false alarms since last call for CCK. */ 4153 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck; 4154 fa += le32toh(stats->cck.fa) - calib->fa_cck; 4155 fa *= 200 * 1024; /* 200TU */ 4156 4157 /* Save counters values for next call. 
*/ 4158 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp); 4159 calib->fa_cck = le32toh(stats->cck.fa); 4160 4161 if (fa > 50 * rxena) { 4162 /* High false alarm count, decrease sensitivity. */ 4163 DPRINTFN(2, ("CCK high false alarm count: %u\n", fa)); 4164 calib->cck_state = IWN_CCK_STATE_HIFA; 4165 calib->low_fa = 0; 4166 4167 if (calib->cck_x4 > 160) { 4168 calib->noise_ref = noise_ref; 4169 if (calib->energy_cck > 2) 4170 dec(calib->energy_cck, 2, energy_min); 4171 } 4172 if (calib->cck_x4 < 160) { 4173 calib->cck_x4 = 161; 4174 needs_update = 1; 4175 } else 4176 inc(calib->cck_x4, 3, limits->max_cck_x4); 4177 4178 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); 4179 4180 } else if (fa < 5 * rxena) { 4181 /* Low false alarm count, increase sensitivity. */ 4182 DPRINTFN(2, ("CCK low false alarm count: %u\n", fa)); 4183 calib->cck_state = IWN_CCK_STATE_LOFA; 4184 calib->low_fa++; 4185 4186 if (calib->cck_state != IWN_CCK_STATE_INIT && 4187 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || 4188 calib->low_fa > 100)) { 4189 inc(calib->energy_cck, 2, limits->min_energy_cck); 4190 dec(calib->cck_x4, 3, limits->min_cck_x4); 4191 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); 4192 } 4193 } else { 4194 /* Not worth to increase or decrease sensitivity. */ 4195 DPRINTFN(2, ("CCK normal false alarm count: %u\n", fa)); 4196 calib->low_fa = 0; 4197 calib->noise_ref = noise_ref; 4198 4199 if (calib->cck_state == IWN_CCK_STATE_HIFA) { 4200 /* Previous interval had many false alarms. */ 4201 dec(calib->energy_cck, 8, energy_min); 4202 } 4203 calib->cck_state = IWN_CCK_STATE_INIT; 4204 } 4205 4206 if (needs_update) 4207 (void)iwn_send_sensitivity(sc); 4208 #undef dec 4209 #undef inc 4210 } 4211 4212 static int 4213 iwn_send_sensitivity(struct iwn_softc *sc) 4214 { 4215 struct iwn_calib_state *calib = &sc->calib; 4216 struct iwn_enhanced_sensitivity_cmd cmd; 4217 int len; 4218 4219 memset(&cmd, 0, sizeof cmd); 4220 len = sizeof (struct iwn_sensitivity_cmd); 4221 cmd.which = IWN_SENSITIVITY_WORKTBL; 4222 /* OFDM modulation. */ 4223 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); 4224 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); 4225 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); 4226 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); 4227 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm); 4228 cmd.energy_ofdm_th = htole16(62); 4229 /* CCK modulation. */ 4230 cmd.corr_cck_x4 = htole16(calib->cck_x4); 4231 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); 4232 cmd.energy_cck = htole16(calib->energy_cck); 4233 /* Barker modulation: use default values. */ 4234 cmd.corr_barker = htole16(190); 4235 cmd.corr_barker_mrc = htole16(sc->limits->barker_mrc); 4236 if (!(sc->sc_flags & IWN_FLAG_ENH_SENS)) 4237 goto send; 4238 /* Enhanced sensitivity settings. */ 4239 len = sizeof (struct iwn_enhanced_sensitivity_cmd); 4240 cmd.ofdm_det_slope_mrc = htole16(668); 4241 cmd.ofdm_det_icept_mrc = htole16(4); 4242 cmd.ofdm_det_slope = htole16(486); 4243 cmd.ofdm_det_icept = htole16(37); 4244 cmd.cck_det_slope_mrc = htole16(853); 4245 cmd.cck_det_icept_mrc = htole16(4); 4246 cmd.cck_det_slope = htole16(476); 4247 cmd.cck_det_icept = htole16(99); 4248 send: 4249 DPRINTFN(2, ("setting sensitivity %d/%d/%d/%d/%d/%d/%d\n", 4250 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4, 4251 calib->ofdm_mrc_x4, calib->cck_x4, calib->cck_mrc_x4, 4252 calib->energy_cck)); 4253 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1); 4254 } 4255 4256 /* 4257 * Set STA mode power saving level (between 0 and 5). 
4258 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 4259 */ 4260 static int 4261 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) 4262 { 4263 struct iwn_pmgt_cmd cmd; 4264 const struct iwn_pmgt *pmgt; 4265 uint32_t maxp, skip_dtim; 4266 pcireg_t reg; 4267 int i; 4268 4269 /* Select which PS parameters to use. */ 4270 if (dtim <= 2) 4271 pmgt = &iwn_pmgt[0][level]; 4272 else if (dtim <= 10) 4273 pmgt = &iwn_pmgt[1][level]; 4274 else 4275 pmgt = &iwn_pmgt[2][level]; 4276 4277 memset(&cmd, 0, sizeof cmd); 4278 if (level != 0) /* not CAM */ 4279 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); 4280 if (level == 5) 4281 cmd.flags |= htole16(IWN_PS_FAST_PD); 4282 /* Retrieve PCIe Active State Power Management (ASPM). */ 4283 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 4284 sc->sc_cap_off + PCIE_LCSR); 4285 if (!(reg & PCIE_LCSR_ASPM_L0S)) /* L0s Entry disabled. */ 4286 cmd.flags |= htole16(IWN_PS_PCI_PMGT); 4287 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); 4288 cmd.txtimeout = htole32(pmgt->txtimeout * 1024); 4289 4290 if (dtim == 0) { 4291 dtim = 1; 4292 skip_dtim = 0; 4293 } else 4294 skip_dtim = pmgt->skip_dtim; 4295 if (skip_dtim != 0) { 4296 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); 4297 maxp = pmgt->intval[4]; 4298 if (maxp == (uint32_t)-1) 4299 maxp = dtim * (skip_dtim + 1); 4300 else if (maxp > dtim) 4301 maxp = (maxp / dtim) * dtim; 4302 } else 4303 maxp = dtim; 4304 for (i = 0; i < 5; i++) 4305 cmd.intval[i] = htole32(MIN(maxp, pmgt->intval[i])); 4306 4307 DPRINTF(("setting power saving level to %d\n", level)); 4308 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 4309 } 4310 4311 int 4312 iwn5000_runtime_calib(struct iwn_softc *sc) 4313 { 4314 struct iwn5000_calib_config cmd; 4315 4316 memset(&cmd, 0, sizeof cmd); 4317 cmd.ucode.once.enable = 0xffffffff; 4318 cmd.ucode.once.start = IWN5000_CALIB_DC; 4319 DPRINTF(("configuring runtime calibration\n")); 4320 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0); 4321 } 4322 4323 static int 4324 iwn_config_bt_coex_bluetooth(struct iwn_softc *sc) 4325 { 4326 struct iwn_bluetooth bluetooth; 4327 4328 memset(&bluetooth, 0, sizeof bluetooth); 4329 bluetooth.flags = IWN_BT_COEX_ENABLE; 4330 bluetooth.lead_time = IWN_BT_LEAD_TIME_DEF; 4331 bluetooth.max_kill = IWN_BT_MAX_KILL_DEF; 4332 4333 DPRINTF(("configuring bluetooth coexistence\n")); 4334 return iwn_cmd(sc, IWN_CMD_BT_COEX, &bluetooth, sizeof bluetooth, 0); 4335 } 4336 4337 static int 4338 iwn_config_bt_coex_prio_table(struct iwn_softc *sc) 4339 { 4340 uint8_t prio_table[16]; 4341 4342 memset(&prio_table, 0, sizeof prio_table); 4343 prio_table[ 0] = 6; /* init calibration 1 */ 4344 prio_table[ 1] = 7; /* init calibration 2 */ 4345 prio_table[ 2] = 2; /* periodic calib low 1 */ 4346 prio_table[ 3] = 3; /* periodic calib low 2 */ 4347 prio_table[ 4] = 4; /* periodic calib high 1 */ 4348 prio_table[ 5] = 5; /* periodic calib high 2 */ 4349 prio_table[ 6] = 6; /* dtim */ 4350 prio_table[ 7] = 8; /* scan52 */ 4351 prio_table[ 8] = 10; /* scan24 */ 4352 4353 DPRINTF(("sending priority lookup table\n")); 4354 return iwn_cmd(sc, IWN_CMD_BT_COEX_PRIO_TABLE, 4355 &prio_table, sizeof prio_table, 0); 4356 } 4357 4358 static int 4359 iwn_config_bt_coex_adv_config(struct iwn_softc *sc, struct iwn_bt_basic *basic, 4360 size_t len) 4361 { 4362 struct iwn_btcoex_prot btprot; 4363 int error; 4364 4365 basic->bt.flags = IWN_BT_COEX_ENABLE; 4366 basic->bt.lead_time = IWN_BT_LEAD_TIME_DEF; 4367 basic->bt.max_kill = 
IWN_BT_MAX_KILL_DEF; 4368 basic->bt.bt3_timer_t7_value = IWN_BT_BT3_T7_DEF; 4369 basic->bt.kill_ack_mask = IWN_BT_KILL_ACK_MASK_DEF; 4370 basic->bt.kill_cts_mask = IWN_BT_KILL_CTS_MASK_DEF; 4371 basic->bt3_prio_sample_time = IWN_BT_BT3_PRIO_SAMPLE_DEF; 4372 basic->bt3_timer_t2_value = IWN_BT_BT3_T2_DEF; 4373 basic->bt3_lookup_table[ 0] = htole32(0xaaaaaaaa); /* Normal */ 4374 basic->bt3_lookup_table[ 1] = htole32(0xaaaaaaaa); 4375 basic->bt3_lookup_table[ 2] = htole32(0xaeaaaaaa); 4376 basic->bt3_lookup_table[ 3] = htole32(0xaaaaaaaa); 4377 basic->bt3_lookup_table[ 4] = htole32(0xcc00ff28); 4378 basic->bt3_lookup_table[ 5] = htole32(0x0000aaaa); 4379 basic->bt3_lookup_table[ 6] = htole32(0xcc00aaaa); 4380 basic->bt3_lookup_table[ 7] = htole32(0x0000aaaa); 4381 basic->bt3_lookup_table[ 8] = htole32(0xc0004000); 4382 basic->bt3_lookup_table[ 9] = htole32(0x00004000); 4383 basic->bt3_lookup_table[10] = htole32(0xf0005000); 4384 basic->bt3_lookup_table[11] = htole32(0xf0005000); 4385 basic->reduce_txpower = 0; /* not implemented */ 4386 basic->valid = IWN_BT_ALL_VALID_MASK; 4387 4388 DPRINTF(("configuring advanced bluetooth coexistence v1\n")); 4389 error = iwn_cmd(sc, IWN_CMD_BT_COEX, basic, len, 0); 4390 if (error != 0) { 4391 aprint_error_dev(sc->sc_dev, 4392 "could not configure advanced bluetooth coexistence\n"); 4393 return error; 4394 } 4395 4396 error = iwn_config_bt_coex_prio_table(sc); 4397 if (error != 0) { 4398 aprint_error_dev(sc->sc_dev, 4399 "could not send BT priority table\n"); 4400 return error; 4401 } 4402 4403 /* Force BT state machine change. */ 4404 memset(&btprot, 0, sizeof btprot); 4405 btprot.open = 1; 4406 btprot.type = 1; 4407 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof btprot, 1); 4408 if (error != 0) { 4409 aprint_error_dev(sc->sc_dev, "could not open BT protocol\n"); 4410 return error; 4411 } 4412 4413 btprot.open = 0; 4414 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof btprot, 1); 4415 if (error != 0) { 4416 aprint_error_dev(sc->sc_dev, "could not close BT protocol\n"); 4417 return error; 4418 } 4419 return 0; 4420 } 4421 4422 static int 4423 iwn_config_bt_coex_adv1(struct iwn_softc *sc) 4424 { 4425 struct iwn_bt_adv1 d; 4426 4427 memset(&d, 0, sizeof d); 4428 d.prio_boost = IWN_BT_PRIO_BOOST_DEF; 4429 d.tx_prio_boost = 0; 4430 d.rx_prio_boost = 0; 4431 return iwn_config_bt_coex_adv_config(sc, &d.basic, sizeof d); 4432 } 4433 4434 static int 4435 iwn_config_bt_coex_adv2(struct iwn_softc *sc) 4436 { 4437 struct iwn_bt_adv2 d; 4438 4439 memset(&d, 0, sizeof d); 4440 d.prio_boost = IWN_BT_PRIO_BOOST_DEF; 4441 d.tx_prio_boost = 0; 4442 d.rx_prio_boost = 0; 4443 return iwn_config_bt_coex_adv_config(sc, &d.basic, sizeof d); 4444 } 4445 4446 static int 4447 iwn_config(struct iwn_softc *sc) 4448 { 4449 struct iwn_ops *ops = &sc->ops; 4450 struct ieee80211com *ic = &sc->sc_ic; 4451 struct ifnet *ifp = ic->ic_ifp; 4452 uint32_t txmask; 4453 uint16_t rxchain; 4454 int error; 4455 4456 error = ops->config_bt_coex(sc); 4457 if (error != 0) { 4458 aprint_error_dev(sc->sc_dev, 4459 "could not configure bluetooth coexistence\n"); 4460 return error; 4461 } 4462 4463 /* Set radio temperature sensor offset. 
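* 6005 parts use the single-offset command (iwn6000_temp_offset_calib),
* the 2000/2030/105/135 family the low/high pair (iwn2000_temp_offset_calib);
* both fall back to IWN_DEFAULT_TEMP_OFFSET when the EEPROM has no value.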
*/ 4464 if (sc->hw_type == IWN_HW_REV_TYPE_6005) { 4465 error = iwn6000_temp_offset_calib(sc); 4466 if (error != 0) { 4467 aprint_error_dev(sc->sc_dev, 4468 "could not set temperature offset\n"); 4469 return error; 4470 } 4471 } 4472 4473 if (sc->hw_type == IWN_HW_REV_TYPE_2030 || 4474 sc->hw_type == IWN_HW_REV_TYPE_2000 || 4475 sc->hw_type == IWN_HW_REV_TYPE_135 || 4476 sc->hw_type == IWN_HW_REV_TYPE_105) { 4477 error = iwn2000_temp_offset_calib(sc); 4478 if (error != 0) { 4479 aprint_error_dev(sc->sc_dev, 4480 "could not set temperature offset\n"); 4481 return error; 4482 } 4483 } 4484 4485 if (sc->hw_type == IWN_HW_REV_TYPE_6050 || 4486 sc->hw_type == IWN_HW_REV_TYPE_6005) { 4487 /* Configure runtime DC calibration. */ 4488 error = iwn5000_runtime_calib(sc); 4489 if (error != 0) { 4490 aprint_error_dev(sc->sc_dev, 4491 "could not configure runtime calibration\n"); 4492 return error; 4493 } 4494 } 4495 4496 /* Configure valid TX chains for 5000 Series. */ 4497 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4498 txmask = htole32(sc->txchainmask); 4499 DPRINTF(("configuring valid TX chains 0x%x\n", txmask)); 4500 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask, 4501 sizeof txmask, 0); 4502 if (error != 0) { 4503 aprint_error_dev(sc->sc_dev, 4504 "could not configure valid TX chains\n"); 4505 return error; 4506 } 4507 } 4508 4509 /* Set mode, channel, RX filter and enable RX. */ 4510 memset(&sc->rxon, 0, sizeof (struct iwn_rxon)); 4511 IEEE80211_ADDR_COPY(ic->ic_myaddr, CLLADDR(ifp->if_sadl)); 4512 IEEE80211_ADDR_COPY(sc->rxon.myaddr, ic->ic_myaddr); 4513 IEEE80211_ADDR_COPY(sc->rxon.wlap, ic->ic_myaddr); 4514 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_ibss_chan); 4515 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4516 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_ibss_chan)) 4517 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4518 switch (ic->ic_opmode) { 4519 case IEEE80211_M_STA: 4520 sc->rxon.mode = IWN_MODE_STA; 4521 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST); 4522 break; 4523 case IEEE80211_M_MONITOR: 4524 sc->rxon.mode = IWN_MODE_MONITOR; 4525 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST | 4526 IWN_FILTER_CTL | IWN_FILTER_PROMISC); 4527 break; 4528 default: 4529 /* Should not get there. */ 4530 break; 4531 } 4532 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 4533 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 4534 sc->rxon.ht_single_mask = 0xff; 4535 sc->rxon.ht_dual_mask = 0xff; 4536 sc->rxon.ht_triple_mask = 0xff; 4537 rxchain = 4538 IWN_RXCHAIN_VALID(sc->rxchainmask) | 4539 IWN_RXCHAIN_MIMO_COUNT(2) | 4540 IWN_RXCHAIN_IDLE_COUNT(2); 4541 sc->rxon.rxchain = htole16(rxchain); 4542 DPRINTF(("setting configuration\n")); 4543 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0); 4544 if (error != 0) { 4545 aprint_error_dev(sc->sc_dev, 4546 "RXON command failed\n"); 4547 return error; 4548 } 4549 4550 if ((error = iwn_add_broadcast_node(sc, 0)) != 0) { 4551 aprint_error_dev(sc->sc_dev, 4552 "could not add broadcast node\n"); 4553 return error; 4554 } 4555 4556 /* Configuration has changed, set TX power accordingly. */ 4557 if ((error = ops->set_txpower(sc, 0)) != 0) { 4558 aprint_error_dev(sc->sc_dev, 4559 "could not set TX power\n"); 4560 return error; 4561 } 4562 4563 if ((error = iwn_set_critical_temp(sc)) != 0) { 4564 aprint_error_dev(sc->sc_dev, 4565 "could not set critical temperature\n"); 4566 return error; 4567 } 4568 4569 /* Set power saving level to CAM during initialization. 
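* (Level 0 is CAM, i.e. no sleeping.)  The level is only raised later,
* from iwn_collect_noise(), and only if the user has enabled power
* management (IEEE80211_F_PMGTON).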
*/ 4570 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) { 4571 aprint_error_dev(sc->sc_dev, 4572 "could not set power saving level\n"); 4573 return error; 4574 } 4575 return 0; 4576 } 4577 4578 static uint16_t 4579 iwn_get_active_dwell_time(struct iwn_softc *sc, uint16_t flags, 4580 uint8_t n_probes) 4581 { 4582 /* No channel? Default to 2GHz settings */ 4583 if (flags & IEEE80211_CHAN_2GHZ) 4584 return IWN_ACTIVE_DWELL_TIME_2GHZ + 4585 IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1); 4586 4587 /* 5GHz dwell time */ 4588 return IWN_ACTIVE_DWELL_TIME_5GHZ + 4589 IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1); 4590 } 4591 4592 /* 4593 * Limit the total dwell time to 85% of the beacon interval. 4594 * 4595 * Returns the dwell time in milliseconds. 4596 */ 4597 static uint16_t 4598 iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time) 4599 { 4600 struct ieee80211com *ic = &sc->sc_ic; 4601 struct ieee80211_node *ni = ic->ic_bss; 4602 int bintval = 0; 4603 4604 /* bintval is in TU (1.024mS) */ 4605 if (ni != NULL) 4606 bintval = ni->ni_intval; 4607 4608 /* 4609 * If it's non-zero, we should calculate the minimum of 4610 * it and the DWELL_BASE. 4611 * 4612 * XXX Yes, the math should take into account that bintval 4613 * is 1.024mS, not 1mS.. 4614 */ 4615 if (bintval > 0) 4616 return MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100)); 4617 4618 /* No association context? Default */ 4619 return IWN_PASSIVE_DWELL_BASE; 4620 } 4621 4622 static uint16_t 4623 iwn_get_passive_dwell_time(struct iwn_softc *sc, uint16_t flags) 4624 { 4625 uint16_t passive; 4626 if (flags & IEEE80211_CHAN_2GHZ) 4627 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ; 4628 else 4629 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ; 4630 4631 /* Clamp to the beacon interval if we're associated */ 4632 return iwn_limit_dwell(sc, passive); 4633 } 4634 4635 static int 4636 iwn_scan(struct iwn_softc *sc, uint16_t flags) 4637 { 4638 struct ieee80211com *ic = &sc->sc_ic; 4639 struct iwn_scan_hdr *hdr; 4640 struct iwn_cmd_data *tx; 4641 struct iwn_scan_essid *essid; 4642 struct iwn_scan_chan *chan; 4643 struct ieee80211_frame *wh; 4644 struct ieee80211_rateset *rs; 4645 struct ieee80211_channel *c; 4646 uint8_t *buf, *frm; 4647 uint16_t rxchain, dwell_active, dwell_passive; 4648 uint8_t txant; 4649 int buflen, error, is_active; 4650 4651 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4652 if (buf == NULL) { 4653 aprint_error_dev(sc->sc_dev, 4654 "could not allocate buffer for scan command\n"); 4655 return ENOMEM; 4656 } 4657 hdr = (struct iwn_scan_hdr *)buf; 4658 /* 4659 * Move to the next channel if no frames are received within 10ms 4660 * after sending the probe request. 4661 */ 4662 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 4663 hdr->quiet_threshold = htole16(1); /* min # of packets */ 4664 4665 /* Select antennas for scanning. */ 4666 rxchain = 4667 IWN_RXCHAIN_VALID(sc->rxchainmask) | 4668 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) | 4669 IWN_RXCHAIN_DRIVER_FORCE; 4670 if ((flags & IEEE80211_CHAN_5GHZ) && 4671 sc->hw_type == IWN_HW_REV_TYPE_4965) { 4672 /* Ant A must be avoided in 5GHz because of an HW bug. */ 4673 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC); 4674 } else /* Use all available RX antennas. 
*/ 4675 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); 4676 hdr->rxchain = htole16(rxchain); 4677 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); 4678 4679 tx = (struct iwn_cmd_data *)(hdr + 1); 4680 tx->flags = htole32(IWN_TX_AUTO_SEQ); 4681 tx->id = sc->broadcast_id; 4682 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4683 4684 if (flags & IEEE80211_CHAN_5GHZ) { 4685 hdr->crc_threshold = 0xffff; 4686 /* Send probe requests at 6Mbps. */ 4687 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp; 4688 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4689 } else { 4690 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); 4691 /* Send probe requests at 1Mbps. */ 4692 tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp; 4693 tx->rflags = IWN_RFLAG_CCK; 4694 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4695 } 4696 /* Use the first valid TX antenna. */ 4697 txant = IWN_LSB(sc->txchainmask); 4698 tx->rflags |= IWN_RFLAG_ANT(txant); 4699 4700 /* 4701 * Only do active scanning if we're announcing a probe request 4702 * for a given SSID (or more, if we ever add it to the driver.) 4703 */ 4704 is_active = 0; 4705 4706 essid = (struct iwn_scan_essid *)(tx + 1); 4707 if (ic->ic_des_esslen != 0) { 4708 essid[0].id = IEEE80211_ELEMID_SSID; 4709 essid[0].len = ic->ic_des_esslen; 4710 memcpy(essid[0].data, ic->ic_des_essid, ic->ic_des_esslen); 4711 4712 is_active = 1; 4713 } 4714 /* 4715 * Build a probe request frame. Most of the following code is a 4716 * copy & paste of what is done in net80211. 4717 */ 4718 wh = (struct ieee80211_frame *)(essid + 20); 4719 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4720 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4721 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4722 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr); 4723 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr); 4724 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr); 4725 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 4726 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 4727 4728 frm = (uint8_t *)(wh + 1); 4729 frm = ieee80211_add_ssid(frm, NULL, 0); 4730 frm = ieee80211_add_rates(frm, rs); 4731 #ifndef IEEE80211_NO_HT 4732 if (ic->ic_flags & IEEE80211_F_HTON) 4733 frm = ieee80211_add_htcaps(frm, ic); 4734 #endif 4735 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4736 frm = ieee80211_add_xrates(frm, rs); 4737 4738 /* Set length of probe request. */ 4739 tx->len = htole16(frm - (uint8_t *)wh); 4740 4741 4742 /* 4743 * If active scanning is requested but a certain channel is 4744 * marked passive, we can do active scanning if we detect 4745 * transmissions. 4746 * 4747 * There is an issue with some firmware versions that triggers 4748 * a sysassert on a "good CRC threshold" of zero (== disabled), 4749 * on a radar channel even though this means that we should NOT 4750 * send probes. 4751 * 4752 * The "good CRC threshold" is the number of frames that we 4753 * need to receive during our dwell time on a channel before 4754 * sending out probes -- setting this to a huge value will 4755 * mean we never reach it, but at the same time work around 4756 * the aforementioned issue. Thus use IWN_GOOD_CRC_TH_NEVER 4757 * here instead of IWN_GOOD_CRC_TH_DISABLED. 4758 * 4759 * This was fixed in later versions along with some other 4760 * scan changes, and the threshold behaves as a flag in those 4761 * versions. 4762 */ 4763 4764 /* 4765 * If we're doing active scanning, set the crc_threshold 4766 * to a suitable value. 
This differs between active and 4767 * passive scanning, depending upon the channel flags; the 4768 * firmware will obey that particular check for us. 4769 */ 4770 if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN) 4771 hdr->crc_threshold = is_active ? 4772 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED; 4773 else 4774 hdr->crc_threshold = is_active ? 4775 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER; 4776 4777 chan = (struct iwn_scan_chan *)frm; 4778 for (c = &ic->ic_channels[1]; 4779 c <= &ic->ic_channels[IEEE80211_CHAN_MAX]; c++) { 4780 if ((c->ic_flags & flags) != flags) 4781 continue; 4782 4783 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 4784 DPRINTFN(2, ("adding channel %d\n", chan->chan)); 4785 chan->flags = 0; 4786 if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) 4787 chan->flags |= htole32(IWN_CHAN_ACTIVE); 4788 if (ic->ic_des_esslen != 0) 4789 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 4790 4791 /* 4792 * Calculate the active/passive dwell times. 4793 */ 4794 4795 dwell_active = iwn_get_active_dwell_time(sc, flags, is_active); 4796 dwell_passive = iwn_get_passive_dwell_time(sc, flags); 4797 4798 /* Make sure they're valid. */ 4799 if (dwell_passive <= dwell_active) 4800 dwell_passive = dwell_active + 1; 4801 4802 chan->active = htole16(dwell_active); 4803 chan->passive = htole16(dwell_passive); 4804 4805 chan->dsp_gain = 0x6e; 4806 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4807 chan->rf_gain = 0x3b; 4808 } else { 4809 chan->rf_gain = 0x28; 4810 } 4811 hdr->nchan++; 4812 chan++; 4813 } 4814 4815 buflen = (uint8_t *)chan - buf; 4816 hdr->len = htole16(buflen); 4817 4818 DPRINTF(("sending scan command nchan=%d\n", hdr->nchan)); 4819 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 4820 free(buf, M_DEVBUF); 4821 return error; 4822 } 4823 4824 static int 4825 iwn_auth(struct iwn_softc *sc) 4826 { 4827 struct iwn_ops *ops = &sc->ops; 4828 struct ieee80211com *ic = &sc->sc_ic; 4829 struct ieee80211_node *ni = ic->ic_bss; 4830 int error; 4831 4832 /* Update adapter configuration. */ 4833 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4834 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan); 4835 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4836 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 4837 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4838 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4839 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4840 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4841 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4842 switch (ic->ic_curmode) { 4843 case IEEE80211_MODE_11A: 4844 sc->rxon.cck_mask = 0; 4845 sc->rxon.ofdm_mask = 0x15; 4846 break; 4847 case IEEE80211_MODE_11B: 4848 sc->rxon.cck_mask = 0x03; 4849 sc->rxon.ofdm_mask = 0; 4850 break; 4851 default: /* Assume 802.11b/g. */ 4852 sc->rxon.cck_mask = 0x0f; 4853 sc->rxon.ofdm_mask = 0x15; 4854 } 4855 DPRINTF(("rxon chan %d flags %x cck %x ofdm %x\n", sc->rxon.chan, 4856 sc->rxon.flags, sc->rxon.cck_mask, sc->rxon.ofdm_mask)); 4857 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 4858 if (error != 0) { 4859 aprint_error_dev(sc->sc_dev, 4860 "RXON command failed\n"); 4861 return error; 4862 } 4863 4864 /* Configuration has changed, set TX power accordingly. */ 4865 if ((error = ops->set_txpower(sc, 1)) != 0) { 4866 aprint_error_dev(sc->sc_dev, 4867 "could not set TX power\n"); 4868 return error; 4869 } 4870 /* 4871 * Reconfiguring RXON clears the firmware nodes table so we must 4872 * add the broadcast node again. 
4873 */ 4874 if ((error = iwn_add_broadcast_node(sc, 1)) != 0) { 4875 aprint_error_dev(sc->sc_dev, 4876 "could not add broadcast node\n"); 4877 return error; 4878 } 4879 return 0; 4880 } 4881 4882 static int 4883 iwn_run(struct iwn_softc *sc) 4884 { 4885 struct iwn_ops *ops = &sc->ops; 4886 struct ieee80211com *ic = &sc->sc_ic; 4887 struct ieee80211_node *ni = ic->ic_bss; 4888 struct iwn_node_info node; 4889 int error; 4890 4891 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 4892 /* Link LED blinks while monitoring. */ 4893 iwn_set_led(sc, IWN_LED_LINK, 5, 5); 4894 return 0; 4895 } 4896 if ((error = iwn_set_timing(sc, ni)) != 0) { 4897 aprint_error_dev(sc->sc_dev, 4898 "could not set timing\n"); 4899 return error; 4900 } 4901 4902 /* Update adapter configuration. */ 4903 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd)); 4904 /* Short preamble and slot time are negotiated when associating. */ 4905 sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT); 4906 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4907 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4908 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4909 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4910 sc->rxon.filter |= htole32(IWN_FILTER_BSS); 4911 DPRINTF(("rxon chan %d flags %x\n", sc->rxon.chan, sc->rxon.flags)); 4912 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 4913 if (error != 0) { 4914 aprint_error_dev(sc->sc_dev, 4915 "could not update configuration\n"); 4916 return error; 4917 } 4918 4919 /* Configuration has changed, set TX power accordingly. */ 4920 if ((error = ops->set_txpower(sc, 1)) != 0) { 4921 aprint_error_dev(sc->sc_dev, 4922 "could not set TX power\n"); 4923 return error; 4924 } 4925 4926 /* Fake a join to initialize the TX rate. */ 4927 ((struct iwn_node *)ni)->id = IWN_ID_BSS; 4928 iwn_newassoc(ni, 1); 4929 4930 /* Add BSS node. */ 4931 memset(&node, 0, sizeof node); 4932 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 4933 node.id = IWN_ID_BSS; 4934 #ifdef notyet 4935 node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(3) | 4936 IWN_AMDPU_DENSITY(5)); /* 2us */ 4937 #endif 4938 DPRINTF(("adding BSS node\n")); 4939 error = ops->add_node(sc, &node, 1); 4940 if (error != 0) { 4941 aprint_error_dev(sc->sc_dev, 4942 "could not add BSS node\n"); 4943 return error; 4944 } 4945 DPRINTF(("setting link quality for node %d\n", node.id)); 4946 if ((error = iwn_set_link_quality(sc, ni)) != 0) { 4947 aprint_error_dev(sc->sc_dev, 4948 "could not setup link quality for node %d\n", node.id); 4949 return error; 4950 } 4951 4952 if ((error = iwn_init_sensitivity(sc)) != 0) { 4953 aprint_error_dev(sc->sc_dev, 4954 "could not set sensitivity\n"); 4955 return error; 4956 } 4957 /* Start periodic calibration timer. */ 4958 sc->calib.state = IWN_CALIB_STATE_ASSOC; 4959 sc->calib_cnt = 0; 4960 callout_schedule(&sc->calib_to, hz/2); 4961 4962 /* Link LED always on while associated. */ 4963 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 4964 return 0; 4965 } 4966 4967 #ifdef IWN_HWCRYPTO 4968 /* 4969 * We support CCMP hardware encryption/decryption of unicast frames only. 4970 * HW support for TKIP really sucks. We should let TKIP die anyway. 
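* Group keys and non-CCMP ciphers are punted back to net80211's software
* crypto via ieee80211_set_key() and ieee80211_delete_key().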
4971 */ 4972 static int 4973 iwn_set_key(struct ieee80211com *ic, struct ieee80211_node *ni, 4974 struct ieee80211_key *k) 4975 { 4976 struct iwn_softc *sc = ic->ic_softc; 4977 struct iwn_ops *ops = &sc->ops; 4978 struct iwn_node *wn = (void *)ni; 4979 struct iwn_node_info node; 4980 uint16_t kflags; 4981 4982 if ((k->k_flags & IEEE80211_KEY_GROUP) || 4983 k->k_cipher != IEEE80211_CIPHER_CCMP) 4984 return ieee80211_set_key(ic, ni, k); 4985 4986 kflags = IWN_KFLAG_CCMP | IWN_KFLAG_MAP | IWN_KFLAG_KID(k->k_id); 4987 if (k->k_flags & IEEE80211_KEY_GROUP) 4988 kflags |= IWN_KFLAG_GROUP; 4989 4990 memset(&node, 0, sizeof node); 4991 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ? 4992 sc->broadcast_id : wn->id; 4993 node.control = IWN_NODE_UPDATE; 4994 node.flags = IWN_FLAG_SET_KEY; 4995 node.kflags = htole16(kflags); 4996 node.kid = k->k_id; 4997 memcpy(node.key, k->k_key, k->k_len); 4998 DPRINTF(("set key id=%d for node %d\n", k->k_id, node.id)); 4999 return ops->add_node(sc, &node, 1); 5000 } 5001 5002 static void 5003 iwn_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni, 5004 struct ieee80211_key *k) 5005 { 5006 struct iwn_softc *sc = ic->ic_softc; 5007 struct iwn_ops *ops = &sc->ops; 5008 struct iwn_node *wn = (void *)ni; 5009 struct iwn_node_info node; 5010 5011 if ((k->k_flags & IEEE80211_KEY_GROUP) || 5012 k->k_cipher != IEEE80211_CIPHER_CCMP) { 5013 /* See comment about other ciphers above. */ 5014 ieee80211_delete_key(ic, ni, k); 5015 return; 5016 } 5017 if (ic->ic_state != IEEE80211_S_RUN) 5018 return; /* Nothing to do. */ 5019 memset(&node, 0, sizeof node); 5020 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ? 5021 sc->broadcast_id : wn->id; 5022 node.control = IWN_NODE_UPDATE; 5023 node.flags = IWN_FLAG_SET_KEY; 5024 node.kflags = htole16(IWN_KFLAG_INVALID); 5025 node.kid = 0xff; 5026 DPRINTF(("delete keys for node %d\n", node.id)); 5027 (void)ops->add_node(sc, &node, 1); 5028 } 5029 #endif 5030 5031 /* XXX Added for NetBSD (copied from rev 1.39). */ 5032 5033 static int 5034 iwn_wme_update(struct ieee80211com *ic) 5035 { 5036 #define IWN_EXP2(v) htole16((1 << (v)) - 1) 5037 #define IWN_USEC(v) htole16(IEEE80211_TXOP_TO_US(v)) 5038 struct iwn_softc *sc = ic->ic_ifp->if_softc; 5039 const struct wmeParams *wmep; 5040 struct iwn_edca_params cmd; 5041 int ac; 5042 5043 /* don't override default WME values if WME is not actually enabled */ 5044 if (!(ic->ic_flags & IEEE80211_F_WME)) 5045 return 0; 5046 cmd.flags = 0; 5047 for (ac = 0; ac < WME_NUM_AC; ac++) { 5048 wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 5049 cmd.ac[ac].aifsn = wmep->wmep_aifsn; 5050 cmd.ac[ac].cwmin = IWN_EXP2(wmep->wmep_logcwmin); 5051 cmd.ac[ac].cwmax = IWN_EXP2(wmep->wmep_logcwmax); 5052 cmd.ac[ac].txoplimit = IWN_USEC(wmep->wmep_txopLimit); 5053 5054 DPRINTF(("setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 5055 "txop=%d\n", ac, cmd.ac[ac].aifsn, 5056 cmd.ac[ac].cwmin, 5057 cmd.ac[ac].cwmax, cmd.ac[ac].txoplimit)); 5058 } 5059 return iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 5060 #undef IWN_USEC 5061 #undef IWN_EXP2 5062 } 5063 5064 #ifndef IEEE80211_NO_HT 5065 /* 5066 * This function is called by upper layer when an ADDBA request is received 5067 * from another STA and before the ADDBA response is sent. 
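* All we have to do is hand the firmware the RA/TID pair and the starting
* sequence number (the BA window start) through a node update command.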
5068 */ 5069 static int 5070 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 5071 uint8_t tid) 5072 { 5073 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid]; 5074 struct iwn_softc *sc = ic->ic_softc; 5075 struct iwn_ops *ops = &sc->ops; 5076 struct iwn_node *wn = (void *)ni; 5077 struct iwn_node_info node; 5078 5079 memset(&node, 0, sizeof node); 5080 node.id = wn->id; 5081 node.control = IWN_NODE_UPDATE; 5082 node.flags = IWN_FLAG_SET_ADDBA; 5083 node.addba_tid = tid; 5084 node.addba_ssn = htole16(ba->ba_winstart); 5085 DPRINTFN(2, ("ADDBA RA=%d TID=%d SSN=%d\n", wn->id, tid, 5086 ba->ba_winstart)); 5087 return ops->add_node(sc, &node, 1); 5088 } 5089 5090 /* 5091 * This function is called by upper layer on teardown of an HT-immediate 5092 * Block Ack agreement (eg. uppon receipt of a DELBA frame). 5093 */ 5094 static void 5095 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 5096 uint8_t tid) 5097 { 5098 struct iwn_softc *sc = ic->ic_softc; 5099 struct iwn_ops *ops = &sc->ops; 5100 struct iwn_node *wn = (void *)ni; 5101 struct iwn_node_info node; 5102 5103 memset(&node, 0, sizeof node); 5104 node.id = wn->id; 5105 node.control = IWN_NODE_UPDATE; 5106 node.flags = IWN_FLAG_SET_DELBA; 5107 node.delba_tid = tid; 5108 DPRINTFN(2, ("DELBA RA=%d TID=%d\n", wn->id, tid)); 5109 (void)ops->add_node(sc, &node, 1); 5110 } 5111 5112 /* 5113 * This function is called by upper layer when an ADDBA response is received 5114 * from another STA. 5115 */ 5116 static int 5117 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 5118 uint8_t tid) 5119 { 5120 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 5121 struct iwn_softc *sc = ic->ic_softc; 5122 struct iwn_ops *ops = &sc->ops; 5123 struct iwn_node *wn = (void *)ni; 5124 struct iwn_node_info node; 5125 int error; 5126 5127 /* Enable TX for the specified RA/TID. */ 5128 wn->disable_tid &= ~(1 << tid); 5129 memset(&node, 0, sizeof node); 5130 node.id = wn->id; 5131 node.control = IWN_NODE_UPDATE; 5132 node.flags = IWN_FLAG_SET_DISABLE_TID; 5133 node.disable_tid = htole16(wn->disable_tid); 5134 error = ops->add_node(sc, &node, 1); 5135 if (error != 0) 5136 return error; 5137 5138 if ((error = iwn_nic_lock(sc)) != 0) 5139 return error; 5140 ops->ampdu_tx_start(sc, ni, tid, ba->ba_winstart); 5141 iwn_nic_unlock(sc); 5142 return 0; 5143 } 5144 5145 static void 5146 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 5147 uint8_t tid) 5148 { 5149 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 5150 struct iwn_softc *sc = ic->ic_softc; 5151 struct iwn_ops *ops = &sc->ops; 5152 5153 if (iwn_nic_lock(sc) != 0) 5154 return; 5155 ops->ampdu_tx_stop(sc, tid, ba->ba_winstart); 5156 iwn_nic_unlock(sc); 5157 } 5158 5159 static void 5160 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5161 uint8_t tid, uint16_t ssn) 5162 { 5163 struct iwn_node *wn = (void *)ni; 5164 int qid = 7 + tid; 5165 5166 /* Stop TX scheduler while we're changing its configuration. */ 5167 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5168 IWN4965_TXQ_STATUS_CHGACT); 5169 5170 /* Assign RA/TID translation to the queue. */ 5171 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), 5172 wn->id << 4 | tid); 5173 5174 /* Enable chain-building mode for the queue. */ 5175 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); 5176 5177 /* Set starting sequence number from the ADDBA request. 
*/ 5178 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5179 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 5180 5181 /* Set scheduler window size. */ 5182 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), 5183 IWN_SCHED_WINSZ); 5184 /* Set scheduler frame limit. */ 5185 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5186 IWN_SCHED_LIMIT << 16); 5187 5188 /* Enable interrupts for the queue. */ 5189 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 5190 5191 /* Mark the queue as active. */ 5192 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5193 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | 5194 iwn_tid2fifo[tid] << 1); 5195 } 5196 5197 static void 5198 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 5199 { 5200 int qid = 7 + tid; 5201 5202 /* Stop TX scheduler while we're changing its configuration. */ 5203 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5204 IWN4965_TXQ_STATUS_CHGACT); 5205 5206 /* Set starting sequence number from the ADDBA request. */ 5207 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5208 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 5209 5210 /* Disable interrupts for the queue. */ 5211 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 5212 5213 /* Mark the queue as inactive. */ 5214 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5215 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 5216 } 5217 5218 static void 5219 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5220 uint8_t tid, uint16_t ssn) 5221 { 5222 struct iwn_node *wn = (void *)ni; 5223 int qid = 10 + tid; 5224 5225 /* Stop TX scheduler while we're changing its configuration. */ 5226 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5227 IWN5000_TXQ_STATUS_CHGACT); 5228 5229 /* Assign RA/TID translation to the queue. */ 5230 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 5231 wn->id << 4 | tid); 5232 5233 /* Enable chain-building mode for the queue. */ 5234 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 5235 5236 /* Enable aggregation for the queue. */ 5237 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 5238 5239 /* Set starting sequence number from the ADDBA request. */ 5240 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5241 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 5242 5243 /* Set scheduler window size and frame limit. */ 5244 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5245 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5246 5247 /* Enable interrupts for the queue. */ 5248 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 5249 5250 /* Mark the queue as active. */ 5251 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5252 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 5253 } 5254 5255 static void 5256 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 5257 { 5258 int qid = 10 + tid; 5259 5260 /* Stop TX scheduler while we're changing its configuration. */ 5261 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5262 IWN5000_TXQ_STATUS_CHGACT); 5263 5264 /* Disable aggregation for the queue. */ 5265 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 5266 5267 /* Set starting sequence number from the ADDBA request. */ 5268 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5269 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 5270 5271 /* Disable interrupts for the queue. 
*/ 5272 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 5273 5274 /* Mark the queue as inactive. */ 5275 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5276 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); 5277 } 5278 #endif /* !IEEE80211_NO_HT */ 5279 5280 /* 5281 * Query calibration tables from the initialization firmware. We do this 5282 * only once at first boot. Called from a process context. 5283 */ 5284 static int 5285 iwn5000_query_calibration(struct iwn_softc *sc) 5286 { 5287 struct iwn5000_calib_config cmd; 5288 int error; 5289 5290 memset(&cmd, 0, sizeof cmd); 5291 cmd.ucode.once.enable = 0xffffffff; 5292 cmd.ucode.once.start = 0xffffffff; 5293 cmd.ucode.once.send = 0xffffffff; 5294 cmd.ucode.flags = 0xffffffff; 5295 DPRINTF(("sending calibration query\n")); 5296 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); 5297 if (error != 0) 5298 return error; 5299 5300 /* Wait at most two seconds for calibration to complete. */ 5301 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) 5302 error = tsleep(sc, PCATCH, "iwncal", 2 * hz); 5303 return error; 5304 } 5305 5306 /* 5307 * Send calibration results to the runtime firmware. These results were 5308 * obtained on first boot from the initialization firmware. 5309 */ 5310 static int 5311 iwn5000_send_calibration(struct iwn_softc *sc) 5312 { 5313 int idx, error; 5314 5315 for (idx = 0; idx < 5; idx++) { 5316 if (sc->calibcmd[idx].buf == NULL) 5317 continue; /* No results available. */ 5318 DPRINTF(("send calibration result idx=%d len=%d\n", 5319 idx, sc->calibcmd[idx].len)); 5320 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf, 5321 sc->calibcmd[idx].len, 0); 5322 if (error != 0) { 5323 aprint_error_dev(sc->sc_dev, 5324 "could not send calibration result\n"); 5325 return error; 5326 } 5327 } 5328 return 0; 5329 } 5330 5331 static int 5332 iwn5000_send_wimax_coex(struct iwn_softc *sc) 5333 { 5334 struct iwn5000_wimax_coex wimax; 5335 5336 #ifdef notyet 5337 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 5338 /* Enable WiMAX coexistence for combo adapters. */ 5339 wimax.flags = 5340 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 5341 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 5342 IWN_WIMAX_COEX_STA_TABLE_VALID | 5343 IWN_WIMAX_COEX_ENABLE; 5344 memcpy(wimax.events, iwn6050_wimax_events, 5345 sizeof iwn6050_wimax_events); 5346 } else 5347 #endif 5348 { 5349 /* Disable WiMAX coexistence. 
*/ 5350 wimax.flags = 0; 5351 memset(wimax.events, 0, sizeof wimax.events); 5352 } 5353 DPRINTF(("Configuring WiMAX coexistence\n")); 5354 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 5355 } 5356 5357 static int 5358 iwn6000_temp_offset_calib(struct iwn_softc *sc) 5359 { 5360 struct iwn6000_phy_calib_temp_offset cmd; 5361 5362 memset(&cmd, 0, sizeof cmd); 5363 cmd.code = IWN6000_PHY_CALIB_TEMP_OFFSET; 5364 cmd.ngroups = 1; 5365 cmd.isvalid = 1; 5366 if (sc->eeprom_temp != 0) 5367 cmd.offset = htole16(sc->eeprom_temp); 5368 else 5369 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET); 5370 DPRINTF(("setting radio sensor offset to %d\n", le16toh(cmd.offset))); 5371 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 5372 } 5373 5374 static int 5375 iwn2000_temp_offset_calib(struct iwn_softc *sc) 5376 { 5377 struct iwn2000_phy_calib_temp_offset cmd; 5378 5379 memset(&cmd, 0, sizeof cmd); 5380 cmd.code = IWN2000_PHY_CALIB_TEMP_OFFSET; 5381 cmd.ngroups = 1; 5382 cmd.isvalid = 1; 5383 if (sc->eeprom_rawtemp != 0) { 5384 cmd.offset_low = htole16(sc->eeprom_rawtemp); 5385 cmd.offset_high = htole16(sc->eeprom_temp); 5386 } else { 5387 cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET); 5388 cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET); 5389 } 5390 cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage); 5391 DPRINTF(("setting radio sensor offset to %d:%d, voltage to %d\n", 5392 le16toh(cmd.offset_low), le16toh(cmd.offset_high), 5393 le16toh(cmd.burnt_voltage_ref))); 5394 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 5395 } 5396 5397 /* 5398 * This function is called after the runtime firmware notifies us of its 5399 * readiness (called in a process context). 5400 */ 5401 static int 5402 iwn4965_post_alive(struct iwn_softc *sc) 5403 { 5404 int error, qid; 5405 5406 if ((error = iwn_nic_lock(sc)) != 0) 5407 return error; 5408 5409 /* Clear TX scheduler state in SRAM. */ 5410 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5411 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, 5412 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); 5413 5414 /* Set physical address of TX scheduler rings (1KB aligned). */ 5415 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5416 5417 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5418 5419 /* Disable chain mode for all our 16 queues. */ 5420 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); 5421 5422 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { 5423 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); 5424 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5425 5426 /* Set scheduler window size. */ 5427 iwn_mem_write(sc, sc->sched_base + 5428 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); 5429 /* Set scheduler frame limit. */ 5430 iwn_mem_write(sc, sc->sched_base + 5431 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5432 IWN_SCHED_LIMIT << 16); 5433 } 5434 5435 /* Enable interrupts for all our 16 queues. */ 5436 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); 5437 /* Identify TX FIFO rings (0-7). */ 5438 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); 5439 5440 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. 
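* The qid2fifo[] table below maps each of those 7 rings onto its FIFO;
* the remaining rings are only activated on demand by
* iwn4965_ampdu_tx_start() (which uses qid = 7 + tid).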
*/ 5441 for (qid = 0; qid < 7; qid++) { 5442 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 5443 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5444 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 5445 } 5446 iwn_nic_unlock(sc); 5447 return 0; 5448 } 5449 5450 /* 5451 * This function is called after the initialization or runtime firmware 5452 * notifies us of its readiness (called in a process context). 5453 */ 5454 static int 5455 iwn5000_post_alive(struct iwn_softc *sc) 5456 { 5457 int error, qid; 5458 5459 /* Switch to using ICT interrupt mode. */ 5460 iwn5000_ict_reset(sc); 5461 5462 if ((error = iwn_nic_lock(sc)) != 0) 5463 return error; 5464 5465 /* Clear TX scheduler state in SRAM. */ 5466 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5467 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 5468 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); 5469 5470 /* Set physical address of TX scheduler rings (1KB aligned). */ 5471 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5472 5473 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5474 5475 /* Enable chain mode for all queues, except command queue. */ 5476 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); 5477 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 5478 5479 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 5480 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 5481 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5482 5483 iwn_mem_write(sc, sc->sched_base + 5484 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 5485 /* Set scheduler window size and frame limit. */ 5486 iwn_mem_write(sc, sc->sched_base + 5487 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5488 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5489 } 5490 5491 /* Enable interrupts for all our 20 queues. */ 5492 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 5493 /* Identify TX FIFO rings (0-7). */ 5494 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 5495 5496 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 5497 for (qid = 0; qid < 7; qid++) { 5498 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 5499 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5500 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 5501 } 5502 iwn_nic_unlock(sc); 5503 5504 /* Configure WiMAX coexistence for combo adapters. */ 5505 error = iwn5000_send_wimax_coex(sc); 5506 if (error != 0) { 5507 aprint_error_dev(sc->sc_dev, 5508 "could not configure WiMAX coexistence\n"); 5509 return error; 5510 } 5511 if (sc->hw_type != IWN_HW_REV_TYPE_5150) { 5512 struct iwn5000_phy_calib_crystal cmd; 5513 5514 /* Perform crystal calibration. */ 5515 memset(&cmd, 0, sizeof cmd); 5516 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 5517 cmd.ngroups = 1; 5518 cmd.isvalid = 1; 5519 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff; 5520 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff; 5521 DPRINTF(("sending crystal calibration %d, %d\n", 5522 cmd.cap_pin[0], cmd.cap_pin[1])); 5523 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 5524 if (error != 0) { 5525 aprint_error_dev(sc->sc_dev, 5526 "crystal calibration failed\n"); 5527 return error; 5528 } 5529 } 5530 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { 5531 /* Query calibration from the initialization firmware. 
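* At this point we are still running the init image; once the results
* have been fetched we stop the hardware and restart it with the runtime
* firmware (see the iwn_hw_stop()/iwn_hw_init() calls below).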
*/ 5532 if ((error = iwn5000_query_calibration(sc)) != 0) { 5533 aprint_error_dev(sc->sc_dev, 5534 "could not query calibration\n"); 5535 return error; 5536 } 5537 /* 5538 * We have the calibration results now, reboot with the 5539 * runtime firmware (call ourselves recursively!) 5540 */ 5541 iwn_hw_stop(sc); 5542 error = iwn_hw_init(sc); 5543 } else { 5544 /* Send calibration results to runtime firmware. */ 5545 error = iwn5000_send_calibration(sc); 5546 } 5547 return error; 5548 } 5549 5550 /* 5551 * The firmware boot code is small and is intended to be copied directly into 5552 * the NIC internal memory (no DMA transfer). 5553 */ 5554 static int 5555 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) 5556 { 5557 int error, ntries; 5558 5559 size /= sizeof (uint32_t); 5560 5561 if ((error = iwn_nic_lock(sc)) != 0) 5562 return error; 5563 5564 /* Copy microcode image into NIC memory. */ 5565 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, 5566 (const uint32_t *)ucode, size); 5567 5568 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); 5569 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); 5570 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); 5571 5572 /* Start boot load now. */ 5573 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); 5574 5575 /* Wait for transfer to complete. */ 5576 for (ntries = 0; ntries < 1000; ntries++) { 5577 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & 5578 IWN_BSM_WR_CTRL_START)) 5579 break; 5580 DELAY(10); 5581 } 5582 if (ntries == 1000) { 5583 aprint_error_dev(sc->sc_dev, 5584 "could not load boot firmware\n"); 5585 iwn_nic_unlock(sc); 5586 return ETIMEDOUT; 5587 } 5588 5589 /* Enable boot after power up. */ 5590 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); 5591 5592 iwn_nic_unlock(sc); 5593 return 0; 5594 } 5595 5596 static int 5597 iwn4965_load_firmware(struct iwn_softc *sc) 5598 { 5599 struct iwn_fw_info *fw = &sc->fw; 5600 struct iwn_dma_info *dma = &sc->fw_dma; 5601 int error; 5602 5603 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 5604 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 5605 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->init.datasz, 5606 BUS_DMASYNC_PREWRITE); 5607 memcpy((char *)dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5608 fw->init.text, fw->init.textsz); 5609 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ, 5610 fw->init.textsz, BUS_DMASYNC_PREWRITE); 5611 5612 /* Tell adapter where to find initialization sections. */ 5613 if ((error = iwn_nic_lock(sc)) != 0) 5614 return error; 5615 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5616 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); 5617 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5618 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5619 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 5620 iwn_nic_unlock(sc); 5621 5622 /* Load firmware boot code. */ 5623 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 5624 if (error != 0) { 5625 aprint_error_dev(sc->sc_dev, 5626 "could not load boot firmware\n"); 5627 return error; 5628 } 5629 /* Now press "execute". */ 5630 IWN_WRITE(sc, IWN_RESET, 0); 5631 5632 /* Wait at most one second for first alive notification. */ 5633 if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) { 5634 aprint_error_dev(sc->sc_dev, 5635 "timeout waiting for adapter to initialize\n"); 5636 return error; 5637 } 5638 5639 /* Retrieve current temperature for initial TX power calibration. 
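 * The raw reading comes from the "alive" notification the firmware
 * sent us (saved in sc->ucode_info); iwn4965_get_temperature()
 * converts it into the value used by the TX power code.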
*/ 5640 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; 5641 sc->temp = iwn4965_get_temperature(sc); 5642 5643 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 5644 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 5645 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->main.datasz, 5646 BUS_DMASYNC_PREWRITE); 5647 memcpy((char *)dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5648 fw->main.text, fw->main.textsz); 5649 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ, 5650 fw->main.textsz, BUS_DMASYNC_PREWRITE); 5651 5652 /* Tell adapter where to find runtime sections. */ 5653 if ((error = iwn_nic_lock(sc)) != 0) 5654 return error; 5655 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5656 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); 5657 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5658 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5659 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, 5660 IWN_FW_UPDATED | fw->main.textsz); 5661 iwn_nic_unlock(sc); 5662 5663 return 0; 5664 } 5665 5666 static int 5667 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, 5668 const uint8_t *section, int size) 5669 { 5670 struct iwn_dma_info *dma = &sc->fw_dma; 5671 int error; 5672 5673 /* Copy firmware section into pre-allocated DMA-safe memory. */ 5674 memcpy(dma->vaddr, section, size); 5675 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, size, BUS_DMASYNC_PREWRITE); 5676 5677 if ((error = iwn_nic_lock(sc)) != 0) 5678 return error; 5679 5680 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5681 IWN_FH_TX_CONFIG_DMA_PAUSE); 5682 5683 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); 5684 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), 5685 IWN_LOADDR(dma->paddr)); 5686 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), 5687 IWN_HIADDR(dma->paddr) << 28 | size); 5688 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), 5689 IWN_FH_TXBUF_STATUS_TBNUM(1) | 5690 IWN_FH_TXBUF_STATUS_TBIDX(1) | 5691 IWN_FH_TXBUF_STATUS_TFBD_VALID); 5692 5693 /* Kick Flow Handler to start DMA transfer. */ 5694 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5695 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); 5696 5697 iwn_nic_unlock(sc); 5698 5699 /* Wait at most five seconds for FH DMA transfer to complete. */ 5700 return tsleep(sc, PCATCH, "iwninit", 5 * hz); 5701 } 5702 5703 static int 5704 iwn5000_load_firmware(struct iwn_softc *sc) 5705 { 5706 struct iwn_fw_part *fw; 5707 int error; 5708 5709 /* Load the initialization firmware on first boot only. */ 5710 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ? 5711 &sc->fw.main : &sc->fw.init; 5712 5713 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, 5714 fw->text, fw->textsz); 5715 if (error != 0) { 5716 aprint_error_dev(sc->sc_dev, 5717 "could not load firmware %s section\n", ".text"); 5718 return error; 5719 } 5720 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, 5721 fw->data, fw->datasz); 5722 if (error != 0) { 5723 aprint_error_dev(sc->sc_dev, 5724 "could not load firmware %s section\n", ".data"); 5725 return error; 5726 } 5727 5728 /* Now press "execute". */ 5729 IWN_WRITE(sc, IWN_RESET, 0); 5730 return 0; 5731 } 5732 5733 /* 5734 * Extract text and data sections from a legacy firmware image. 
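 *
 * Roughly, a legacy image looks like this (all fields little-endian):
 *
 *	uint32_t rev;				(API version)
 *	uint32_t build;				(only if API version >= 3)
 *	uint32_t main_textsz, main_datasz;
 *	uint32_t init_textsz, init_datasz;
 *	uint32_t boot_textsz;
 *	main.text | main.data | init.text | init.data | boot.text
 *
 * which is why hdrlen starts at 24 bytes and grows to 28 when the
 * build number is present.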
5735 */ 5736 static int 5737 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) 5738 { 5739 const uint32_t *ptr; 5740 size_t hdrlen = 24; 5741 uint32_t rev; 5742 5743 ptr = (const uint32_t *)fw->data; 5744 rev = le32toh(*ptr++); 5745 5746 sc->ucode_rev = rev; 5747 5748 /* Check firmware API version. */ 5749 if (IWN_FW_API(rev) <= 1) { 5750 aprint_error_dev(sc->sc_dev, 5751 "bad firmware, need API version >=2\n"); 5752 return EINVAL; 5753 } 5754 if (IWN_FW_API(rev) >= 3) { 5755 /* Skip build number (version 2 header). */ 5756 hdrlen += 4; 5757 ptr++; 5758 } 5759 if (fw->size < hdrlen) { 5760 aprint_error_dev(sc->sc_dev, 5761 "firmware too short: %zd bytes\n", fw->size); 5762 return EINVAL; 5763 } 5764 fw->main.textsz = le32toh(*ptr++); 5765 fw->main.datasz = le32toh(*ptr++); 5766 fw->init.textsz = le32toh(*ptr++); 5767 fw->init.datasz = le32toh(*ptr++); 5768 fw->boot.textsz = le32toh(*ptr++); 5769 5770 /* Check that all firmware sections fit. */ 5771 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + 5772 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5773 aprint_error_dev(sc->sc_dev, 5774 "firmware too short: %zd bytes\n", fw->size); 5775 return EINVAL; 5776 } 5777 5778 /* Get pointers to firmware sections. */ 5779 fw->main.text = (const uint8_t *)ptr; 5780 fw->main.data = fw->main.text + fw->main.textsz; 5781 fw->init.text = fw->main.data + fw->main.datasz; 5782 fw->init.data = fw->init.text + fw->init.textsz; 5783 fw->boot.text = fw->init.data + fw->init.datasz; 5784 return 0; 5785 } 5786 5787 /* 5788 * Extract text and data sections from a TLV firmware image. 5789 */ 5790 static int 5791 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 5792 uint16_t alt) 5793 { 5794 const struct iwn_fw_tlv_hdr *hdr; 5795 const struct iwn_fw_tlv *tlv; 5796 const uint8_t *ptr, *end; 5797 uint64_t altmask; 5798 uint32_t len; 5799 5800 if (fw->size < sizeof (*hdr)) { 5801 aprint_error_dev(sc->sc_dev, 5802 "firmware too short: %zd bytes\n", fw->size); 5803 return EINVAL; 5804 } 5805 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 5806 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 5807 aprint_error_dev(sc->sc_dev, 5808 "bad firmware signature 0x%08x\n", le32toh(hdr->signature)); 5809 return EINVAL; 5810 } 5811 DPRINTF(("FW: \"%.64s\", build 0x%x\n", hdr->descr, 5812 le32toh(hdr->build))); 5813 sc->ucode_rev = le32toh(hdr->rev); 5814 5815 /* 5816 * Select the closest supported alternative that is less than 5817 * or equal to the specified one. 5818 */ 5819 altmask = le64toh(hdr->altmask); 5820 while (alt > 0 && !(altmask & (1ULL << alt))) 5821 alt--; /* Downgrade. */ 5822 DPRINTF(("using alternative %d\n", alt)); 5823 5824 ptr = (const uint8_t *)(hdr + 1); 5825 end = (const uint8_t *)(fw->data + fw->size); 5826 5827 /* Parse type-length-value fields. */ 5828 while (ptr + sizeof (*tlv) <= end) { 5829 tlv = (const struct iwn_fw_tlv *)ptr; 5830 len = le32toh(tlv->len); 5831 5832 ptr += sizeof (*tlv); 5833 if (ptr + len > end) { 5834 aprint_error_dev(sc->sc_dev, 5835 "firmware too short: %zd bytes\n", fw->size); 5836 return EINVAL; 5837 } 5838 /* Skip other alternatives. 
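 * A TLV whose "alt" field is zero applies to every alternative;
 * anything else has to match the alternative selected above or the
 * entry is ignored.  Note that payloads are padded to a 4-byte
 * boundary, which the "next:" label below accounts for.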
*/ 5839 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 5840 goto next; 5841 5842 switch (le16toh(tlv->type)) { 5843 case IWN_FW_TLV_MAIN_TEXT: 5844 fw->main.text = ptr; 5845 fw->main.textsz = len; 5846 break; 5847 case IWN_FW_TLV_MAIN_DATA: 5848 fw->main.data = ptr; 5849 fw->main.datasz = len; 5850 break; 5851 case IWN_FW_TLV_INIT_TEXT: 5852 fw->init.text = ptr; 5853 fw->init.textsz = len; 5854 break; 5855 case IWN_FW_TLV_INIT_DATA: 5856 fw->init.data = ptr; 5857 fw->init.datasz = len; 5858 break; 5859 case IWN_FW_TLV_BOOT_TEXT: 5860 fw->boot.text = ptr; 5861 fw->boot.textsz = len; 5862 break; 5863 case IWN_FW_TLV_ENH_SENS: 5864 if (len != 0) { 5865 aprint_error_dev(sc->sc_dev, 5866 "TLV type %d has invalid size %u\n", 5867 le16toh(tlv->type), len); 5868 goto next; 5869 } 5870 sc->sc_flags |= IWN_FLAG_ENH_SENS; 5871 break; 5872 case IWN_FW_TLV_PHY_CALIB: 5873 if (len != sizeof(uint32_t)) { 5874 aprint_error_dev(sc->sc_dev, 5875 "TLV type %d has invalid size %u\n", 5876 le16toh(tlv->type), len); 5877 goto next; 5878 } 5879 if (le32toh(*ptr) <= IWN5000_PHY_CALIB_MAX) { 5880 sc->reset_noise_gain = le32toh(*ptr); 5881 sc->noise_gain = le32toh(*ptr) + 1; 5882 } 5883 break; 5884 case IWN_FW_TLV_FLAGS: 5885 if (len < sizeof(uint32_t)) 5886 break; 5887 if (len % sizeof(uint32_t)) 5888 break; 5889 sc->tlv_feature_flags = le32toh(*ptr); 5890 DPRINTF(("feature: 0x%08x\n", sc->tlv_feature_flags)); 5891 break; 5892 default: 5893 DPRINTF(("TLV type %d not handled\n", 5894 le16toh(tlv->type))); 5895 break; 5896 } 5897 next: /* TLV fields are 32-bit aligned. */ 5898 ptr += (len + 3) & ~3; 5899 } 5900 return 0; 5901 } 5902 5903 static int 5904 iwn_read_firmware(struct iwn_softc *sc) 5905 { 5906 struct iwn_fw_info *fw = &sc->fw; 5907 firmware_handle_t fwh; 5908 int error; 5909 5910 /* 5911 * Some PHY calibration commands are firmware-dependent; these 5912 * are the default values that will be overridden if 5913 * necessary. 5914 */ 5915 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 5916 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN; 5917 5918 /* Initialize for error returns */ 5919 fw->data = NULL; 5920 fw->size = 0; 5921 5922 /* Open firmware image. */ 5923 if ((error = firmware_open("if_iwn", sc->fwname, &fwh)) != 0) { 5924 aprint_error_dev(sc->sc_dev, 5925 "could not get firmware handle %s\n", sc->fwname); 5926 return error; 5927 } 5928 fw->size = firmware_get_size(fwh); 5929 if (fw->size < sizeof (uint32_t)) { 5930 aprint_error_dev(sc->sc_dev, 5931 "firmware too short: %zd bytes\n", fw->size); 5932 firmware_close(fwh); 5933 return EINVAL; 5934 } 5935 5936 /* Read the firmware. */ 5937 fw->data = firmware_malloc(fw->size); 5938 if (fw->data == NULL) { 5939 aprint_error_dev(sc->sc_dev, 5940 "not enough memory to stock firmware %s\n", sc->fwname); 5941 firmware_close(fwh); 5942 return ENOMEM; 5943 } 5944 error = firmware_read(fwh, 0, fw->data, fw->size); 5945 firmware_close(fwh); 5946 if (error != 0) { 5947 aprint_error_dev(sc->sc_dev, 5948 "could not read firmware %s\n", sc->fwname); 5949 goto out; 5950 } 5951 5952 /* Retrieve text and data sections. */ 5953 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ 5954 error = iwn_read_firmware_leg(sc, fw); 5955 else 5956 error = iwn_read_firmware_tlv(sc, fw, 1); 5957 if (error != 0) { 5958 aprint_error_dev(sc->sc_dev, 5959 "could not read firmware sections\n"); 5960 goto out; 5961 } 5962 5963 /* Make sure text and data sections fit in hardware memory. 
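 * The limits in sc->fw_text_maxsz and sc->fw_data_maxsz were chosen
 * when the chip was identified at attach time.  The boot code must
 * additionally be a multiple of 4 bytes, presumably because
 * iwn4965_load_bootcode() copies it into the BSM SRAM as 32-bit words.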
*/ 5964 if (fw->main.textsz > sc->fw_text_maxsz || 5965 fw->main.datasz > sc->fw_data_maxsz || 5966 fw->init.textsz > sc->fw_text_maxsz || 5967 fw->init.datasz > sc->fw_data_maxsz || 5968 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 5969 (fw->boot.textsz & 3) != 0) { 5970 aprint_error_dev(sc->sc_dev, 5971 "firmware sections too large\n"); 5972 goto out; 5973 } 5974 5975 /* We can proceed with loading the firmware. */ 5976 return 0; 5977 out: 5978 firmware_free(fw->data, fw->size); 5979 fw->data = NULL; 5980 fw->size = 0; 5981 return error ? error : EINVAL; 5982 } 5983 5984 static int 5985 iwn_clock_wait(struct iwn_softc *sc) 5986 { 5987 int ntries; 5988 5989 /* Set "initialization complete" bit. */ 5990 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 5991 5992 /* Wait for clock stabilization. */ 5993 for (ntries = 0; ntries < 2500; ntries++) { 5994 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) 5995 return 0; 5996 DELAY(10); 5997 } 5998 aprint_error_dev(sc->sc_dev, 5999 "timeout waiting for clock stabilization\n"); 6000 return ETIMEDOUT; 6001 } 6002 6003 static int 6004 iwn_apm_init(struct iwn_softc *sc) 6005 { 6006 pcireg_t reg; 6007 int error; 6008 6009 /* Disable L0s exit timer (NMI bug workaround). */ 6010 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 6011 /* Don't wait for ICH L0s (ICH bug workaround). */ 6012 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 6013 6014 /* Set FH wait threshold to max (HW bug under stress workaround). */ 6015 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); 6016 6017 /* Enable HAP INTA to move adapter from L1a to L0s. */ 6018 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); 6019 6020 /* Retrieve PCIe Active State Power Management (ASPM). */ 6021 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 6022 sc->sc_cap_off + PCIE_LCSR); 6023 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 6024 if (reg & PCIE_LCSR_ASPM_L1) /* L1 Entry enabled. */ 6025 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6026 else 6027 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6028 6029 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 6030 sc->hw_type <= IWN_HW_REV_TYPE_1000) 6031 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT); 6032 6033 /* Wait for clock stabilization before accessing prph. */ 6034 if ((error = iwn_clock_wait(sc)) != 0) 6035 return error; 6036 6037 if ((error = iwn_nic_lock(sc)) != 0) 6038 return error; 6039 if (sc->hw_type == IWN_HW_REV_TYPE_4965) { 6040 /* Enable DMA and BSM (Bootstrap State Machine). */ 6041 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6042 IWN_APMG_CLK_CTRL_DMA_CLK_RQT | 6043 IWN_APMG_CLK_CTRL_BSM_CLK_RQT); 6044 } else { 6045 /* Enable DMA. */ 6046 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6047 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 6048 } 6049 DELAY(20); 6050 /* Disable L1-Active. */ 6051 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 6052 iwn_nic_unlock(sc); 6053 6054 return 0; 6055 } 6056 6057 static void 6058 iwn_apm_stop_master(struct iwn_softc *sc) 6059 { 6060 int ntries; 6061 6062 /* Stop busmaster DMA activity. */ 6063 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); 6064 for (ntries = 0; ntries < 100; ntries++) { 6065 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) 6066 return; 6067 DELAY(10); 6068 } 6069 aprint_error_dev(sc->sc_dev, "timeout waiting for master\n"); 6070 } 6071 6072 static void 6073 iwn_apm_stop(struct iwn_softc *sc) 6074 { 6075 iwn_apm_stop_master(sc); 6076 6077 /* Reset the entire device. 
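 * Asserting the software reset bit returns the adapter to its
 * power-on state; clearing the "initialization complete" bit
 * afterwards lets the device drop back into low-power mode.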
*/ 6078 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); 6079 DELAY(10); 6080 /* Clear "initialization complete" bit. */ 6081 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 6082 } 6083 6084 static int 6085 iwn4965_nic_config(struct iwn_softc *sc) 6086 { 6087 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { 6088 /* 6089 * I don't believe this to be correct but this is what the 6090 * vendor driver is doing. Probably the bits should not be 6091 * shifted in IWN_RFCFG_*. 6092 */ 6093 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6094 IWN_RFCFG_TYPE(sc->rfcfg) | 6095 IWN_RFCFG_STEP(sc->rfcfg) | 6096 IWN_RFCFG_DASH(sc->rfcfg)); 6097 } 6098 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6099 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 6100 return 0; 6101 } 6102 6103 static int 6104 iwn5000_nic_config(struct iwn_softc *sc) 6105 { 6106 uint32_t tmp; 6107 int error; 6108 6109 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { 6110 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6111 IWN_RFCFG_TYPE(sc->rfcfg) | 6112 IWN_RFCFG_STEP(sc->rfcfg) | 6113 IWN_RFCFG_DASH(sc->rfcfg)); 6114 } 6115 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6116 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 6117 6118 if ((error = iwn_nic_lock(sc)) != 0) 6119 return error; 6120 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); 6121 6122 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 6123 /* 6124 * Select first Switching Voltage Regulator (1.32V) to 6125 * solve a stability issue related to noisy DC2DC line 6126 * in the silicon of 1000 Series. 6127 */ 6128 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR); 6129 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK; 6130 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32; 6131 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp); 6132 } 6133 iwn_nic_unlock(sc); 6134 6135 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) { 6136 /* Use internal power amplifier only. */ 6137 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA); 6138 } 6139 if ((sc->hw_type == IWN_HW_REV_TYPE_6050 || 6140 sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) { 6141 /* Indicate that ROM calibration version is >=6. */ 6142 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6); 6143 } 6144 if (sc->hw_type == IWN_HW_REV_TYPE_6005) 6145 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2); 6146 if (sc->hw_type == IWN_HW_REV_TYPE_2030 || 6147 sc->hw_type == IWN_HW_REV_TYPE_2000 || 6148 sc->hw_type == IWN_HW_REV_TYPE_135 || 6149 sc->hw_type == IWN_HW_REV_TYPE_105) 6150 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_IQ_INVERT); 6151 return 0; 6152 } 6153 6154 /* 6155 * Take NIC ownership over Intel Active Management Technology (AMT). 6156 */ 6157 static int 6158 iwn_hw_prepare(struct iwn_softc *sc) 6159 { 6160 int ntries; 6161 6162 /* Check if hardware is ready. */ 6163 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 6164 for (ntries = 0; ntries < 5; ntries++) { 6165 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 6166 IWN_HW_IF_CONFIG_NIC_READY) 6167 return 0; 6168 DELAY(10); 6169 } 6170 6171 /* Hardware not ready, force into ready state. */ 6172 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE); 6173 for (ntries = 0; ntries < 15000; ntries++) { 6174 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) & 6175 IWN_HW_IF_CONFIG_PREPARE_DONE)) 6176 break; 6177 DELAY(10); 6178 } 6179 if (ntries == 15000) 6180 return ETIMEDOUT; 6181 6182 /* Hardware should be ready now. 
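 * The PREPARE handshake above asks the Intel AMT/ME firmware to
 * release the NIC; once PREPARE_DONE has cleared we retry the
 * NIC_READY request one last time before giving up.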
*/ 6183 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 6184 for (ntries = 0; ntries < 5; ntries++) { 6185 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 6186 IWN_HW_IF_CONFIG_NIC_READY) 6187 return 0; 6188 DELAY(10); 6189 } 6190 return ETIMEDOUT; 6191 } 6192 6193 static int 6194 iwn_hw_init(struct iwn_softc *sc) 6195 { 6196 struct iwn_ops *ops = &sc->ops; 6197 int error, chnl, qid; 6198 6199 /* Clear pending interrupts. */ 6200 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6201 6202 if ((error = iwn_apm_init(sc)) != 0) { 6203 aprint_error_dev(sc->sc_dev, 6204 "could not power ON adapter\n"); 6205 return error; 6206 } 6207 6208 /* Select VMAIN power source. */ 6209 if ((error = iwn_nic_lock(sc)) != 0) 6210 return error; 6211 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK); 6212 iwn_nic_unlock(sc); 6213 6214 /* Perform adapter-specific initialization. */ 6215 if ((error = ops->nic_config(sc)) != 0) 6216 return error; 6217 6218 /* Initialize RX ring. */ 6219 if ((error = iwn_nic_lock(sc)) != 0) 6220 return error; 6221 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 6222 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0); 6223 /* Set physical address of RX ring (256-byte aligned). */ 6224 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8); 6225 /* Set physical address of RX status (16-byte aligned). */ 6226 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4); 6227 /* Enable RX. */ 6228 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 6229 IWN_FH_RX_CONFIG_ENA | 6230 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */ 6231 IWN_FH_RX_CONFIG_IRQ_DST_HOST | 6232 IWN_FH_RX_CONFIG_SINGLE_FRAME | 6233 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) | 6234 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG)); 6235 iwn_nic_unlock(sc); 6236 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7); 6237 6238 if ((error = iwn_nic_lock(sc)) != 0) 6239 return error; 6240 6241 /* Initialize TX scheduler. */ 6242 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 6243 6244 /* Set physical address of "keep warm" page (16-byte aligned). */ 6245 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4); 6246 6247 /* Initialize TX rings. */ 6248 for (qid = 0; qid < sc->ntxqs; qid++) { 6249 struct iwn_tx_ring *txq = &sc->txq[qid]; 6250 6251 /* Set physical address of TX ring (256-byte aligned). */ 6252 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid), 6253 txq->desc_dma.paddr >> 8); 6254 } 6255 iwn_nic_unlock(sc); 6256 6257 /* Enable DMA channels. */ 6258 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 6259 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 6260 IWN_FH_TX_CONFIG_DMA_ENA | 6261 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA); 6262 } 6263 6264 /* Clear "radio off" and "commands blocked" bits. */ 6265 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6266 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED); 6267 6268 /* Clear pending interrupts. */ 6269 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6270 /* Enable interrupt coalescing. */ 6271 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8); 6272 /* Enable interrupts. */ 6273 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 6274 6275 /* _Really_ make sure "radio off" bit is cleared! */ 6276 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6277 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6278 6279 /* Enable shadow registers. 
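 * Shadow registers are only enabled on the 6000 series and later
 * here; they are meant to let the host update frequently written
 * registers, such as the ring write pointers, without having to wake
 * the device first.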
*/ 6280 if (sc->hw_type >= IWN_HW_REV_TYPE_6000) 6281 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff); 6282 6283 if ((error = ops->load_firmware(sc)) != 0) { 6284 aprint_error_dev(sc->sc_dev, 6285 "could not load firmware\n"); 6286 return error; 6287 } 6288 /* Wait at most one second for firmware alive notification. */ 6289 if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) { 6290 aprint_error_dev(sc->sc_dev, 6291 "timeout waiting for adapter to initialize\n"); 6292 return error; 6293 } 6294 /* Do post-firmware initialization. */ 6295 return ops->post_alive(sc); 6296 } 6297 6298 static void 6299 iwn_hw_stop(struct iwn_softc *sc) 6300 { 6301 int chnl, qid, ntries; 6302 6303 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); 6304 6305 /* Disable interrupts. */ 6306 IWN_WRITE(sc, IWN_INT_MASK, 0); 6307 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6308 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff); 6309 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 6310 6311 /* Make sure we no longer hold the NIC lock. */ 6312 iwn_nic_unlock(sc); 6313 6314 /* Stop TX scheduler. */ 6315 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 6316 6317 /* Stop all DMA channels. */ 6318 if (iwn_nic_lock(sc) == 0) { 6319 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 6320 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); 6321 for (ntries = 0; ntries < 200; ntries++) { 6322 if (IWN_READ(sc, IWN_FH_TX_STATUS) & 6323 IWN_FH_TX_STATUS_IDLE(chnl)) 6324 break; 6325 DELAY(10); 6326 } 6327 } 6328 iwn_nic_unlock(sc); 6329 } 6330 6331 /* Stop RX ring. */ 6332 iwn_reset_rx_ring(sc, &sc->rxq); 6333 6334 /* Reset all TX rings. */ 6335 for (qid = 0; qid < sc->ntxqs; qid++) 6336 iwn_reset_tx_ring(sc, &sc->txq[qid]); 6337 6338 if (iwn_nic_lock(sc) == 0) { 6339 iwn_prph_write(sc, IWN_APMG_CLK_DIS, 6340 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 6341 iwn_nic_unlock(sc); 6342 } 6343 DELAY(5); 6344 /* Power OFF adapter. */ 6345 iwn_apm_stop(sc); 6346 } 6347 6348 static int 6349 iwn_init(struct ifnet *ifp) 6350 { 6351 struct iwn_softc *sc = ifp->if_softc; 6352 struct ieee80211com *ic = &sc->sc_ic; 6353 int error; 6354 6355 mutex_enter(&sc->sc_mtx); 6356 if (sc->sc_flags & IWN_FLAG_HW_INITED) 6357 goto out; 6358 if ((error = iwn_hw_prepare(sc)) != 0) { 6359 aprint_error_dev(sc->sc_dev, 6360 "hardware not ready\n"); 6361 goto fail; 6362 } 6363 6364 /* Check that the radio is not disabled by hardware switch. */ 6365 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { 6366 aprint_error_dev(sc->sc_dev, 6367 "radio is disabled by hardware switch\n"); 6368 error = EPERM; /* :-) */ 6369 goto fail; 6370 } 6371 6372 /* Read firmware images from the filesystem. */ 6373 if ((error = iwn_read_firmware(sc)) != 0) { 6374 aprint_error_dev(sc->sc_dev, 6375 "could not read firmware\n"); 6376 goto fail; 6377 } 6378 6379 /* Initialize interrupt mask to default value. */ 6380 sc->int_mask = IWN_INT_MASK_DEF; 6381 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 6382 6383 /* Initialize hardware and upload firmware. */ 6384 KASSERT(sc->fw.data != NULL && sc->fw.size > 0); 6385 error = iwn_hw_init(sc); 6386 firmware_free(sc->fw.data, sc->fw.size); 6387 sc->fw.data = NULL; 6388 sc->fw.size = 0; 6389 if (error != 0) { 6390 aprint_error_dev(sc->sc_dev, 6391 "could not initialize hardware\n"); 6392 goto fail; 6393 } 6394 6395 /* Configure adapter now that it is ready. 
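 * iwn_config() (earlier in this file) pushes the post-alive settings
 * to the firmware: among other things the RXON command describing our
 * MAC address and operating mode, and the initial TX power.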
*/ 6396 if ((error = iwn_config(sc)) != 0) { 6397 aprint_error_dev(sc->sc_dev, 6398 "could not configure device\n"); 6399 goto fail; 6400 } 6401 6402 sc->sc_beacon_wait = 0; 6403 6404 ifp->if_flags &= ~IFF_OACTIVE; 6405 ifp->if_flags |= IFF_RUNNING; 6406 6407 if (ic->ic_opmode != IEEE80211_M_MONITOR) 6408 ieee80211_begin_scan(ic, 0); 6409 else 6410 ieee80211_new_state(ic, IEEE80211_S_RUN, -1); 6411 6412 sc->sc_flags |= IWN_FLAG_HW_INITED; 6413 out: 6414 mutex_exit(&sc->sc_mtx); 6415 return 0; 6416 6417 fail: mutex_exit(&sc->sc_mtx); 6418 iwn_stop(ifp, 1); 6419 return error; 6420 } 6421 6422 static void 6423 iwn_stop(struct ifnet *ifp, int disable) 6424 { 6425 struct iwn_softc *sc = ifp->if_softc; 6426 struct ieee80211com *ic = &sc->sc_ic; 6427 6428 if (!disable) 6429 mutex_enter(&sc->sc_mtx); 6430 sc->sc_flags &= ~IWN_FLAG_HW_INITED; 6431 ifp->if_timer = sc->sc_tx_timer = 0; 6432 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 6433 6434 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 6435 6436 /* Power OFF hardware. */ 6437 iwn_hw_stop(sc); 6438 6439 if (!disable) 6440 mutex_exit(&sc->sc_mtx); 6441 } 6442 6443 /* 6444 * XXX MCLGETI alternative 6445 * 6446 * With IWN_USE_RBUF defined it uses the rbuf cache for receive buffers 6447 * as long as there are available free buffers then it uses MEXTMALLOC., 6448 * Without IWN_USE_RBUF defined it uses MEXTMALLOC exclusively. 6449 * The MCLGET4K code is used for testing an alternative mbuf cache. 6450 */ 6451 6452 static struct mbuf * 6453 MCLGETIalt(struct iwn_softc *sc, int how, 6454 struct ifnet *ifp __unused, u_int size) 6455 { 6456 struct mbuf *m; 6457 #ifdef IWN_USE_RBUF 6458 struct iwn_rbuf *rbuf; 6459 #endif 6460 6461 MGETHDR(m, how, MT_DATA); 6462 if (m == NULL) 6463 return NULL; 6464 6465 #ifdef IWN_USE_RBUF 6466 if (sc->rxq.nb_free_entries > 0 && 6467 (rbuf = iwn_alloc_rbuf(sc)) != NULL) { 6468 /* Attach buffer to mbuf header. */ 6469 MEXTADD(m, rbuf->vaddr, size, 0, iwn_free_rbuf, rbuf); 6470 m->m_flags |= M_EXT_RW; 6471 } 6472 else { 6473 MEXTMALLOC(m, size, how); 6474 if ((m->m_flags & M_EXT) == 0) { 6475 m_freem(m); 6476 return NULL; 6477 } 6478 } 6479 6480 #else 6481 #ifdef MCLGET4K 6482 if (size == 4096) 6483 MCLGET4K(m, how); 6484 else 6485 panic("size must be 4k"); 6486 #else 6487 MEXTMALLOC(m, size, how); 6488 #endif 6489 if ((m->m_flags & M_EXT) == 0) { 6490 m_freem(m); 6491 return NULL; 6492 } 6493 #endif 6494 6495 return m; 6496 } 6497 6498 #ifdef IWN_USE_RBUF 6499 static struct iwn_rbuf * 6500 iwn_alloc_rbuf(struct iwn_softc *sc) 6501 { 6502 struct iwn_rbuf *rbuf; 6503 mutex_enter(&sc->rxq.freelist_mtx); 6504 6505 rbuf = SLIST_FIRST(&sc->rxq.freelist); 6506 if (rbuf != NULL) { 6507 SLIST_REMOVE_HEAD(&sc->rxq.freelist, next); 6508 sc->rxq.nb_free_entries --; 6509 } 6510 mutex_exit(&sc->rxq.freelist_mtx); 6511 return rbuf; 6512 } 6513 6514 /* 6515 * This is called automatically by the network stack when the mbuf to which 6516 * our RX buffer is attached is freed. 6517 */ 6518 static void 6519 iwn_free_rbuf(struct mbuf* m, void *buf, size_t size, void *arg) 6520 { 6521 struct iwn_rbuf *rbuf = arg; 6522 struct iwn_softc *sc = rbuf->sc; 6523 6524 /* Put the RX buffer back in the free list. 
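 * This is the external-storage free callback registered with
 * MEXTADD() in MCLGETIalt(): the rbuf goes back on rxq.freelist so it
 * can be handed out again, and the mbuf header itself is returned to
 * the mbuf pool cache.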
*/ 6525 mutex_enter(&sc->rxq.freelist_mtx); 6526 SLIST_INSERT_HEAD(&sc->rxq.freelist, rbuf, next); 6527 mutex_exit(&sc->rxq.freelist_mtx); 6528 6529 sc->rxq.nb_free_entries ++; 6530 if (__predict_true(m != NULL)) 6531 pool_cache_put(mb_cache, m); 6532 } 6533 6534 static int 6535 iwn_alloc_rpool(struct iwn_softc *sc) 6536 { 6537 struct iwn_rx_ring *ring = &sc->rxq; 6538 struct iwn_rbuf *rbuf; 6539 int i, error; 6540 6541 mutex_init(&ring->freelist_mtx, MUTEX_DEFAULT, IPL_NET); 6542 6543 /* Allocate a big chunk of DMA'able memory... */ 6544 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->buf_dma, NULL, 6545 IWN_RBUF_COUNT * IWN_RBUF_SIZE, PAGE_SIZE); 6546 if (error != 0) { 6547 aprint_error_dev(sc->sc_dev, 6548 "could not allocate RX buffers DMA memory\n"); 6549 return error; 6550 } 6551 /* ...and split it into chunks of IWN_RBUF_SIZE bytes. */ 6552 SLIST_INIT(&ring->freelist); 6553 for (i = 0; i < IWN_RBUF_COUNT; i++) { 6554 rbuf = &ring->rbuf[i]; 6555 6556 rbuf->sc = sc; /* Backpointer for callbacks. */ 6557 rbuf->vaddr = (void *)((vaddr_t)ring->buf_dma.vaddr + i * IWN_RBUF_SIZE); 6558 rbuf->paddr = ring->buf_dma.paddr + i * IWN_RBUF_SIZE; 6559 6560 SLIST_INSERT_HEAD(&ring->freelist, rbuf, next); 6561 } 6562 ring->nb_free_entries = IWN_RBUF_COUNT; 6563 return 0; 6564 } 6565 6566 static void 6567 iwn_free_rpool(struct iwn_softc *sc) 6568 { 6569 iwn_dma_contig_free(&sc->rxq.buf_dma); 6570 } 6571 #endif 6572 6573 /* 6574 * XXX: Hack to set the current channel to the value advertised in beacons or 6575 * probe responses. Only used during AP detection. 6576 * XXX: Duplicated from if_iwi.c 6577 */ 6578 static void 6579 iwn_fix_channel(struct ieee80211com *ic, struct mbuf *m, 6580 struct iwn_rx_stat *stat) 6581 { 6582 struct iwn_softc *sc = ic->ic_ifp->if_softc; 6583 struct ieee80211_frame *wh; 6584 uint8_t subtype; 6585 uint8_t *frm, *efrm; 6586 6587 wh = mtod(m, struct ieee80211_frame *); 6588 6589 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT) 6590 return; 6591 6592 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 6593 6594 if (subtype != IEEE80211_FC0_SUBTYPE_BEACON && 6595 subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP) 6596 return; 6597 6598 if (sc->sc_flags & IWN_FLAG_SCANNING_5GHZ) { 6599 int chan = le16toh(stat->chan); 6600 if (chan < __arraycount(ic->ic_channels)) 6601 ic->ic_curchan = &ic->ic_channels[chan]; 6602 return; 6603 } 6604 6605 frm = (uint8_t *)(wh + 1); 6606 efrm = mtod(m, uint8_t *) + m->m_len; 6607 6608 frm += 12; /* skip tstamp, bintval and capinfo fields */ 6609 while (frm + 2 < efrm) { 6610 if (*frm == IEEE80211_ELEMID_DSPARMS) { 6611 #if IEEE80211_CHAN_MAX < 255 6612 if (frm[2] <= IEEE80211_CHAN_MAX) 6613 #endif 6614 ic->ic_curchan = &ic->ic_channels[frm[2]]; 6615 } 6616 6617 frm += frm[1] + 2; 6618 } 6619 } 6620 6621 #ifdef notyetMODULE 6622 6623 MODULE(MODULE_CLASS_DRIVER, if_iwn, "pci"); 6624 6625 #ifdef _MODULE 6626 #include "ioconf.c" 6627 #endif 6628 6629 static int 6630 if_iwn_modcmd(modcmd_t cmd, void *data) 6631 { 6632 int error = 0; 6633 6634 switch (cmd) { 6635 case MODULE_CMD_INIT: 6636 #ifdef _MODULE 6637 error = config_init_component(cfdriver_ioconf_if_iwn, 6638 cfattach_ioconf_if_iwn, cfdata_ioconf_if_iwn); 6639 #endif 6640 return error; 6641 case MODULE_CMD_FINI: 6642 #ifdef _MODULE 6643 error = config_fini_component(cfdriver_ioconf_if_iwn, 6644 cfattach_ioconf_if_iwn, cfdata_ioconf_if_iwn); 6645 #endif 6646 return error; 6647 case MODULE_CMD_AUTOUNLOAD: 6648 #ifdef _MODULE 6649 /* XXX This is not optional! 
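 * Presumably the intent is that autounload should be refused (e.g.
 * with EBUSY) while devices are still attached; as written this stub
 * just returns success.  The whole block is under "notyetMODULE" and
 * is not compiled yet anyway.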
 */
6650 #endif
6651 		return error;
6652 	default:
6653 		return ENOTTY;
6654 	}
6655 }
6656 #endif
6657 