1 /*	$OpenBSD: if_iwn.c,v 1.205 2019/01/13 22:57:37 kn Exp $	*/
2 
3 /*-
4  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
21  * adapters.
22  */
23 
24 #include "bpfilter.h"
25 
26 #include <sys/param.h>
27 #include <sys/sockio.h>
28 #include <sys/mbuf.h>
29 #include <sys/kernel.h>
30 #include <sys/rwlock.h>
31 #include <sys/socket.h>
32 #include <sys/systm.h>
33 #include <sys/malloc.h>
34 #include <sys/conf.h>
35 #include <sys/device.h>
36 #include <sys/task.h>
37 #include <sys/endian.h>
38 
39 #include <machine/bus.h>
40 #include <machine/intr.h>
41 
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44 #include <dev/pci/pcidevs.h>
45 
46 #if NBPFILTER > 0
47 #include <net/bpf.h>
48 #endif
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 
53 #include <netinet/in.h>
54 #include <netinet/if_ether.h>
55 
56 #include <net80211/ieee80211_var.h>
57 #include <net80211/ieee80211_amrr.h>
58 #include <net80211/ieee80211_mira.h>
59 #include <net80211/ieee80211_radiotap.h>
60 
61 #include <dev/pci/if_iwnreg.h>
62 #include <dev/pci/if_iwnvar.h>
63 
64 static const struct pci_matchid iwn_devices[] = {
65 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_4965_1 },
66 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_4965_2 },
67 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5100_1 },
68 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5100_2 },
69 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5150_1 },
70 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5150_2 },
71 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5300_1 },
72 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5300_2 },
73 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5350_1 },
74 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5350_2 },
75 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1000_1 },
76 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1000_2 },
77 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6300_1 },
78 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6300_2 },
79 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6200_1 },
80 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6200_2 },
81 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6050_1 },
82 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6050_2 },
83 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6005_1 },
84 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6005_2 },
85 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6030_1 },
86 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6030_2 },
87 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1030_1 },
88 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1030_2 },
89 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_100_1 },
90 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_100_2 },
91 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_130_1 },
92 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_130_2 },
93 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6235_1 },
94 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6235_2 },
95 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2230_1 },
96 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2230_2 },
97 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2200_1 },
98 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2200_2 },
99 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_135_1 },
100 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_135_2 },
101 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_105_1 },
102 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_105_2 },
103 };
104 
105 int		iwn_match(struct device *, void *, void *);
106 void		iwn_attach(struct device *, struct device *, void *);
107 int		iwn4965_attach(struct iwn_softc *, pci_product_id_t);
108 int		iwn5000_attach(struct iwn_softc *, pci_product_id_t);
109 #if NBPFILTER > 0
110 void		iwn_radiotap_attach(struct iwn_softc *);
111 #endif
112 int		iwn_detach(struct device *, int);
113 int		iwn_activate(struct device *, int);
114 void		iwn_wakeup(struct iwn_softc *);
115 void		iwn_init_task(void *);
116 int		iwn_nic_lock(struct iwn_softc *);
117 int		iwn_eeprom_lock(struct iwn_softc *);
118 int		iwn_init_otprom(struct iwn_softc *);
119 int		iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
120 int		iwn_dma_contig_alloc(bus_dma_tag_t, struct iwn_dma_info *,
121 		    void **, bus_size_t, bus_size_t);
122 void		iwn_dma_contig_free(struct iwn_dma_info *);
123 int		iwn_alloc_sched(struct iwn_softc *);
124 void		iwn_free_sched(struct iwn_softc *);
125 int		iwn_alloc_kw(struct iwn_softc *);
126 void		iwn_free_kw(struct iwn_softc *);
127 int		iwn_alloc_ict(struct iwn_softc *);
128 void		iwn_free_ict(struct iwn_softc *);
129 int		iwn_alloc_fwmem(struct iwn_softc *);
130 void		iwn_free_fwmem(struct iwn_softc *);
131 int		iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
132 void		iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
133 void		iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
134 int		iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
135 		    int);
136 void		iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
137 void		iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
138 void		iwn5000_ict_reset(struct iwn_softc *);
139 int		iwn_read_eeprom(struct iwn_softc *);
140 void		iwn4965_read_eeprom(struct iwn_softc *);
141 void		iwn4965_print_power_group(struct iwn_softc *, int);
142 void		iwn5000_read_eeprom(struct iwn_softc *);
143 void		iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
144 void		iwn_read_eeprom_enhinfo(struct iwn_softc *);
145 struct		ieee80211_node *iwn_node_alloc(struct ieee80211com *);
146 void		iwn_newassoc(struct ieee80211com *, struct ieee80211_node *,
147 		    int);
148 int		iwn_media_change(struct ifnet *);
149 int		iwn_newstate(struct ieee80211com *, enum ieee80211_state, int);
150 void		iwn_iter_func(void *, struct ieee80211_node *);
151 void		iwn_calib_timeout(void *);
152 int		iwn_ccmp_decap(struct iwn_softc *, struct mbuf *,
153 		    struct ieee80211_node *);
154 void		iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
155 		    struct iwn_rx_data *);
156 void		iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
157 		    struct iwn_rx_data *);
158 void		iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
159 		    struct iwn_rx_data *);
160 void		iwn5000_rx_calib_results(struct iwn_softc *,
161 		    struct iwn_rx_desc *, struct iwn_rx_data *);
162 void		iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
163 		    struct iwn_rx_data *);
164 void		iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
165 		    struct iwn_rx_data *);
166 void		iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
167 		    struct iwn_rx_data *);
168 void		iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
169 		    uint8_t, uint8_t, uint8_t, uint16_t);
170 void		iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
171 void		iwn_notif_intr(struct iwn_softc *);
172 void		iwn_wakeup_intr(struct iwn_softc *);
173 void		iwn_fatal_intr(struct iwn_softc *);
174 int		iwn_intr(void *);
175 void		iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
176 		    uint16_t);
177 void		iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
178 		    uint16_t);
179 void		iwn5000_reset_sched(struct iwn_softc *, int, int);
180 int		iwn_tx(struct iwn_softc *, struct mbuf *,
181 		    struct ieee80211_node *);
182 int		iwn_rval2ridx(int);
183 void		iwn_start(struct ifnet *);
184 void		iwn_watchdog(struct ifnet *);
185 int		iwn_ioctl(struct ifnet *, u_long, caddr_t);
186 int		iwn_cmd(struct iwn_softc *, int, const void *, int, int);
187 int		iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
188 		    int);
189 int		iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
190 		    int);
191 int		iwn_set_link_quality(struct iwn_softc *,
192 		    struct ieee80211_node *);
193 int		iwn_add_broadcast_node(struct iwn_softc *, int, int);
194 void		iwn_updateedca(struct ieee80211com *);
195 void		iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
196 int		iwn_set_critical_temp(struct iwn_softc *);
197 int		iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
198 void		iwn4965_power_calibration(struct iwn_softc *, int);
199 int		iwn4965_set_txpower(struct iwn_softc *, int);
200 int		iwn5000_set_txpower(struct iwn_softc *, int);
201 int		iwn4965_get_rssi(const struct iwn_rx_stat *);
202 int		iwn5000_get_rssi(const struct iwn_rx_stat *);
203 int		iwn_get_noise(const struct iwn_rx_general_stats *);
204 int		iwn4965_get_temperature(struct iwn_softc *);
205 int		iwn5000_get_temperature(struct iwn_softc *);
206 int		iwn_init_sensitivity(struct iwn_softc *);
207 void		iwn_collect_noise(struct iwn_softc *,
208 		    const struct iwn_rx_general_stats *);
209 int		iwn4965_init_gains(struct iwn_softc *);
210 int		iwn5000_init_gains(struct iwn_softc *);
211 int		iwn4965_set_gains(struct iwn_softc *);
212 int		iwn5000_set_gains(struct iwn_softc *);
213 void		iwn_tune_sensitivity(struct iwn_softc *,
214 		    const struct iwn_rx_stats *);
215 int		iwn_send_sensitivity(struct iwn_softc *);
216 int		iwn_set_pslevel(struct iwn_softc *, int, int, int);
217 int		iwn_send_temperature_offset(struct iwn_softc *);
218 int		iwn_send_btcoex(struct iwn_softc *);
219 int		iwn_send_advanced_btcoex(struct iwn_softc *);
220 int		iwn5000_runtime_calib(struct iwn_softc *);
221 int		iwn_config(struct iwn_softc *);
222 uint16_t	iwn_get_active_dwell_time(struct iwn_softc *, uint16_t, uint8_t);
223 uint16_t	iwn_limit_dwell(struct iwn_softc *, uint16_t);
224 uint16_t	iwn_get_passive_dwell_time(struct iwn_softc *, uint16_t);
225 int		iwn_scan(struct iwn_softc *, uint16_t, int);
226 void		iwn_scan_abort(struct iwn_softc *);
227 int		iwn_bgscan(struct ieee80211com *);
228 int		iwn_auth(struct iwn_softc *, int);
229 int		iwn_run(struct iwn_softc *);
230 int		iwn_set_key(struct ieee80211com *, struct ieee80211_node *,
231 		    struct ieee80211_key *);
232 void		iwn_delete_key(struct ieee80211com *, struct ieee80211_node *,
233 		    struct ieee80211_key *);
234 void		iwn_update_htprot(struct ieee80211com *,
235 		    struct ieee80211_node *);
236 int		iwn_ampdu_rx_start(struct ieee80211com *,
237 		    struct ieee80211_node *, uint8_t);
238 void		iwn_ampdu_rx_stop(struct ieee80211com *,
239 		    struct ieee80211_node *, uint8_t);
240 int		iwn_ampdu_tx_start(struct ieee80211com *,
241 		    struct ieee80211_node *, uint8_t);
242 void		iwn_ampdu_tx_stop(struct ieee80211com *,
243 		    struct ieee80211_node *, uint8_t);
244 void		iwn4965_ampdu_tx_start(struct iwn_softc *,
245 		    struct ieee80211_node *, uint8_t, uint16_t);
246 void		iwn4965_ampdu_tx_stop(struct iwn_softc *,
247 		    uint8_t, uint16_t);
248 void		iwn5000_ampdu_tx_start(struct iwn_softc *,
249 		    struct ieee80211_node *, uint8_t, uint16_t);
250 void		iwn5000_ampdu_tx_stop(struct iwn_softc *,
251 		    uint8_t, uint16_t);
252 int		iwn5000_query_calibration(struct iwn_softc *);
253 int		iwn5000_send_calibration(struct iwn_softc *);
254 int		iwn5000_send_wimax_coex(struct iwn_softc *);
255 int		iwn5000_crystal_calib(struct iwn_softc *);
256 int		iwn6000_temp_offset_calib(struct iwn_softc *);
257 int		iwn2000_temp_offset_calib(struct iwn_softc *);
258 int		iwn4965_post_alive(struct iwn_softc *);
259 int		iwn5000_post_alive(struct iwn_softc *);
260 int		iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
261 		    int);
262 int		iwn4965_load_firmware(struct iwn_softc *);
263 int		iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
264 		    const uint8_t *, int);
265 int		iwn5000_load_firmware(struct iwn_softc *);
266 int		iwn_read_firmware_leg(struct iwn_softc *,
267 		    struct iwn_fw_info *);
268 int		iwn_read_firmware_tlv(struct iwn_softc *,
269 		    struct iwn_fw_info *, uint16_t);
270 int		iwn_read_firmware(struct iwn_softc *);
271 int		iwn_clock_wait(struct iwn_softc *);
272 int		iwn_apm_init(struct iwn_softc *);
273 void		iwn_apm_stop_master(struct iwn_softc *);
274 void		iwn_apm_stop(struct iwn_softc *);
275 int		iwn4965_nic_config(struct iwn_softc *);
276 int		iwn5000_nic_config(struct iwn_softc *);
277 int		iwn_hw_prepare(struct iwn_softc *);
278 int		iwn_hw_init(struct iwn_softc *);
279 void		iwn_hw_stop(struct iwn_softc *);
280 int		iwn_init(struct ifnet *);
281 void		iwn_stop(struct ifnet *, int);
282 
283 #ifdef IWN_DEBUG
284 #define DPRINTF(x)	do { if (iwn_debug > 0) printf x; } while (0)
285 #define DPRINTFN(n, x)	do { if (iwn_debug >= (n)) printf x; } while (0)
286 int iwn_debug = 1;
287 #else
288 #define DPRINTF(x)
289 #define DPRINTFN(n, x)
290 #endif
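/*
 * Debugging aid (descriptive comment, not upstream text): build the driver
 * with IWN_DEBUG defined to compile in the DPRINTF()/DPRINTFN() traces above;
 * the iwn_debug variable then selects the verbosity level at run time.
 */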
291 
292 struct cfdriver iwn_cd = {
293 	NULL, "iwn", DV_IFNET
294 };
295 
296 struct cfattach iwn_ca = {
297 	sizeof (struct iwn_softc), iwn_match, iwn_attach, iwn_detach,
298 	iwn_activate
299 };
300 
301 int
302 iwn_match(struct device *parent, void *match, void *aux)
303 {
304 	return pci_matchbyid((struct pci_attach_args *)aux, iwn_devices,
305 	    nitems(iwn_devices));
306 }
307 
308 void
309 iwn_attach(struct device *parent, struct device *self, void *aux)
310 {
311 	struct iwn_softc *sc = (struct iwn_softc *)self;
312 	struct ieee80211com *ic = &sc->sc_ic;
313 	struct ifnet *ifp = &ic->ic_if;
314 	struct pci_attach_args *pa = aux;
315 	const char *intrstr;
316 	pci_intr_handle_t ih;
317 	pcireg_t memtype, reg;
318 	int i, error;
319 
320 	sc->sc_pct = pa->pa_pc;
321 	sc->sc_pcitag = pa->pa_tag;
322 	sc->sc_dmat = pa->pa_dmat;
323 
324 	/*
325 	 * Get the offset of the PCI Express Capability Structure in PCI
326 	 * Configuration Space.
327 	 */
328 	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
329 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
330 	if (error == 0) {
331 		printf(": PCIe capability structure not found!\n");
332 		return;
333 	}
334 
335 	/* Clear device-specific "PCI retry timeout" register (41h). */
336 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
337 	if (reg & 0xff00)
338 		pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
339 
340 	/* Hardware bug workaround. */
341 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
342 	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
343 		DPRINTF(("PCIe INTx Disable set\n"));
344 		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
345 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
346 		    PCI_COMMAND_STATUS_REG, reg);
347 	}
348 
349 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IWN_PCI_BAR0);
350 	error = pci_mapreg_map(pa, IWN_PCI_BAR0, memtype, 0, &sc->sc_st,
351 	    &sc->sc_sh, NULL, &sc->sc_sz, 0);
352 	if (error != 0) {
353 		printf(": can't map mem space\n");
354 		return;
355 	}
356 
357 	/* Install interrupt handler. */
358 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
359 		printf(": can't map interrupt\n");
360 		return;
361 	}
362 	intrstr = pci_intr_string(sc->sc_pct, ih);
363 	sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwn_intr, sc,
364 	    sc->sc_dev.dv_xname);
365 	if (sc->sc_ih == NULL) {
366 		printf(": can't establish interrupt");
367 		if (intrstr != NULL)
368 			printf(" at %s", intrstr);
369 		printf("\n");
370 		return;
371 	}
372 	printf(": %s", intrstr);
373 
374 	/* Read hardware revision and attach. */
375 	sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0x1f;
376 	if (sc->hw_type == IWN_HW_REV_TYPE_4965)
377 		error = iwn4965_attach(sc, PCI_PRODUCT(pa->pa_id));
378 	else
379 		error = iwn5000_attach(sc, PCI_PRODUCT(pa->pa_id));
380 	if (error != 0) {
381 		printf(": could not attach device\n");
382 		return;
383 	}
384 
385 	if ((error = iwn_hw_prepare(sc)) != 0) {
386 		printf(": hardware not ready\n");
387 		return;
388 	}
389 
390 	/* Read MAC address, channels, etc from EEPROM. */
391 	if ((error = iwn_read_eeprom(sc)) != 0) {
392 		printf(": could not read EEPROM\n");
393 		return;
394 	}
395 
396 	/* Allocate DMA memory for firmware transfers. */
397 	if ((error = iwn_alloc_fwmem(sc)) != 0) {
398 		printf(": could not allocate memory for firmware\n");
399 		return;
400 	}
401 
402 	/* Allocate "Keep Warm" page. */
403 	if ((error = iwn_alloc_kw(sc)) != 0) {
404 		printf(": could not allocate keep warm page\n");
405 		goto fail1;
406 	}
407 
408 	/* Allocate ICT table for 5000 Series. */
409 	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
410 	    (error = iwn_alloc_ict(sc)) != 0) {
411 		printf(": could not allocate ICT table\n");
412 		goto fail2;
413 	}
414 
415 	/* Allocate TX scheduler "rings". */
416 	if ((error = iwn_alloc_sched(sc)) != 0) {
417 		printf(": could not allocate TX scheduler rings\n");
418 		goto fail3;
419 	}
420 
421 	/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
422 	for (i = 0; i < sc->ntxqs; i++) {
423 		if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
424 			printf(": could not allocate TX ring %d\n", i);
425 			goto fail4;
426 		}
427 	}
428 
429 	/* Allocate RX ring. */
430 	if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
431 		printf(": could not allocate RX ring\n");
432 		goto fail4;
433 	}
434 
435 	/* Clear pending interrupts. */
436 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
437 
438 	/* Count the number of available chains. */
439 	sc->ntxchains =
440 	    ((sc->txchainmask >> 2) & 1) +
441 	    ((sc->txchainmask >> 1) & 1) +
442 	    ((sc->txchainmask >> 0) & 1);
443 	sc->nrxchains =
444 	    ((sc->rxchainmask >> 2) & 1) +
445 	    ((sc->rxchainmask >> 1) & 1) +
446 	    ((sc->rxchainmask >> 0) & 1);
447 	printf(", MIMO %dT%dR, %.4s, address %s\n", sc->ntxchains,
448 	    sc->nrxchains, sc->eeprom_domain, ether_sprintf(ic->ic_myaddr));
449 
450 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
451 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
452 	ic->ic_state = IEEE80211_S_INIT;
453 
454 	/* Set device capabilities. */
455 	ic->ic_caps =
456 	    IEEE80211_C_WEP |		/* WEP */
457 	    IEEE80211_C_RSN |		/* WPA/RSN */
458 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
459 	    IEEE80211_C_SCANALLBAND |	/* driver scans all bands at once */
460 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
461 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
462 	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
463 	    IEEE80211_C_PMGT;		/* power saving supported */
464 
465 	/* No optional HT features supported for now. */
466 	ic->ic_htcaps = 0;
467 	ic->ic_htxcaps = 0;
468 	ic->ic_txbfcaps = 0;
469 	ic->ic_aselcaps = 0;
470 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
471 	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
472 		/* Set HT capabilities. */
473 		ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
474 #ifdef notyet
475 		ic->ic_htcaps |=
476 #if IWN_RBUF_SIZE == 8192
477 		    IEEE80211_HTCAP_AMSDU7935 |
478 #endif
479 		    IEEE80211_HTCAP_CBW20_40 |
480 		    IEEE80211_HTCAP_SGI40;
481 		if (sc->hw_type != IWN_HW_REV_TYPE_4965)
482 			ic->ic_htcaps |= IEEE80211_HTCAP_GF;
483 		if (sc->hw_type == IWN_HW_REV_TYPE_6050)
484 			ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
485 		else
486 			ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
487 #endif	/* notyet */
488 	}
489 
490 	/* Set supported legacy rates. */
491 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
492 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
493 	if (sc->sc_flags & IWN_FLAG_HAS_5GHZ) {
494 		ic->ic_sup_rates[IEEE80211_MODE_11A] =
495 		    ieee80211_std_rateset_11a;
496 	}
497 	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
498 		/* Set supported HT rates. */
499 		ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
500 #ifdef notyet
501 		if (sc->nrxchains > 1)
502 			ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
503 		if (sc->nrxchains > 2)
504 			ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
505 #endif
506 	}
507 
508 	/* IBSS channel undefined for now. */
509 	ic->ic_ibss_chan = &ic->ic_channels[0];
510 
511 	ifp->if_softc = sc;
512 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
513 	ifp->if_ioctl = iwn_ioctl;
514 	ifp->if_start = iwn_start;
515 	ifp->if_watchdog = iwn_watchdog;
516 	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
517 
518 	if_attach(ifp);
519 	ieee80211_ifattach(ifp);
520 	ic->ic_node_alloc = iwn_node_alloc;
521 	ic->ic_bgscan_start = iwn_bgscan;
522 	ic->ic_newassoc = iwn_newassoc;
523 	ic->ic_updateedca = iwn_updateedca;
524 	ic->ic_set_key = iwn_set_key;
525 	ic->ic_delete_key = iwn_delete_key;
526 	ic->ic_update_htprot = iwn_update_htprot;
527 	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
528 	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
529 #ifdef notyet
530 	ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
531 	ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
532 #endif
533 
534 	/* Override 802.11 state transition machine. */
535 	sc->sc_newstate = ic->ic_newstate;
536 	ic->ic_newstate = iwn_newstate;
537 	ieee80211_media_init(ifp, iwn_media_change, ieee80211_media_status);
538 
539 	sc->amrr.amrr_min_success_threshold =  1;
540 	sc->amrr.amrr_max_success_threshold = 15;
541 
542 #if NBPFILTER > 0
543 	iwn_radiotap_attach(sc);
544 #endif
545 	timeout_set(&sc->calib_to, iwn_calib_timeout, sc);
546 	rw_init(&sc->sc_rwlock, "iwnlock");
547 	task_set(&sc->init_task, iwn_init_task, sc);
548 	return;
549 
550 	/* Free allocated memory if something failed during attachment. */
551 fail4:	while (--i >= 0)
552 		iwn_free_tx_ring(sc, &sc->txq[i]);
553 	iwn_free_sched(sc);
554 fail3:	if (sc->ict != NULL)
555 		iwn_free_ict(sc);
556 fail2:	iwn_free_kw(sc);
557 fail1:	iwn_free_fwmem(sc);
558 }
559 
560 int
561 iwn4965_attach(struct iwn_softc *sc, pci_product_id_t pid)
562 {
563 	struct iwn_ops *ops = &sc->ops;
564 
565 	ops->load_firmware = iwn4965_load_firmware;
566 	ops->read_eeprom = iwn4965_read_eeprom;
567 	ops->post_alive = iwn4965_post_alive;
568 	ops->nic_config = iwn4965_nic_config;
569 	ops->update_sched = iwn4965_update_sched;
570 	ops->get_temperature = iwn4965_get_temperature;
571 	ops->get_rssi = iwn4965_get_rssi;
572 	ops->set_txpower = iwn4965_set_txpower;
573 	ops->init_gains = iwn4965_init_gains;
574 	ops->set_gains = iwn4965_set_gains;
575 	ops->add_node = iwn4965_add_node;
576 	ops->tx_done = iwn4965_tx_done;
577 	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
578 	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
579 	sc->ntxqs = IWN4965_NTXQUEUES;
580 	sc->ndmachnls = IWN4965_NDMACHNLS;
581 	sc->broadcast_id = IWN4965_ID_BROADCAST;
582 	sc->rxonsz = IWN4965_RXONSZ;
583 	sc->schedsz = IWN4965_SCHEDSZ;
584 	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
585 	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
586 	sc->fwsz = IWN4965_FWSZ;
587 	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
588 	sc->limits = &iwn4965_sensitivity_limits;
589 	sc->fwname = "iwn-4965";
590 	/* Override chain masks; the ROM is known to be broken. */
591 	sc->txchainmask = IWN_ANT_AB;
592 	sc->rxchainmask = IWN_ANT_ABC;
593 
594 	return 0;
595 }
596 
597 int
598 iwn5000_attach(struct iwn_softc *sc, pci_product_id_t pid)
599 {
600 	struct iwn_ops *ops = &sc->ops;
601 
602 	ops->load_firmware = iwn5000_load_firmware;
603 	ops->read_eeprom = iwn5000_read_eeprom;
604 	ops->post_alive = iwn5000_post_alive;
605 	ops->nic_config = iwn5000_nic_config;
606 	ops->update_sched = iwn5000_update_sched;
607 	ops->get_temperature = iwn5000_get_temperature;
608 	ops->get_rssi = iwn5000_get_rssi;
609 	ops->set_txpower = iwn5000_set_txpower;
610 	ops->init_gains = iwn5000_init_gains;
611 	ops->set_gains = iwn5000_set_gains;
612 	ops->add_node = iwn5000_add_node;
613 	ops->tx_done = iwn5000_tx_done;
614 	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
615 	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
616 	sc->ntxqs = IWN5000_NTXQUEUES;
617 	sc->ndmachnls = IWN5000_NDMACHNLS;
618 	sc->broadcast_id = IWN5000_ID_BROADCAST;
619 	sc->rxonsz = IWN5000_RXONSZ;
620 	sc->schedsz = IWN5000_SCHEDSZ;
621 	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
622 	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
623 	sc->fwsz = IWN5000_FWSZ;
624 	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
625 
626 	switch (sc->hw_type) {
627 	case IWN_HW_REV_TYPE_5100:
628 		sc->limits = &iwn5000_sensitivity_limits;
629 		sc->fwname = "iwn-5000";
630 		/* Override chain masks; the ROM is known to be broken. */
631 		sc->txchainmask = IWN_ANT_B;
632 		sc->rxchainmask = IWN_ANT_AB;
633 		break;
634 	case IWN_HW_REV_TYPE_5150:
635 		sc->limits = &iwn5150_sensitivity_limits;
636 		sc->fwname = "iwn-5150";
637 		break;
638 	case IWN_HW_REV_TYPE_5300:
639 	case IWN_HW_REV_TYPE_5350:
640 		sc->limits = &iwn5000_sensitivity_limits;
641 		sc->fwname = "iwn-5000";
642 		break;
643 	case IWN_HW_REV_TYPE_1000:
644 		sc->limits = &iwn1000_sensitivity_limits;
645 		sc->fwname = "iwn-1000";
646 		break;
647 	case IWN_HW_REV_TYPE_6000:
648 		sc->limits = &iwn6000_sensitivity_limits;
649 		sc->fwname = "iwn-6000";
650 		if (pid == PCI_PRODUCT_INTEL_WL_6200_1 ||
651 		    pid == PCI_PRODUCT_INTEL_WL_6200_2) {
652 			sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
653 			/* Override chain masks; the ROM is known to be broken. */
654 			sc->txchainmask = IWN_ANT_BC;
655 			sc->rxchainmask = IWN_ANT_BC;
656 		}
657 		break;
658 	case IWN_HW_REV_TYPE_6050:
659 		sc->limits = &iwn6000_sensitivity_limits;
660 		sc->fwname = "iwn-6050";
661 		break;
662 	case IWN_HW_REV_TYPE_6005:
663 		sc->limits = &iwn6000_sensitivity_limits;
664 		if (pid != PCI_PRODUCT_INTEL_WL_6005_1 &&
665 		    pid != PCI_PRODUCT_INTEL_WL_6005_2) {
666 			sc->fwname = "iwn-6030";
667 			sc->sc_flags |= IWN_FLAG_ADV_BT_COEX;
668 		} else
669 			sc->fwname = "iwn-6005";
670 		break;
671 	case IWN_HW_REV_TYPE_2030:
672 		sc->limits = &iwn2000_sensitivity_limits;
673 		sc->fwname = "iwn-2030";
674 		sc->sc_flags |= IWN_FLAG_ADV_BT_COEX;
675 		break;
676 	case IWN_HW_REV_TYPE_2000:
677 		sc->limits = &iwn2000_sensitivity_limits;
678 		sc->fwname = "iwn-2000";
679 		break;
680 	case IWN_HW_REV_TYPE_135:
681 		sc->limits = &iwn2000_sensitivity_limits;
682 		sc->fwname = "iwn-135";
683 		sc->sc_flags |= IWN_FLAG_ADV_BT_COEX;
684 		break;
685 	case IWN_HW_REV_TYPE_105:
686 		sc->limits = &iwn2000_sensitivity_limits;
687 		sc->fwname = "iwn-105";
688 		break;
689 	default:
690 		printf(": adapter type %d not supported\n", sc->hw_type);
691 		return ENOTSUP;
692 	}
693 	return 0;
694 }
695 
696 #if NBPFILTER > 0
697 /*
698  * Attach the interface to 802.11 radiotap.
699  */
700 void
701 iwn_radiotap_attach(struct iwn_softc *sc)
702 {
703 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
704 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
705 
706 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
707 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
708 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWN_RX_RADIOTAP_PRESENT);
709 
710 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
711 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
712 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWN_TX_RADIOTAP_PRESENT);
713 }
714 #endif
715 
716 int
717 iwn_detach(struct device *self, int flags)
718 {
719 	struct iwn_softc *sc = (struct iwn_softc *)self;
720 	struct ifnet *ifp = &sc->sc_ic.ic_if;
721 	int qid;
722 
723 	timeout_del(&sc->calib_to);
724 	task_del(systq, &sc->init_task);
725 
726 	/* Uninstall interrupt handler. */
727 	if (sc->sc_ih != NULL)
728 		pci_intr_disestablish(sc->sc_pct, sc->sc_ih);
729 
730 	/* Free DMA resources. */
731 	iwn_free_rx_ring(sc, &sc->rxq);
732 	for (qid = 0; qid < sc->ntxqs; qid++)
733 		iwn_free_tx_ring(sc, &sc->txq[qid]);
734 	iwn_free_sched(sc);
735 	iwn_free_kw(sc);
736 	if (sc->ict != NULL)
737 		iwn_free_ict(sc);
738 	iwn_free_fwmem(sc);
739 
740 	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
741 
742 	ieee80211_ifdetach(ifp);
743 	if_detach(ifp);
744 
745 	return 0;
746 }
747 
748 int
749 iwn_activate(struct device *self, int act)
750 {
751 	struct iwn_softc *sc = (struct iwn_softc *)self;
752 	struct ifnet *ifp = &sc->sc_ic.ic_if;
753 
754 	switch (act) {
755 	case DVACT_SUSPEND:
756 		if (ifp->if_flags & IFF_RUNNING)
757 			iwn_stop(ifp, 0);
758 		break;
759 	case DVACT_WAKEUP:
760 		iwn_wakeup(sc);
761 		break;
762 	}
763 
764 	return 0;
765 }
766 
767 void
768 iwn_wakeup(struct iwn_softc *sc)
769 {
770 	pcireg_t reg;
771 
772 	/* Clear device-specific "PCI retry timeout" register (41h). */
773 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
774 	if (reg & 0xff00)
775 		pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
776 	iwn_init_task(sc);
777 }
778 
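/*
 * iwn_init_task() runs from the systq task queue (see task_set() in
 * iwn_attach() and task_del() in iwn_detach()).  It brings the interface up
 * from process context, serialized against other entry points by sc_rwlock
 * and splnet().
 */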
779 void
780 iwn_init_task(void *arg1)
781 {
782 	struct iwn_softc *sc = arg1;
783 	struct ifnet *ifp = &sc->sc_ic.ic_if;
784 	int s;
785 
786 	rw_enter_write(&sc->sc_rwlock);
787 	s = splnet();
788 
789 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
790 		iwn_init(ifp);
791 
792 	splx(s);
793 	rw_exit_write(&sc->sc_rwlock);
794 }
795 
796 int
797 iwn_nic_lock(struct iwn_softc *sc)
798 {
799 	int ntries;
800 
801 	/* Request exclusive access to NIC. */
802 	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
803 
804 	/* Spin until we actually get the lock. */
805 	for (ntries = 0; ntries < 1000; ntries++) {
806 		if ((IWN_READ(sc, IWN_GP_CNTRL) &
807 		     (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
808 		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
809 			return 0;
810 		DELAY(10);
811 	}
812 	return ETIMEDOUT;
813 }
814 
815 static __inline void
816 iwn_nic_unlock(struct iwn_softc *sc)
817 {
818 	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
819 }
820 
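/*
 * Peripheral ("PRPH") registers are not directly memory-mapped.  They are
 * accessed indirectly by writing the target address to IWN_PRPH_RADDR or
 * IWN_PRPH_WADDR and then transferring data through IWN_PRPH_RDATA or
 * IWN_PRPH_WDATA.  Callers are expected to hold the NIC lock (see
 * iwn_nic_lock()) around these accesses.
 */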
821 static __inline uint32_t
822 iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
823 {
824 	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
825 	IWN_BARRIER_READ_WRITE(sc);
826 	return IWN_READ(sc, IWN_PRPH_RDATA);
827 }
828 
829 static __inline void
830 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
831 {
832 	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
833 	IWN_BARRIER_WRITE(sc);
834 	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
835 }
836 
837 static __inline void
838 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
839 {
840 	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
841 }
842 
843 static __inline void
844 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
845 {
846 	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
847 }
848 
849 static __inline void
850 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
851     const uint32_t *data, int count)
852 {
853 	for (; count > 0; count--, data++, addr += 4)
854 		iwn_prph_write(sc, addr, *data);
855 }
856 
857 static __inline uint32_t
858 iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
859 {
860 	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
861 	IWN_BARRIER_READ_WRITE(sc);
862 	return IWN_READ(sc, IWN_MEM_RDATA);
863 }
864 
865 static __inline void
866 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
867 {
868 	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
869 	IWN_BARRIER_WRITE(sc);
870 	IWN_WRITE(sc, IWN_MEM_WDATA, data);
871 }
872 
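/*
 * Write a 16-bit value into device memory.  The interface above only
 * supports 32-bit accesses, so read the enclosing word, merge the halfword
 * into the proper half and write the word back.
 */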
873 static __inline void
874 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
875 {
876 	uint32_t tmp;
877 
878 	tmp = iwn_mem_read(sc, addr & ~3);
879 	if (addr & 3)
880 		tmp = (tmp & 0x0000ffff) | data << 16;
881 	else
882 		tmp = (tmp & 0xffff0000) | data;
883 	iwn_mem_write(sc, addr & ~3, tmp);
884 }
885 
886 #ifdef IWN_DEBUG
887 
888 static __inline void
889 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
890     int count)
891 {
892 	for (; count > 0; count--, addr += 4)
893 		*data++ = iwn_mem_read(sc, addr);
894 }
895 
896 #endif
897 
898 static __inline void
899 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
900     int count)
901 {
902 	for (; count > 0; count--, addr += 4)
903 		iwn_mem_write(sc, addr, val);
904 }
905 
906 int
907 iwn_eeprom_lock(struct iwn_softc *sc)
908 {
909 	int i, ntries;
910 
911 	for (i = 0; i < 100; i++) {
912 		/* Request exclusive access to EEPROM. */
913 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
914 		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);
915 
916 		/* Spin until we actually get the lock. */
917 		for (ntries = 0; ntries < 100; ntries++) {
918 			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
919 			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
920 				return 0;
921 			DELAY(10);
922 		}
923 	}
924 	return ETIMEDOUT;
925 }
926 
927 static __inline void
928 iwn_eeprom_unlock(struct iwn_softc *sc)
929 {
930 	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
931 }
932 
933 /*
934  * Initialize access by host to One Time Programmable ROM.
935  * NB: This kind of ROM can be found on 1000 or 6000 Series only.
936  */
937 int
938 iwn_init_otprom(struct iwn_softc *sc)
939 {
940 	uint16_t prev, base, next;
941 	int count, error;
942 
943 	/* Wait for clock stabilization before accessing prph. */
944 	if ((error = iwn_clock_wait(sc)) != 0)
945 		return error;
946 
947 	if ((error = iwn_nic_lock(sc)) != 0)
948 		return error;
949 	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
950 	DELAY(5);
951 	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
952 	iwn_nic_unlock(sc);
953 
954 	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
955 	if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
956 		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
957 		    IWN_RESET_LINK_PWR_MGMT_DIS);
958 	}
959 	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
960 	/* Clear ECC status. */
961 	IWN_SETBITS(sc, IWN_OTP_GP,
962 	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
963 
964 	/*
965 	 * Find the block before last block (contains the EEPROM image)
966 	 * for HW without OTP shadow RAM.
967 	 */
968 	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
969 		/* Switch to absolute addressing mode. */
970 		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
971 		base = 0;
972 		for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
973 			error = iwn_read_prom_data(sc, base, &next, 2);
974 			if (error != 0)
975 				return error;
976 			if (next == 0)	/* End of linked-list. */
977 				break;
978 			prev = base;
979 			base = letoh16(next);
980 		}
981 		if (count == 0 || count == IWN1000_OTP_NBLOCKS)
982 			return EIO;
983 		/* Skip "next" word. */
984 		sc->prom_base = prev + 1;
985 	}
986 	return 0;
987 }
988 
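/*
 * Read `count' bytes at `addr' from the EEPROM or OTPROM.  Each access
 * returns 16 bits of data in the upper half of the IWN_EEPROM register,
 * hence the 2-byte stride and the >> 16 / >> 24 extraction below.
 */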
989 int
990 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
991 {
992 	uint8_t *out = data;
993 	uint32_t val, tmp;
994 	int ntries;
995 
996 	addr += sc->prom_base;
997 	for (; count > 0; count -= 2, addr++) {
998 		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
999 		for (ntries = 0; ntries < 10; ntries++) {
1000 			val = IWN_READ(sc, IWN_EEPROM);
1001 			if (val & IWN_EEPROM_READ_VALID)
1002 				break;
1003 			DELAY(5);
1004 		}
1005 		if (ntries == 10) {
1006 			printf("%s: timeout reading ROM at 0x%x\n",
1007 			    sc->sc_dev.dv_xname, addr);
1008 			return ETIMEDOUT;
1009 		}
1010 		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1011 			/* OTPROM, check for ECC errors. */
1012 			tmp = IWN_READ(sc, IWN_OTP_GP);
1013 			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1014 				printf("%s: OTPROM ECC error at 0x%x\n",
1015 				    sc->sc_dev.dv_xname, addr);
1016 				return EIO;
1017 			}
1018 			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1019 				/* Correctable ECC error, clear bit. */
1020 				IWN_SETBITS(sc, IWN_OTP_GP,
1021 				    IWN_OTP_GP_ECC_CORR_STTS);
1022 			}
1023 		}
1024 		*out++ = val >> 16;
1025 		if (count > 1)
1026 			*out++ = val >> 24;
1027 	}
1028 	return 0;
1029 }
1030 
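/*
 * Allocate a physically contiguous DMA buffer: create the map, allocate a
 * single segment with the requested alignment, map it into kernel virtual
 * address space and load it so that dma->paddr is valid.  Any partially
 * completed setup is undone by iwn_dma_contig_free() on failure.
 */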
1031 int
1032 iwn_dma_contig_alloc(bus_dma_tag_t tag, struct iwn_dma_info *dma, void **kvap,
1033     bus_size_t size, bus_size_t alignment)
1034 {
1035 	int nsegs, error;
1036 
1037 	dma->tag = tag;
1038 	dma->size = size;
1039 
1040 	error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1041 	    &dma->map);
1042 	if (error != 0)
1043 		goto fail;
1044 
1045 	error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1046 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1047 	if (error != 0)
1048 		goto fail;
1049 
1050 	error = bus_dmamem_map(tag, &dma->seg, 1, size, &dma->vaddr,
1051 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1052 	if (error != 0)
1053 		goto fail;
1054 
1055 	error = bus_dmamap_load_raw(tag, dma->map, &dma->seg, 1, size,
1056 	    BUS_DMA_NOWAIT);
1057 	if (error != 0)
1058 		goto fail;
1059 
1060 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1061 
1062 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1063 	if (kvap != NULL)
1064 		*kvap = dma->vaddr;
1065 
1066 	return 0;
1067 
1068 fail:	iwn_dma_contig_free(dma);
1069 	return error;
1070 }
1071 
1072 void
1073 iwn_dma_contig_free(struct iwn_dma_info *dma)
1074 {
1075 	if (dma->map != NULL) {
1076 		if (dma->vaddr != NULL) {
1077 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1078 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1079 			bus_dmamap_unload(dma->tag, dma->map);
1080 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1081 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1082 			dma->vaddr = NULL;
1083 		}
1084 		bus_dmamap_destroy(dma->tag, dma->map);
1085 		dma->map = NULL;
1086 	}
1087 }
1088 
1089 int
1090 iwn_alloc_sched(struct iwn_softc *sc)
1091 {
1092 	/* TX scheduler rings must be aligned on a 1KB boundary. */
1093 	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
1094 	    (void **)&sc->sched, sc->schedsz, 1024);
1095 }
1096 
1097 void
1098 iwn_free_sched(struct iwn_softc *sc)
1099 {
1100 	iwn_dma_contig_free(&sc->sched_dma);
1101 }
1102 
1103 int
1104 iwn_alloc_kw(struct iwn_softc *sc)
1105 {
1106 	/* "Keep Warm" page must be aligned on a 4KB boundary. */
1107 	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, NULL, 4096,
1108 	    4096);
1109 }
1110 
1111 void
1112 iwn_free_kw(struct iwn_softc *sc)
1113 {
1114 	iwn_dma_contig_free(&sc->kw_dma);
1115 }
1116 
1117 int
1118 iwn_alloc_ict(struct iwn_softc *sc)
1119 {
1120 	/* ICT table must be aligned on a 4KB boundary. */
1121 	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
1122 	    (void **)&sc->ict, IWN_ICT_SIZE, 4096);
1123 }
1124 
1125 void
1126 iwn_free_ict(struct iwn_softc *sc)
1127 {
1128 	iwn_dma_contig_free(&sc->ict_dma);
1129 }
1130 
1131 int
1132 iwn_alloc_fwmem(struct iwn_softc *sc)
1133 {
1134 	/* Must be aligned on a 16-byte boundary. */
1135 	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, NULL,
1136 	    sc->fwsz, 16);
1137 }
1138 
1139 void
1140 iwn_free_fwmem(struct iwn_softc *sc)
1141 {
1142 	iwn_dma_contig_free(&sc->fw_dma);
1143 }
1144 
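/*
 * Allocate the RX ring: a descriptor array holding the physical address of
 * each receive buffer (shifted right by 8 bits, since the hardware expects
 * 256-byte aligned addresses), a status area, and IWN_RX_RING_COUNT mbuf
 * cluster buffers of IWN_RBUF_SIZE bytes each.
 */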
1145 int
1146 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1147 {
1148 	bus_size_t size;
1149 	int i, error;
1150 
1151 	ring->cur = 0;
1152 
1153 	/* Allocate RX descriptors (256-byte aligned). */
1154 	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1155 	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
1156 	    (void **)&ring->desc, size, 256);
1157 	if (error != 0) {
1158 		printf("%s: could not allocate RX ring DMA memory\n",
1159 		    sc->sc_dev.dv_xname);
1160 		goto fail;
1161 	}
1162 
1163 	/* Allocate RX status area (16-byte aligned). */
1164 	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1165 	    (void **)&ring->stat, sizeof (struct iwn_rx_status), 16);
1166 	if (error != 0) {
1167 		printf("%s: could not allocate RX status DMA memory\n",
1168 		    sc->sc_dev.dv_xname);
1169 		goto fail;
1170 	}
1171 
1172 	/*
1173 	 * Allocate and map RX buffers.
1174 	 */
1175 	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1176 		struct iwn_rx_data *data = &ring->data[i];
1177 
1178 		error = bus_dmamap_create(sc->sc_dmat, IWN_RBUF_SIZE, 1,
1179 		    IWN_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1180 		    &data->map);
1181 		if (error != 0) {
1182 			printf("%s: could not create RX buf DMA map\n",
1183 			    sc->sc_dev.dv_xname);
1184 			goto fail;
1185 		}
1186 
1187 		data->m = MCLGETI(NULL, M_DONTWAIT, NULL, IWN_RBUF_SIZE);
1188 		if (data->m == NULL) {
1189 			printf("%s: could not allocate RX mbuf\n",
1190 			    sc->sc_dev.dv_xname);
1191 			error = ENOBUFS;
1192 			goto fail;
1193 		}
1194 
1195 		error = bus_dmamap_load(sc->sc_dmat, data->map,
1196 		    mtod(data->m, void *), IWN_RBUF_SIZE, NULL,
1197 		    BUS_DMA_NOWAIT | BUS_DMA_READ);
1198 		if (error != 0) {
1199 			printf("%s: can't map mbuf (error %d)\n",
1200 			    sc->sc_dev.dv_xname, error);
1201 			goto fail;
1202 		}
1203 
1204 		/* Set physical address of RX buffer (256-byte aligned). */
1205 		ring->desc[i] = htole32(data->map->dm_segs[0].ds_addr >> 8);
1206 	}
1207 
1208 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0, size,
1209 	    BUS_DMASYNC_PREWRITE);
1210 
1211 	return 0;
1212 
1213 fail:	iwn_free_rx_ring(sc, ring);
1214 	return error;
1215 }
1216 
1217 void
1218 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1219 {
1220 	int ntries;
1221 
1222 	if (iwn_nic_lock(sc) == 0) {
1223 		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1224 		for (ntries = 0; ntries < 1000; ntries++) {
1225 			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1226 			    IWN_FH_RX_STATUS_IDLE)
1227 				break;
1228 			DELAY(10);
1229 		}
1230 		iwn_nic_unlock(sc);
1231 	}
1232 	ring->cur = 0;
1233 	sc->last_rx_valid = 0;
1234 }
1235 
1236 void
1237 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1238 {
1239 	int i;
1240 
1241 	iwn_dma_contig_free(&ring->desc_dma);
1242 	iwn_dma_contig_free(&ring->stat_dma);
1243 
1244 	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1245 		struct iwn_rx_data *data = &ring->data[i];
1246 
1247 		if (data->m != NULL) {
1248 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1249 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1250 			bus_dmamap_unload(sc->sc_dmat, data->map);
1251 			m_freem(data->m);
1252 		}
1253 		if (data->map != NULL)
1254 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1255 	}
1256 }
1257 
1258 int
1259 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1260 {
1261 	bus_addr_t paddr;
1262 	bus_size_t size;
1263 	int i, error;
1264 
1265 	ring->qid = qid;
1266 	ring->queued = 0;
1267 	ring->cur = 0;
1268 
1269 	/* Allocate TX descriptors (256-byte aligned). */
1270 	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
1271 	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
1272 	    (void **)&ring->desc, size, 256);
1273 	if (error != 0) {
1274 		printf("%s: could not allocate TX ring DMA memory\n",
1275 		    sc->sc_dev.dv_xname);
1276 		goto fail;
1277 	}
1278 	/*
1279 	 * We only use rings 0 through 4 (4 EDCA + cmd), so there is no need
1280 	 * to allocate command space for the other rings.
1281 	 * XXX Do we really need to allocate descriptors for other rings?
1282 	 */
1283 	if (qid > 4)
1284 		return 0;
1285 
1286 	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
1287 	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma,
1288 	    (void **)&ring->cmd, size, 4);
1289 	if (error != 0) {
1290 		printf("%s: could not allocate TX cmd DMA memory\n",
1291 		    sc->sc_dev.dv_xname);
1292 		goto fail;
1293 	}
1294 
1295 	paddr = ring->cmd_dma.paddr;
1296 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1297 		struct iwn_tx_data *data = &ring->data[i];
1298 
1299 		data->cmd_paddr = paddr;
1300 		data->scratch_paddr = paddr + 12;
1301 		paddr += sizeof (struct iwn_tx_cmd);
1302 
1303 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1304 		    IWN_MAX_SCATTER - 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
1305 		    &data->map);
1306 		if (error != 0) {
1307 			printf("%s: could not create TX buf DMA map\n",
1308 			    sc->sc_dev.dv_xname);
1309 			goto fail;
1310 		}
1311 	}
1312 	return 0;
1313 
1314 fail:	iwn_free_tx_ring(sc, ring);
1315 	return error;
1316 }
1317 
1318 void
1319 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1320 {
1321 	int i;
1322 
1323 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1324 		struct iwn_tx_data *data = &ring->data[i];
1325 
1326 		if (data->m != NULL) {
1327 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1328 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1329 			bus_dmamap_unload(sc->sc_dmat, data->map);
1330 			m_freem(data->m);
1331 			data->m = NULL;
1332 		}
1333 	}
1334 	/* Clear TX descriptors. */
1335 	memset(ring->desc, 0, ring->desc_dma.size);
1336 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1337 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1338 	sc->qfullmsk &= ~(1 << ring->qid);
1339 	ring->queued = 0;
1340 	ring->cur = 0;
1341 }
1342 
1343 void
1344 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1345 {
1346 	int i;
1347 
1348 	iwn_dma_contig_free(&ring->desc_dma);
1349 	iwn_dma_contig_free(&ring->cmd_dma);
1350 
1351 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1352 		struct iwn_tx_data *data = &ring->data[i];
1353 
1354 		if (data->m != NULL) {
1355 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1356 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1357 			bus_dmamap_unload(sc->sc_dmat, data->map);
1358 			m_freem(data->m);
1359 		}
1360 		if (data->map != NULL)
1361 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1362 	}
1363 }
1364 
1365 void
1366 iwn5000_ict_reset(struct iwn_softc *sc)
1367 {
1368 	/* Disable interrupts. */
1369 	IWN_WRITE(sc, IWN_INT_MASK, 0);
1370 
1371 	/* Reset ICT table. */
1372 	memset(sc->ict, 0, IWN_ICT_SIZE);
1373 	sc->ict_cur = 0;
1374 
1375 	/* Set physical address of ICT table (4KB aligned). */
1376 	DPRINTF(("enabling ICT\n"));
1377 	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
1378 	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
1379 
1380 	/* Enable periodic RX interrupt. */
1381 	sc->int_mask |= IWN_INT_RX_PERIODIC;
1382 	/* Switch to ICT interrupt mode in driver. */
1383 	sc->sc_flags |= IWN_FLAG_USE_ICT;
1384 
1385 	/* Re-enable interrupts. */
1386 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
1387 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
1388 }
1389 
1390 int
1391 iwn_read_eeprom(struct iwn_softc *sc)
1392 {
1393 	struct iwn_ops *ops = &sc->ops;
1394 	struct ieee80211com *ic = &sc->sc_ic;
1395 	uint16_t val;
1396 	int error;
1397 
1398 	/* Check whether adapter has an EEPROM or an OTPROM. */
1399 	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
1400 	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
1401 		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
1402 	DPRINTF(("%s found\n", (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ?
1403 	    "OTPROM" : "EEPROM"));
1404 
1405 	/* Adapter has to be powered on for EEPROM access to work. */
1406 	if ((error = iwn_apm_init(sc)) != 0) {
1407 		printf("%s: could not power ON adapter\n",
1408 		    sc->sc_dev.dv_xname);
1409 		return error;
1410 	}
1411 
1412 	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
1413 		printf("%s: bad ROM signature\n", sc->sc_dev.dv_xname);
1414 		return EIO;
1415 	}
1416 	if ((error = iwn_eeprom_lock(sc)) != 0) {
1417 		printf("%s: could not lock ROM (error=%d)\n",
1418 		    sc->sc_dev.dv_xname, error);
1419 		return error;
1420 	}
1421 	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1422 		if ((error = iwn_init_otprom(sc)) != 0) {
1423 			printf("%s: could not initialize OTPROM\n",
1424 			    sc->sc_dev.dv_xname);
1425 			return error;
1426 		}
1427 	}
1428 
1429 	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
1430 	DPRINTF(("SKU capabilities=0x%04x\n", letoh16(val)));
1431 	/* Check if HT support is bonded out. */
1432 	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
1433 		sc->sc_flags |= IWN_FLAG_HAS_11N;
1434 
1435 	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
1436 	sc->rfcfg = letoh16(val);
1437 	DPRINTF(("radio config=0x%04x\n", sc->rfcfg));
1438 	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
1439 	if (sc->txchainmask == 0)
1440 		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
1441 	if (sc->rxchainmask == 0)
1442 		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
1443 
1444 	/* Read MAC address. */
1445 	iwn_read_prom_data(sc, IWN_EEPROM_MAC, ic->ic_myaddr, 6);
1446 
1447 	/* Read adapter-specific information from EEPROM. */
1448 	ops->read_eeprom(sc);
1449 
1450 	iwn_apm_stop(sc);	/* Power OFF adapter. */
1451 
1452 	iwn_eeprom_unlock(sc);
1453 	return 0;
1454 }
1455 
1456 void
1457 iwn4965_read_eeprom(struct iwn_softc *sc)
1458 {
1459 	uint32_t addr;
1460 	uint16_t val;
1461 	int i;
1462 
1463 	/* Read regulatory domain (4 ASCII characters). */
1464 	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
1465 
1466 	/* Read the list of authorized channels (20MHz ones only). */
1467 	for (i = 0; i < 5; i++) {
1468 		addr = iwn4965_regulatory_bands[i];
1469 		iwn_read_eeprom_channels(sc, i, addr);
1470 	}
1471 
1472 	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
1473 	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
1474 	sc->maxpwr2GHz = val & 0xff;
1475 	sc->maxpwr5GHz = val >> 8;
1476 	/* Check that EEPROM values are within valid range. */
1477 	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
1478 		sc->maxpwr5GHz = 38;
1479 	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
1480 		sc->maxpwr2GHz = 38;
1481 	DPRINTF(("maxpwr 2GHz=%d 5GHz=%d\n", sc->maxpwr2GHz, sc->maxpwr5GHz));
1482 
1483 	/* Read samples for each TX power group. */
1484 	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
1485 	    sizeof sc->bands);
1486 
1487 	/* Read voltage at which samples were taken. */
1488 	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
1489 	sc->eeprom_voltage = (int16_t)letoh16(val);
1490 	DPRINTF(("voltage=%d (in 0.3V)\n", sc->eeprom_voltage));
1491 
1492 #ifdef IWN_DEBUG
1493 	/* Print samples. */
1494 	if (iwn_debug > 0) {
1495 		for (i = 0; i < IWN_NBANDS; i++)
1496 			iwn4965_print_power_group(sc, i);
1497 	}
1498 #endif
1499 }
1500 
1501 #ifdef IWN_DEBUG
1502 void
1503 iwn4965_print_power_group(struct iwn_softc *sc, int i)
1504 {
1505 	struct iwn4965_eeprom_band *band = &sc->bands[i];
1506 	struct iwn4965_eeprom_chan_samples *chans = band->chans;
1507 	int j, c;
1508 
1509 	printf("===band %d===\n", i);
1510 	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
1511 	printf("chan1 num=%d\n", chans[0].num);
1512 	for (c = 0; c < 2; c++) {
1513 		for (j = 0; j < IWN_NSAMPLES; j++) {
1514 			printf("chain %d, sample %d: temp=%d gain=%d "
1515 			    "power=%d pa_det=%d\n", c, j,
1516 			    chans[0].samples[c][j].temp,
1517 			    chans[0].samples[c][j].gain,
1518 			    chans[0].samples[c][j].power,
1519 			    chans[0].samples[c][j].pa_det);
1520 		}
1521 	}
1522 	printf("chan2 num=%d\n", chans[1].num);
1523 	for (c = 0; c < 2; c++) {
1524 		for (j = 0; j < IWN_NSAMPLES; j++) {
1525 			printf("chain %d, sample %d: temp=%d gain=%d "
1526 			    "power=%d pa_det=%d\n", c, j,
1527 			    chans[1].samples[c][j].temp,
1528 			    chans[1].samples[c][j].gain,
1529 			    chans[1].samples[c][j].power,
1530 			    chans[1].samples[c][j].pa_det);
1531 		}
1532 	}
1533 }
1534 #endif
1535 
1536 void
1537 iwn5000_read_eeprom(struct iwn_softc *sc)
1538 {
1539 	struct iwn5000_eeprom_calib_hdr hdr;
1540 	int32_t volt;
1541 	uint32_t base, addr;
1542 	uint16_t val;
1543 	int i;
1544 
1545 	/* Read regulatory domain (4 ASCII characters). */
1546 	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1547 	base = letoh16(val);
1548 	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
1549 	    sc->eeprom_domain, 4);
1550 
1551 	/* Read the list of authorized channels (20MHz ones only). */
1552 	for (i = 0; i < 5; i++) {
1553 		addr = base + iwn5000_regulatory_bands[i];
1554 		iwn_read_eeprom_channels(sc, i, addr);
1555 	}
1556 
1557 	/* Read enhanced TX power information for 6000 Series. */
1558 	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1559 		iwn_read_eeprom_enhinfo(sc);
1560 
1561 	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
1562 	base = letoh16(val);
1563 	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
1564 	DPRINTF(("calib version=%u pa type=%u voltage=%u\n",
1565 	    hdr.version, hdr.pa_type, letoh16(hdr.volt)));
1566 	sc->calib_ver = hdr.version;
1567 
1568 	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
1569 	    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
1570 	    sc->hw_type == IWN_HW_REV_TYPE_135 ||
1571 	    sc->hw_type == IWN_HW_REV_TYPE_105) {
1572 		sc->eeprom_voltage = letoh16(hdr.volt);
1573 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1574 		sc->eeprom_temp = letoh16(val);
1575 		iwn_read_prom_data(sc, base + IWN2000_EEPROM_RAWTEMP, &val, 2);
1576 		sc->eeprom_rawtemp = letoh16(val);
1577 	}
1578 
1579 	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
1580 		/* Compute temperature offset. */
1581 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1582 		sc->eeprom_temp = letoh16(val);
1583 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
1584 		volt = letoh16(val);
1585 		sc->temp_off = sc->eeprom_temp - (volt / -5);
1586 		DPRINTF(("temp=%d volt=%d offset=%dK\n",
1587 		    sc->eeprom_temp, volt, sc->temp_off));
1588 	} else {
1589 		/* Read crystal calibration. */
1590 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
1591 		    &sc->eeprom_crystal, sizeof (uint32_t));
1592 		DPRINTF(("crystal calibration 0x%08x\n",
1593 		    letoh32(sc->eeprom_crystal)));
1594 	}
1595 }
1596 
1597 void
1598 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
1599 {
1600 	struct ieee80211com *ic = &sc->sc_ic;
1601 	const struct iwn_chan_band *band = &iwn_bands[n];
1602 	struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND];
1603 	uint8_t chan;
1604 	int i;
1605 
1606 	iwn_read_prom_data(sc, addr, channels,
1607 	    band->nchan * sizeof (struct iwn_eeprom_chan));
1608 
1609 	for (i = 0; i < band->nchan; i++) {
1610 		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID))
1611 			continue;
1612 
1613 		chan = band->chan[i];
1614 
1615 		if (n == 0) {	/* 2GHz band */
1616 			ic->ic_channels[chan].ic_freq =
1617 			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ);
1618 			ic->ic_channels[chan].ic_flags =
1619 			    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
1620 			    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
1621 
1622 		} else {	/* 5GHz band */
1623 			/*
1624 			 * Some adapters support channels 7, 8, 11 and 12
1625 			 * both in the 2GHz and 4.9GHz bands.
1626 			 * Because of limitations in our net80211 layer,
1627 			 * we don't support them in the 4.9GHz band.
1628 			 */
1629 			if (chan <= 14)
1630 				continue;
1631 
1632 			ic->ic_channels[chan].ic_freq =
1633 			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ);
1634 			ic->ic_channels[chan].ic_flags = IEEE80211_CHAN_A;
1635 			/* We have at least one valid 5GHz channel. */
1636 			sc->sc_flags |= IWN_FLAG_HAS_5GHZ;
1637 		}
1638 
1639 		/* Is active scan allowed on this channel? */
1640 		if (!(channels[i].flags & IWN_EEPROM_CHAN_ACTIVE)) {
1641 			ic->ic_channels[chan].ic_flags |=
1642 			    IEEE80211_CHAN_PASSIVE;
1643 		}
1644 
1645 		/* Save maximum allowed TX power for this channel. */
1646 		sc->maxpwr[chan] = channels[i].maxpwr;
1647 
1648 		if (sc->sc_flags & IWN_FLAG_HAS_11N)
1649 			ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_HT;
1650 
1651 		DPRINTF(("adding chan %d flags=0x%x maxpwr=%d\n",
1652 		    chan, channels[i].flags, sc->maxpwr[chan]));
1653 	}
1654 }
1655 
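/*
 * Read the "enhanced" TX power table present on 6000 Series EEPROMs.  Each
 * entry carries per-chain and MIMO limits expressed in half-dBm steps; the
 * largest applicable limit is kept, converted to dBm, in sc->enh_maxpwr[].
 */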
1656 void
1657 iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
1658 {
1659 	struct iwn_eeprom_enhinfo enhinfo[35];
1660 	uint16_t val, base;
1661 	int8_t maxpwr;
1662 	int i;
1663 
1664 	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1665 	base = letoh16(val);
1666 	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
1667 	    enhinfo, sizeof enhinfo);
1668 
1669 	memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
1670 	for (i = 0; i < nitems(enhinfo); i++) {
1671 		if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0)
1672 			continue;	/* Skip invalid entries. */
1673 
1674 		maxpwr = 0;
1675 		if (sc->txchainmask & IWN_ANT_A)
1676 			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
1677 		if (sc->txchainmask & IWN_ANT_B)
1678 			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
1679 		if (sc->txchainmask & IWN_ANT_C)
1680 			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
1681 		if (sc->ntxchains == 2)
1682 			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
1683 		else if (sc->ntxchains == 3)
1684 			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
1685 		maxpwr /= 2;	/* Convert half-dBm to dBm. */
1686 
1687 		DPRINTF(("enhinfo %d, maxpwr=%d\n", i, maxpwr));
1688 		sc->enh_maxpwr[i] = maxpwr;
1689 	}
1690 }
1691 
1692 struct ieee80211_node *
1693 iwn_node_alloc(struct ieee80211com *ic)
1694 {
1695 	return malloc(sizeof (struct iwn_node), M_DEVBUF, M_NOWAIT | M_ZERO);
1696 }
1697 
1698 void
1699 iwn_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
1700 {
1701 	struct iwn_softc *sc = ic->ic_if.if_softc;
1702 	struct iwn_node *wn = (void *)ni;
1703 	uint8_t rate;
1704 	int ridx, i;
1705 
1706 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0)
1707 		ieee80211_amrr_node_init(&sc->amrr, &wn->amn);
1708 
1709 	/* Start at the lowest available bit-rate; AMRR/MiRA will raise it. */
1710 	ni->ni_txrate = 0;
1711 	ni->ni_txmcs = 0;
1712 
1713 	for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
1714 		rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;
1715 		/* Map 802.11 rate to HW rate index. */
1716 		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
1717 			if (iwn_rates[ridx].plcp != IWN_PLCP_INVALID &&
1718 			    iwn_rates[ridx].rate == rate)
1719 				break;
1720 		}
1721 		wn->ridx[i] = ridx;
1722 	}
1723 }
1724 
1725 int
1726 iwn_media_change(struct ifnet *ifp)
1727 {
1728 	struct iwn_softc *sc = ifp->if_softc;
1729 	struct ieee80211com *ic = &sc->sc_ic;
1730 	uint8_t rate, ridx;
1731 	int error;
1732 
1733 	error = ieee80211_media_change(ifp);
1734 	if (error != ENETRESET)
1735 		return error;
1736 
1737 	if (ic->ic_fixed_mcs != -1)
1738 		sc->fixed_ridx = iwn_mcs2ridx[ic->ic_fixed_mcs];
1739 	if (ic->ic_fixed_rate != -1) {
1740 		rate = ic->ic_sup_rates[ic->ic_curmode].
1741 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
1742 		/* Map 802.11 rate to HW rate index. */
1743 		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++)
1744 			if (iwn_rates[ridx].plcp != IWN_PLCP_INVALID &&
1745 			    iwn_rates[ridx].rate == rate)
1746 				break;
1747 		sc->fixed_ridx = ridx;
1748 	}
1749 
1750 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1751 	    (IFF_UP | IFF_RUNNING)) {
1752 		iwn_stop(ifp, 0);
1753 		error = iwn_init(ifp);
1754 	}
1755 	return error;
1756 }
1757 
1758 int
1759 iwn_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
1760 {
1761 	struct ifnet *ifp = &ic->ic_if;
1762 	struct iwn_softc *sc = ifp->if_softc;
1763 	struct ieee80211_node *ni = ic->ic_bss;
1764 	struct iwn_node *wn = (void *)ni;
1765 	int error;
1766 
1767 	if (ic->ic_state == IEEE80211_S_RUN) {
1768 		ieee80211_mira_cancel_timeouts(&wn->mn);
1769 		timeout_del(&sc->calib_to);
1770 		sc->calib.state = IWN_CALIB_STATE_INIT;
1771 		if (sc->sc_flags & IWN_FLAG_BGSCAN)
1772 			iwn_scan_abort(sc);
1773 	}
1774 
1775 	if (ic->ic_state == IEEE80211_S_SCAN) {
1776 		if (nstate == IEEE80211_S_SCAN) {
1777 			if (sc->sc_flags & IWN_FLAG_SCANNING)
1778 				return 0;
1779 		} else
1780 			sc->sc_flags &= ~IWN_FLAG_SCANNING;
1781 		/* Turn LED off when leaving scan state. */
1782 		iwn_set_led(sc, IWN_LED_LINK, 1, 0);
1783 	}
1784 
1785 	if (ic->ic_state >= IEEE80211_S_ASSOC &&
1786 	    nstate <= IEEE80211_S_ASSOC) {
1787 		/* Reset state to handle re- and disassociations. */
1788 		sc->rxon.associd = 0;
1789 		sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
1790 		sc->calib.state = IWN_CALIB_STATE_INIT;
1791 		error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
1792 		if (error != 0)
1793 			printf("%s: RXON command failed\n",
1794 			    sc->sc_dev.dv_xname);
1795 	}
1796 
1797 	switch (nstate) {
1798 	case IEEE80211_S_SCAN:
1799 		/* Make the link LED blink while we're scanning. */
1800 		iwn_set_led(sc, IWN_LED_LINK, 10, 10);
1801 
1802 		if ((error = iwn_scan(sc, IEEE80211_CHAN_2GHZ, 0)) != 0) {
1803 			printf("%s: could not initiate scan\n",
1804 			    sc->sc_dev.dv_xname);
1805 			return error;
1806 		}
1807 		if (ifp->if_flags & IFF_DEBUG)
1808 			printf("%s: %s -> %s\n", ifp->if_xname,
1809 			    ieee80211_state_name[ic->ic_state],
1810 			    ieee80211_state_name[nstate]);
1811 		if ((sc->sc_flags & IWN_FLAG_BGSCAN) == 0) {
1812 			ieee80211_set_link_state(ic, LINK_STATE_DOWN);
1813 			ieee80211_free_allnodes(ic, 1);
1814 		}
1815 		ic->ic_state = nstate;
1816 		return 0;
1817 
1818 	case IEEE80211_S_ASSOC:
1819 		if (ic->ic_state != IEEE80211_S_RUN)
1820 			break;
1821 		/* FALLTHROUGH */
1822 	case IEEE80211_S_AUTH:
1823 		if ((error = iwn_auth(sc, arg)) != 0) {
1824 			printf("%s: could not move to auth state\n",
1825 			    sc->sc_dev.dv_xname);
1826 			return error;
1827 		}
1828 		break;
1829 
1830 	case IEEE80211_S_RUN:
1831 		if ((error = iwn_run(sc)) != 0) {
1832 			printf("%s: could not move to run state\n",
1833 			    sc->sc_dev.dv_xname);
1834 			return error;
1835 		}
1836 		break;
1837 
1838 	case IEEE80211_S_INIT:
1839 		sc->calib.state = IWN_CALIB_STATE_INIT;
1840 		break;
1841 	}
1842 
1843 	return sc->sc_newstate(ic, nstate, arg);
1844 }
1845 
1846 void
1847 iwn_iter_func(void *arg, struct ieee80211_node *ni)
1848 {
1849 	struct iwn_softc *sc = arg;
1850 	struct iwn_node *wn = (void *)ni;
1851 
1852 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0)
1853 		ieee80211_amrr_choose(&sc->amrr, ni, &wn->amn);
1854 }
1855 
1856 void
1857 iwn_calib_timeout(void *arg)
1858 {
1859 	struct iwn_softc *sc = arg;
1860 	struct ieee80211com *ic = &sc->sc_ic;
1861 	int s;
1862 
1863 	s = splnet();
1864 	if (ic->ic_fixed_rate == -1) {
1865 		if (ic->ic_opmode == IEEE80211_M_STA)
1866 			iwn_iter_func(sc, ic->ic_bss);
1867 		else
1868 			ieee80211_iterate_nodes(ic, iwn_iter_func, sc);
1869 	}
1870 	/* Force automatic TX power calibration every 60 secs (120 ticks). */
1871 	if (++sc->calib_cnt >= 120) {
1872 		uint32_t flags = 0;
1873 
1874 		DPRINTFN(2, ("sending request for statistics\n"));
1875 		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
1876 		    sizeof flags, 1);
1877 		sc->calib_cnt = 0;
1878 	}
1879 	splx(s);
1880 
1881 	/* Automatic rate control triggered every 500ms. */
1882 	timeout_add_msec(&sc->calib_to, 500);
1883 }
1884 
1885 int
1886 iwn_ccmp_decap(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
1887 {
1888 	struct ieee80211com *ic = &sc->sc_ic;
1889 	struct ieee80211_key *k = &ni->ni_pairwise_key;
1890 	struct ieee80211_frame *wh;
1891 	struct ieee80211_rx_ba *ba;
1892 	uint64_t pn, *prsc;
1893 	uint8_t *ivp;
1894 	uint8_t tid;
1895 	int hdrlen, hasqos;
1896 
1897 	wh = mtod(m, struct ieee80211_frame *);
1898 	hdrlen = ieee80211_get_hdrlen(wh);
1899 	ivp = (uint8_t *)wh + hdrlen;
1900 
1901 	/* Check that the ExtIV bit is set. */
1902 	if (!(ivp[3] & IEEE80211_WEP_EXTIV)) {
1903 		DPRINTF(("CCMP decap ExtIV not set\n"));
1904 		return 1;
1905 	}
1906 	hasqos = ieee80211_has_qos(wh);
1907 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
1908 	ba = hasqos ? &ni->ni_rx_ba[tid] : NULL;
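	/* Per-TID CCMP replay counter (last accepted PN) for this key. */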
1909 	prsc = &k->k_rsc[tid];
1910 
1911 	/* Extract the 48-bit PN from the CCMP header. */
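	/* ivp[2] is reserved; ivp[3] holds the Key ID/ExtIV octet. */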
1912 	pn = (uint64_t)ivp[0]       |
1913 	     (uint64_t)ivp[1] <<  8 |
1914 	     (uint64_t)ivp[4] << 16 |
1915 	     (uint64_t)ivp[5] << 24 |
1916 	     (uint64_t)ivp[6] << 32 |
1917 	     (uint64_t)ivp[7] << 40;
1918 	if (pn <= *prsc) {
1919 		if (hasqos && ba->ba_state == IEEE80211_BA_AGREED) {
1920 			/*
1921 			 * This is an A-MPDU subframe.
1922 			 * Such frames may be received out of order due to
1923 			 * legitimate retransmissions of failed subframes
1924 			 * in previous A-MPDUs. Duplicates will be handled
1925 			 * in ieee80211_input() as part of A-MPDU reordering.
1926 			 */
1927 		} else if (ieee80211_has_seq(wh)) {
1928 			/*
1929 			 * Not necessarily a replayed frame since we did not
1930 			 * check the sequence number of the 802.11 header yet.
1931 			 */
1932 			int nrxseq, orxseq;
1933 
1934 			nrxseq = letoh16(*(u_int16_t *)wh->i_seq) >>
1935 			    IEEE80211_SEQ_SEQ_SHIFT;
1936 			if (hasqos)
1937 				orxseq = ni->ni_qos_rxseqs[tid];
1938 			else
1939 				orxseq = ni->ni_rxseq;
1940 			if (nrxseq < orxseq) {
1941 				DPRINTF(("CCMP replayed (n=%d < o=%d)\n",
1942 				    nrxseq, orxseq));
1943 				ic->ic_stats.is_ccmp_replays++;
1944 				return 1;
1945 			}
1946 		} else {
1947 			DPRINTF(("CCMP replayed\n"));
1948 			ic->ic_stats.is_ccmp_replays++;
1949 			return 1;
1950 		}
1951 	}
1952 	/* Update last seen packet number. */
1953 	*prsc = pn;
1954 
1955 	/* Clear Protected bit and strip IV. */
1956 	wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
1957 	memmove(mtod(m, caddr_t) + IEEE80211_CCMP_HDRLEN, wh, hdrlen);
1958 	m_adj(m, IEEE80211_CCMP_HDRLEN);
1959 	/* Strip MIC. */
1960 	m_adj(m, -IEEE80211_CCMP_MICLEN);
1961 	return 0;
1962 }
1963 
1964 /*
1965  * Process an RX_PHY firmware notification.  This is usually immediately
1966  * followed by an MPDU_RX_DONE notification.
1967  */
1968 void
1969 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
1970     struct iwn_rx_data *data)
1971 {
1972 	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
1973 
1974 	DPRINTFN(2, ("received PHY stats\n"));
1975 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
1976 	    sizeof (*stat), BUS_DMASYNC_POSTREAD);
1977 
1978 	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
1979 	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
1980 	sc->last_rx_valid = IWN_LAST_RX_VALID;
1981 	/*
1982 	 * The firmware does not send separate RX_PHY
1983 	 * notifications for A-MPDU subframes.
1984 	 */
1985 	if (stat->flags & htole16(IWN_STAT_FLAG_AGG))
1986 		sc->last_rx_valid |= IWN_LAST_RX_AMPDU;
1987 }
1988 
1989 /*
1990  * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
1991  * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
1992  */
1993 void
1994 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
1995     struct iwn_rx_data *data)
1996 {
1997 	struct iwn_ops *ops = &sc->ops;
1998 	struct ieee80211com *ic = &sc->sc_ic;
1999 	struct ifnet *ifp = &ic->ic_if;
2000 	struct iwn_rx_ring *ring = &sc->rxq;
2001 	struct ieee80211_frame *wh;
2002 	struct ieee80211_rxinfo rxi;
2003 	struct ieee80211_node *ni;
2004 	struct ieee80211_channel *bss_chan = NULL;
2005 	struct mbuf *m, *m1;
2006 	struct iwn_rx_stat *stat;
2007 	caddr_t head;
2008 	uint32_t flags;
2009 	int error, len, rssi;
2010 	uint16_t chan;
2011 
2012 	if (desc->type == IWN_MPDU_RX_DONE) {
2013 		/* Check for prior RX_PHY notification. */
2014 		if (!sc->last_rx_valid) {
2015 			DPRINTF(("missing RX_PHY\n"));
2016 			return;
2017 		}
2018 		sc->last_rx_valid &= ~IWN_LAST_RX_VALID;
2019 		stat = &sc->last_rx_stat;
2020 		if ((sc->last_rx_valid & IWN_LAST_RX_AMPDU) &&
2021 		    (stat->flags & htole16(IWN_STAT_FLAG_AGG)) == 0) {
2022 			DPRINTF(("missing RX_PHY (expecting A-MPDU)\n"));
2023 			return;
2024 		}
2025 		if ((sc->last_rx_valid & IWN_LAST_RX_AMPDU) == 0 &&
2026 		    (stat->flags & htole16(IWN_STAT_FLAG_AGG))) {
2027 			DPRINTF(("missing RX_PHY (unexpected A-MPDU)\n"));
2028 			return;
2029 		}
2030 	} else
2031 		stat = (struct iwn_rx_stat *)(desc + 1);
2032 
2033 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWN_RBUF_SIZE,
2034 	    BUS_DMASYNC_POSTREAD);
2035 
2036 	if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2037 		printf("%s: invalid RX statistic header\n",
2038 		    sc->sc_dev.dv_xname);
2039 		return;
2040 	}
2041 	if (desc->type == IWN_MPDU_RX_DONE) {
2042 		struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2043 		head = (caddr_t)(mpdu + 1);
2044 		len = letoh16(mpdu->len);
2045 	} else {
2046 		head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
2047 		len = letoh16(stat->len);
2048 	}
2049 
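	/* A 32-bit word with RX status flags follows the frame data. */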
2050 	flags = letoh32(*(uint32_t *)(head + len));
2051 
2052 	/* Discard frames with a bad FCS early. */
2053 	if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2054 		DPRINTFN(2, ("RX flags error %x\n", flags));
2055 		ifp->if_ierrors++;
2056 		return;
2057 	}
2058 	/* Discard frames that are too short. */
2059 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
2060 		/* Allow control frames in monitor mode. */
2061 		if (len < sizeof (struct ieee80211_frame_cts)) {
2062 			DPRINTF(("frame too short: %d\n", len));
2063 			ic->ic_stats.is_rx_tooshort++;
2064 			ifp->if_ierrors++;
2065 			return;
2066 		}
2067 	} else if (len < sizeof (*wh)) {
2068 		DPRINTF(("frame too short: %d\n", len));
2069 		ic->ic_stats.is_rx_tooshort++;
2070 		ifp->if_ierrors++;
2071 		return;
2072 	}
2073 
2074 	m1 = MCLGETI(NULL, M_DONTWAIT, NULL, IWN_RBUF_SIZE);
2075 	if (m1 == NULL) {
2076 		ic->ic_stats.is_rx_nombuf++;
2077 		ifp->if_ierrors++;
2078 		return;
2079 	}
2080 	bus_dmamap_unload(sc->sc_dmat, data->map);
2081 
2082 	error = bus_dmamap_load(sc->sc_dmat, data->map, mtod(m1, void *),
2083 	    IWN_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_READ);
2084 	if (error != 0) {
2085 		m_freem(m1);
2086 
2087 		/* Try to reload the old mbuf. */
2088 		error = bus_dmamap_load(sc->sc_dmat, data->map,
2089 		    mtod(data->m, void *), IWN_RBUF_SIZE, NULL,
2090 		    BUS_DMA_NOWAIT | BUS_DMA_READ);
2091 		if (error != 0) {
2092 			panic("%s: could not load old RX mbuf",
2093 			    sc->sc_dev.dv_xname);
2094 		}
2095 		/* Physical address may have changed. */
2096 		ring->desc[ring->cur] =
2097 		    htole32(data->map->dm_segs[0].ds_addr >> 8);
2098 		bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
2099 		    ring->cur * sizeof (uint32_t), sizeof (uint32_t),
2100 		    BUS_DMASYNC_PREWRITE);
2101 		ifp->if_ierrors++;
2102 		return;
2103 	}
2104 
2105 	m = data->m;
2106 	data->m = m1;
2107 	/* Update RX descriptor. */
2108 	ring->desc[ring->cur] = htole32(data->map->dm_segs[0].ds_addr >> 8);
2109 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
2110 	    ring->cur * sizeof (uint32_t), sizeof (uint32_t),
2111 	    BUS_DMASYNC_PREWRITE);
2112 
2113 	/* Finalize mbuf. */
2114 	m->m_data = head;
2115 	m->m_pkthdr.len = m->m_len = len;
2116 
2117 	/*
2118 	 * Grab a reference to the source node. Note that control frames are
2119 	 * shorter than struct ieee80211_frame, but ieee80211_find_rxnode()
2120 	 * handles such short frames safely.
2121 	 */
2122 	wh = mtod(m, struct ieee80211_frame *);
2123 	if (len < sizeof (*wh) &&
2124 	   (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
2125 		ic->ic_stats.is_rx_tooshort++;
2126 		ifp->if_ierrors++;
2127 		m_freem(m);
2128 		return;
2129 	}
2130 	ni = ieee80211_find_rxnode(ic, wh);
2131 
2132 	rxi.rxi_flags = 0;
2133 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
2134 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
2135 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
2136 	    (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
2137 	    ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
2138 		if ((flags & IWN_RX_CIPHER_MASK) != IWN_RX_CIPHER_CCMP) {
2139 			ic->ic_stats.is_ccmp_dec_errs++;
2140 			ifp->if_ierrors++;
2141 			m_freem(m);
2142 			return;
2143 		}
2144 		/* Check whether decryption was successful or not. */
2145 		if ((desc->type == IWN_MPDU_RX_DONE &&
2146 		     (flags & (IWN_RX_MPDU_DEC | IWN_RX_MPDU_MIC_OK)) !=
2147 		      (IWN_RX_MPDU_DEC | IWN_RX_MPDU_MIC_OK)) ||
2148 		    (desc->type != IWN_MPDU_RX_DONE &&
2149 		     (flags & IWN_RX_DECRYPT_MASK) != IWN_RX_DECRYPT_OK)) {
2150 			DPRINTF(("CCMP decryption failed 0x%x\n", flags));
2151 			ic->ic_stats.is_ccmp_dec_errs++;
2152 			ifp->if_ierrors++;
2153 			m_freem(m);
2154 			return;
2155 		}
2156 		if (iwn_ccmp_decap(sc, m, ni) != 0) {
2157 			ifp->if_ierrors++;
2158 			m_freem(m);
2159 			return;
2160 		}
2161 		rxi.rxi_flags |= IEEE80211_RXI_HWDEC;
2162 	}
2163 
2164 	rssi = ops->get_rssi(stat);
2165 
2166 	chan = stat->chan;
2167 	if (chan > IEEE80211_CHAN_MAX)
2168 		chan = IEEE80211_CHAN_MAX;
2169 
2170 	if (ni == ic->ic_bss) {
2171 		bss_chan = ni->ni_chan;
2172 		/* Fix current channel. */
2173 		ni->ni_chan = &ic->ic_channels[chan];
2174 	}
2175 
2176 #if NBPFILTER > 0
2177 	if (sc->sc_drvbpf != NULL) {
2178 		struct mbuf mb;
2179 		struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
2180 		uint16_t chan_flags;
2181 
2182 		tap->wr_flags = 0;
2183 		if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
2184 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2185 		tap->wr_chan_freq = htole16(ic->ic_channels[chan].ic_freq);
2186 		chan_flags = ic->ic_channels[chan].ic_flags;
2187 		if (ic->ic_curmode != IEEE80211_MODE_11N)
2188 			chan_flags &= ~IEEE80211_CHAN_HT;
2189 		tap->wr_chan_flags = htole16(chan_flags);
2190 		tap->wr_dbm_antsignal = (int8_t)rssi;
2191 		tap->wr_dbm_antnoise = (int8_t)sc->noise;
2192 		tap->wr_tsft = stat->tstamp;
2193 		if (stat->rflags & IWN_RFLAG_MCS) {
2194 			tap->wr_rate = (0x80 | stat->rate); /* HT MCS index */
2195 		} else {
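			/*
			 * Map the hardware rate code to a radiotap
			 * rate in 500 kb/s units.
			 */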
2196 			switch (stat->rate) {
2197 			/* CCK rates. */
2198 			case  10: tap->wr_rate =   2; break;
2199 			case  20: tap->wr_rate =   4; break;
2200 			case  55: tap->wr_rate =  11; break;
2201 			case 110: tap->wr_rate =  22; break;
2202 			/* OFDM rates. */
2203 			case 0xd: tap->wr_rate =  12; break;
2204 			case 0xf: tap->wr_rate =  18; break;
2205 			case 0x5: tap->wr_rate =  24; break;
2206 			case 0x7: tap->wr_rate =  36; break;
2207 			case 0x9: tap->wr_rate =  48; break;
2208 			case 0xb: tap->wr_rate =  72; break;
2209 			case 0x1: tap->wr_rate =  96; break;
2210 			case 0x3: tap->wr_rate = 108; break;
2211 			/* Unknown rate: should not happen. */
2212 			default:  tap->wr_rate =  0;
2213 			}
2214 		}
2215 
2216 		mb.m_data = (caddr_t)tap;
2217 		mb.m_len = sc->sc_rxtap_len;
2218 		mb.m_next = m;
2219 		mb.m_nextpkt = NULL;
2220 		mb.m_type = 0;
2221 		mb.m_flags = 0;
2222 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
2223 	}
2224 #endif
2225 
2226 	/* Send the frame to the 802.11 layer. */
2227 	rxi.rxi_rssi = rssi;
2228 	rxi.rxi_tstamp = 0;	/* unused */
2229 	ieee80211_input(ifp, m, ni, &rxi);
2230 
2231 	/* Restore BSS channel. */
2232 	if (ni == ic->ic_bss)
2233 		ni->ni_chan = bss_chan;
2234 
2235 	/* Node is no longer needed. */
2236 	ieee80211_release_node(ic, ni);
2237 }
2238 
2239 /* Process an incoming Compressed BlockAck. */
2240 void
2241 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2242     struct iwn_rx_data *data)
2243 {
2244 	struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
2245 	struct iwn_tx_ring *txq;
2246 
2247 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), sizeof (*ba),
2248 	    BUS_DMASYNC_POSTREAD);
2249 
2250 	txq = &sc->txq[letoh16(ba->qid)];
2251 	/* XXX TBD */
2252 }
2253 
2254 /*
2255  * Process a CALIBRATION_RESULT notification sent by the initialization
2256  * firmware in response to a CMD_CALIB_CONFIG command (5000 only).
2257  */
2258 void
2259 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2260     struct iwn_rx_data *data)
2261 {
2262 	struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
2263 	int len, idx = -1;
2264 
2265 	/* Runtime firmware should not send such a notification. */
2266 	if (sc->sc_flags & IWN_FLAG_CALIB_DONE)
2267 		return;
2268 
2269 	len = (letoh32(desc->len) & 0x3fff) - 4;
2270 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), len,
2271 	    BUS_DMASYNC_POSTREAD);
2272 
2273 	switch (calib->code) {
2274 	case IWN5000_PHY_CALIB_DC:
2275 		if (sc->hw_type == IWN_HW_REV_TYPE_5150 ||
2276 		    sc->hw_type == IWN_HW_REV_TYPE_2030 ||
2277 		    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
2278 		    sc->hw_type == IWN_HW_REV_TYPE_135 ||
2279 		    sc->hw_type == IWN_HW_REV_TYPE_105)
2280 			idx = 0;
2281 		break;
2282 	case IWN5000_PHY_CALIB_LO:
2283 		idx = 1;
2284 		break;
2285 	case IWN5000_PHY_CALIB_TX_IQ:
2286 		idx = 2;
2287 		break;
2288 	case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
2289 		if (sc->hw_type < IWN_HW_REV_TYPE_6000 &&
2290 		    sc->hw_type != IWN_HW_REV_TYPE_5150)
2291 			idx = 3;
2292 		break;
2293 	case IWN5000_PHY_CALIB_BASE_BAND:
2294 		idx = 4;
2295 		break;
2296 	}
2297 	if (idx == -1)	/* Ignore other results. */
2298 		return;
2299 
2300 	/* Save calibration result. */
2301 	if (sc->calibcmd[idx].buf != NULL)
2302 		free(sc->calibcmd[idx].buf, M_DEVBUF, 0);
2303 	sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
2304 	if (sc->calibcmd[idx].buf == NULL) {
2305 		DPRINTF(("not enough memory for calibration result %d\n",
2306 		    calib->code));
2307 		return;
2308 	}
2309 	DPRINTF(("saving calibration result code=%d len=%d\n",
2310 	    calib->code, len));
2311 	sc->calibcmd[idx].len = len;
2312 	memcpy(sc->calibcmd[idx].buf, calib, len);
2313 }
2314 
2315 /*
2316  * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2317  * The latter is sent by the firmware after each received beacon.
2318  */
2319 void
2320 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2321     struct iwn_rx_data *data)
2322 {
2323 	struct iwn_ops *ops = &sc->ops;
2324 	struct ieee80211com *ic = &sc->sc_ic;
2325 	struct iwn_calib_state *calib = &sc->calib;
2326 	struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2327 	int temp;
2328 
2329 	/* Ignore statistics received during a scan. */
2330 	if (ic->ic_state != IEEE80211_S_RUN)
2331 		return;
2332 
2333 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2334 	    sizeof (*stats), BUS_DMASYNC_POSTREAD);
2335 
2336 	DPRINTFN(3, ("received statistics (cmd=%d)\n", desc->type));
2337 	sc->calib_cnt = 0;	/* Reset TX power calibration timeout. */
2338 
2339 	/* Test if temperature has changed. */
2340 	if (stats->general.temp != sc->rawtemp) {
2341 		/* Convert "raw" temperature to degC. */
2342 		sc->rawtemp = stats->general.temp;
2343 		temp = ops->get_temperature(sc);
2344 		DPRINTFN(2, ("temperature=%dC\n", temp));
2345 
2346 		/* Update TX power if need be (4965AGN only). */
2347 		if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2348 			iwn4965_power_calibration(sc, temp);
2349 	}
2350 
2351 	if (desc->type != IWN_BEACON_STATISTICS)
2352 		return;	/* Reply to a statistics request. */
2353 
2354 	sc->noise = iwn_get_noise(&stats->rx.general);
2355 
2356 	/* Test that RSSI and noise are present in stats report. */
2357 	if (letoh32(stats->rx.general.flags) != 1) {
2358 		DPRINTF(("received statistics without RSSI\n"));
2359 		return;
2360 	}
2361 
2362 	/*
2363 	 * XXX Differential gain calibration makes the 6005 firmware
2364 	 * crap out, so skip it for now.  This effectively disables
2365 	 * sensitivity tuning as well.
2366 	 */
2367 	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
2368 		return;
2369 
2370 	if (calib->state == IWN_CALIB_STATE_ASSOC)
2371 		iwn_collect_noise(sc, &stats->rx.general);
2372 	else if (calib->state == IWN_CALIB_STATE_RUN)
2373 		iwn_tune_sensitivity(sc, &stats->rx);
2374 }
2375 
2376 /*
2377  * Process a TX_DONE firmware notification.  Unfortunately, the 4965AGN
2378  * and 5000 adapters use different, incompatible TX status formats.
2379  */
2380 void
2381 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2382     struct iwn_rx_data *data)
2383 {
2384 	struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2385 	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2386 	struct iwn_tx_data *txdata = &ring->data[desc->idx];
2387 	/* XXX 4965 does not report byte count */
2388 	uint16_t len = txdata->totlen + IEEE80211_CRC_LEN;
2389 
2390 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2391 	    sizeof (*stat), BUS_DMASYNC_POSTREAD);
2392 	iwn_tx_done(sc, desc, stat->nframes, stat->ackfailcnt,
2393 	    letoh32(stat->status) & 0xff, len);
2394 }
2395 
2396 void
2397 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2398     struct iwn_rx_data *data)
2399 {
2400 	struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2401 
2402 #ifdef notyet
2403 	/* Reset TX scheduler slot. */
2404 	iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
2405 #endif
2406 
2407 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2408 	    sizeof (*stat), BUS_DMASYNC_POSTREAD);
2409 	iwn_tx_done(sc, desc, stat->nframes, stat->ackfailcnt,
2410 	    letoh16(stat->status) & 0xff, letoh16(stat->len));
2411 }
2412 
2413 /*
2414  * Adapter-independent backend for TX_DONE firmware notifications.
2415  */
2416 void
2417 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, uint8_t nframes,
2418     uint8_t ackfailcnt, uint8_t status, uint16_t len)
2419 {
2420 	struct ieee80211com *ic = &sc->sc_ic;
2421 	struct ifnet *ifp = &ic->ic_if;
2422 	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2423 	struct iwn_tx_data *data = &ring->data[desc->idx];
2424 	struct iwn_node *wn = (void *)data->ni;
2425 	int txfail = (status != 1 && status != 2);
2426 
2427 	KASSERT(nframes == 1); /* We don't support aggregation yet. */
2428 
2429 	/* Update rate control statistics. */
2430 	if (data->ni->ni_flags & IEEE80211_NODE_HT) {
2431 		wn->mn.frames += nframes;
2432 		wn->mn.ampdu_size = len;
2433 		wn->mn.agglen = nframes;
2434 		if (ackfailcnt > 0)
2435 			wn->mn.retries += ackfailcnt;
2436 		if (txfail)
2437 			wn->mn.txfail += nframes;
2438 		if (ic->ic_state == IEEE80211_S_RUN)
2439 			ieee80211_mira_choose(&wn->mn, ic, data->ni);
2440 	} else {
2441 		wn->amn.amn_txcnt++;
2442 		if (ackfailcnt > 0)
2443 			wn->amn.amn_retrycnt++;
2444 	}
2445 	if (txfail) {
2446 		DPRINTF(("%s: status=0x%x\n", __func__, status));
2447 		ifp->if_oerrors++;
2448 	}
2449 
2450 	/* Unmap and free mbuf. */
2451 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
2452 	    BUS_DMASYNC_POSTWRITE);
2453 	bus_dmamap_unload(sc->sc_dmat, data->map);
2454 	m_freem(data->m);
2455 	data->m = NULL;
2456 	ieee80211_release_node(ic, data->ni);
2457 	data->ni = NULL;
2458 
2459 	sc->sc_tx_timer = 0;
2460 	if (--ring->queued < IWN_TX_RING_LOMARK) {
2461 		sc->qfullmsk &= ~(1 << ring->qid);
2462 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
2463 			ifq_clr_oactive(&ifp->if_snd);
2464 			(*ifp->if_start)(ifp);
2465 		}
2466 	}
2467 }
2468 
2469 /*
2470  * Process a "command done" firmware notification.  This is where we wakeup
2471  * processes waiting for a synchronous command completion.
2472  */
2473 void
2474 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2475 {
2476 	struct iwn_tx_ring *ring = &sc->txq[4];
2477 	struct iwn_tx_data *data;
2478 
2479 	if ((desc->qid & 0xf) != 4)
2480 		return;	/* Not a command ack. */
2481 
2482 	data = &ring->data[desc->idx];
2483 
2484 	/* If the command was mapped in an mbuf, free it. */
2485 	if (data->m != NULL) {
2486 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2487 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2488 		bus_dmamap_unload(sc->sc_dmat, data->map);
2489 		m_freem(data->m);
2490 		data->m = NULL;
2491 	}
2492 	wakeup(&ring->desc[desc->idx]);
2493 }
2494 
2495 /*
2496  * Process an INT_FH_RX or INT_SW_RX interrupt.
2497  */
2498 void
2499 iwn_notif_intr(struct iwn_softc *sc)
2500 {
2501 	struct iwn_ops *ops = &sc->ops;
2502 	struct ieee80211com *ic = &sc->sc_ic;
2503 	struct ifnet *ifp = &ic->ic_if;
2504 	uint16_t hw;
2505 
2506 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
2507 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
2508 
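	/*
	 * closed_count is updated by the firmware and indicates how far
	 * it has advanced in the RX ring; process entries up to it.
	 */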
2509 	hw = letoh16(sc->rxq.stat->closed_count) & 0xfff;
2510 	while (sc->rxq.cur != hw) {
2511 		struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2512 		struct iwn_rx_desc *desc;
2513 
2514 		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof (*desc),
2515 		    BUS_DMASYNC_POSTREAD);
2516 		desc = mtod(data->m, struct iwn_rx_desc *);
2517 
2518 		DPRINTFN(4, ("notification qid=%d idx=%d flags=%x type=%d\n",
2519 		    desc->qid & 0xf, desc->idx, desc->flags, desc->type));
2520 
2521 		if (!(desc->qid & 0x80))	/* Reply to a command. */
2522 			iwn_cmd_done(sc, desc);
2523 
2524 		switch (desc->type) {
2525 		case IWN_RX_PHY:
2526 			iwn_rx_phy(sc, desc, data);
2527 			break;
2528 
2529 		case IWN_RX_DONE:		/* 4965AGN only. */
2530 		case IWN_MPDU_RX_DONE:
2531 			/* An 802.11 frame has been received. */
2532 			iwn_rx_done(sc, desc, data);
2533 			break;
2534 		case IWN_RX_COMPRESSED_BA:
2535 			/* A Compressed BlockAck has been received. */
2536 			iwn_rx_compressed_ba(sc, desc, data);
2537 			break;
2538 		case IWN_TX_DONE:
2539 			/* An 802.11 frame has been transmitted. */
2540 			ops->tx_done(sc, desc, data);
2541 			break;
2542 
2543 		case IWN_RX_STATISTICS:
2544 		case IWN_BEACON_STATISTICS:
2545 			iwn_rx_statistics(sc, desc, data);
2546 			break;
2547 
2548 		case IWN_BEACON_MISSED:
2549 		{
2550 			struct iwn_beacon_missed *miss =
2551 			    (struct iwn_beacon_missed *)(desc + 1);
2552 			uint32_t missed;
2553 
2554 			if ((ic->ic_opmode != IEEE80211_M_STA) ||
2555 			    (ic->ic_state != IEEE80211_S_RUN))
2556 				break;
2557 
2558 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2559 			    sizeof (*miss), BUS_DMASYNC_POSTREAD);
2560 			missed = letoh32(miss->consecutive);
2561 
2562 			/*
2563 			 * If more than 5 consecutive beacons are missed,
2564 			 * reinitialize the sensitivity state machine.
2565 			 */
2566 			if (missed > 5)
2567 				(void)iwn_init_sensitivity(sc);
2568 
2569 			/*
2570 			 * Rather than go directly to scan state, try to send a
2571 			 * directed probe request first. If that fails then the
2572 			 * state machine will drop us into scanning after timing
2573 			 * out waiting for a probe response.
2574 			 */
2575 			if (missed > ic->ic_bmissthres && !ic->ic_mgt_timer)
2576 				IEEE80211_SEND_MGMT(ic, ic->ic_bss,
2577 				    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
2578 			break;
2579 		}
2580 		case IWN_UC_READY:
2581 		{
2582 			struct iwn_ucode_info *uc =
2583 			    (struct iwn_ucode_info *)(desc + 1);
2584 
2585 			/* The microcontroller is ready. */
2586 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2587 			    sizeof (*uc), BUS_DMASYNC_POSTREAD);
2588 			DPRINTF(("microcode alive notification version=%d.%d "
2589 			    "subtype=%x alive=%x\n", uc->major, uc->minor,
2590 			    uc->subtype, letoh32(uc->valid)));
2591 
2592 			if (letoh32(uc->valid) != 1) {
2593 				printf("%s: microcontroller initialization "
2594 				    "failed\n", sc->sc_dev.dv_xname);
2595 				break;
2596 			}
2597 			if (uc->subtype == IWN_UCODE_INIT) {
2598 				/* Save microcontroller report. */
2599 				memcpy(&sc->ucode_info, uc, sizeof (*uc));
2600 			}
2601 			/* Save the address of the error log in SRAM. */
2602 			sc->errptr = letoh32(uc->errptr);
2603 			break;
2604 		}
2605 		case IWN_STATE_CHANGED:
2606 		{
2607 			uint32_t *status = (uint32_t *)(desc + 1);
2608 
2609 			/* Enabled/disabled notification. */
2610 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2611 			    sizeof (*status), BUS_DMASYNC_POSTREAD);
2612 			DPRINTF(("state changed to %x\n", letoh32(*status)));
2613 
2614 			if (letoh32(*status) & 1) {
2615 				/* Radio transmitter is off, power down. */
2616 				iwn_stop(ifp, 1);
2617 				return;	/* No further processing. */
2618 			}
2619 			break;
2620 		}
2621 		case IWN_START_SCAN:
2622 		{
2623 			struct iwn_start_scan *scan =
2624 			    (struct iwn_start_scan *)(desc + 1);
2625 
2626 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2627 			    sizeof (*scan), BUS_DMASYNC_POSTREAD);
2628 			DPRINTFN(2, ("scanning channel %d status %x\n",
2629 			    scan->chan, letoh32(scan->status)));
2630 
2631 			if (sc->sc_flags & IWN_FLAG_BGSCAN)
2632 				break;
2633 
2634 			/* Fix current channel. */
2635 			ic->ic_bss->ni_chan = &ic->ic_channels[scan->chan];
2636 			break;
2637 		}
2638 		case IWN_STOP_SCAN:
2639 		{
2640 			struct iwn_stop_scan *scan =
2641 			    (struct iwn_stop_scan *)(desc + 1);
2642 
2643 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2644 			    sizeof (*scan), BUS_DMASYNC_POSTREAD);
2645 			DPRINTF(("scan finished nchan=%d status=%d chan=%d\n",
2646 			    scan->nchan, scan->status, scan->chan));
2647 
2648 			if (scan->status == 1 && scan->chan <= 14 &&
2649 			    (sc->sc_flags & IWN_FLAG_HAS_5GHZ)) {
2650 				int error;
2651 				/*
2652 				 * We just finished scanning 2GHz channels,
2653 				 * start scanning 5GHz ones.
2654 				 */
2655 				error = iwn_scan(sc, IEEE80211_CHAN_5GHZ,
2656 				    (sc->sc_flags & IWN_FLAG_BGSCAN) ? 1 : 0);
2657 				if (error == 0)
2658 					break;
2659 			}
2660 			sc->sc_flags &= ~IWN_FLAG_SCANNING;
2661 			sc->sc_flags &= ~IWN_FLAG_BGSCAN;
2662 			ieee80211_end_scan(ifp);
2663 			break;
2664 		}
2665 		case IWN5000_CALIBRATION_RESULT:
2666 			iwn5000_rx_calib_results(sc, desc, data);
2667 			break;
2668 
2669 		case IWN5000_CALIBRATION_DONE:
2670 			sc->sc_flags |= IWN_FLAG_CALIB_DONE;
2671 			wakeup(sc);
2672 			break;
2673 		}
2674 
2675 		sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
2676 	}
2677 
2678 	/* Tell the firmware what we have processed. */
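	/* The device expects the RX write pointer in multiples of 8. */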
2679 	hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
2680 	IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
2681 }
2682 
2683 /*
2684  * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
2685  * from power-down sleep mode.
2686  */
2687 void
2688 iwn_wakeup_intr(struct iwn_softc *sc)
2689 {
2690 	int qid;
2691 
2692 	DPRINTF(("ucode wakeup from power-down sleep\n"));
2693 
2694 	/* Wakeup RX and TX rings. */
2695 	IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
2696 	for (qid = 0; qid < sc->ntxqs; qid++) {
2697 		struct iwn_tx_ring *ring = &sc->txq[qid];
2698 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
2699 	}
2700 }
2701 
2702 #ifdef IWN_DEBUG
2703 /*
2704  * Dump the error log of the firmware when a firmware panic occurs.  Although
2705  * we can't debug the firmware itself (it is neither open source nor free),
2706  * the error log can help us identify certain classes of problems.
2707  */
2708 void
2709 iwn_fatal_intr(struct iwn_softc *sc)
2710 {
2711 	struct iwn_fw_dump dump;
2712 	int i;
2713 
2714 	/* Check that the error log address is valid. */
2715 	if (sc->errptr < IWN_FW_DATA_BASE ||
2716 	    sc->errptr + sizeof (dump) >
2717 	    IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
2718 		printf("%s: bad firmware error log address 0x%08x\n",
2719 		    sc->sc_dev.dv_xname, sc->errptr);
2720 		return;
2721 	}
2722 	if (iwn_nic_lock(sc) != 0) {
2723 		printf("%s: could not read firmware error log\n",
2724 		    sc->sc_dev.dv_xname);
2725 		return;
2726 	}
2727 	/* Read firmware error log from SRAM. */
2728 	iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
2729 	    sizeof (dump) / sizeof (uint32_t));
2730 	iwn_nic_unlock(sc);
2731 
2732 	if (dump.valid == 0) {
2733 		printf("%s: firmware error log is empty\n",
2734 		    sc->sc_dev.dv_xname);
2735 		return;
2736 	}
2737 	printf("firmware error log:\n");
2738 	printf("  error type      = \"%s\" (0x%08X)\n",
2739 	    (dump.id < nitems(iwn_fw_errmsg)) ?
2740 		iwn_fw_errmsg[dump.id] : "UNKNOWN",
2741 	    dump.id);
2742 	printf("  program counter = 0x%08X\n", dump.pc);
2743 	printf("  source line     = 0x%08X\n", dump.src_line);
2744 	printf("  error data      = 0x%08X%08X\n",
2745 	    dump.error_data[0], dump.error_data[1]);
2746 	printf("  branch link     = 0x%08X%08X\n",
2747 	    dump.branch_link[0], dump.branch_link[1]);
2748 	printf("  interrupt link  = 0x%08X%08X\n",
2749 	    dump.interrupt_link[0], dump.interrupt_link[1]);
2750 	printf("  time            = %u\n", dump.time[0]);
2751 
2752 	/* Dump driver status (TX and RX rings) while we're here. */
2753 	printf("driver status:\n");
2754 	for (i = 0; i < sc->ntxqs; i++) {
2755 		struct iwn_tx_ring *ring = &sc->txq[i];
2756 		printf("  tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
2757 		    i, ring->qid, ring->cur, ring->queued);
2758 	}
2759 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
2760 	printf("  802.11 state %d\n", sc->sc_ic.ic_state);
2761 }
2762 #endif
2763 
2764 int
2765 iwn_intr(void *arg)
2766 {
2767 	struct iwn_softc *sc = arg;
2768 	struct ifnet *ifp = &sc->sc_ic.ic_if;
2769 	uint32_t r1, r2, tmp;
2770 
2771 	/* Disable interrupts. */
2772 	IWN_WRITE(sc, IWN_INT_MASK, 0);
2773 
2774 	/* Read interrupts from ICT (fast) or from registers (slow). */
2775 	if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2776 		tmp = 0;
2777 		while (sc->ict[sc->ict_cur] != 0) {
2778 			tmp |= sc->ict[sc->ict_cur];
2779 			sc->ict[sc->ict_cur] = 0;	/* Acknowledge. */
2780 			sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
2781 		}
2782 		tmp = letoh32(tmp);
2783 		if (tmp == 0xffffffff)	/* Shouldn't happen. */
2784 			tmp = 0;
2785 		else if (tmp & 0xc0000)	/* Work around a HW bug. */
2786 			tmp |= 0x8000;
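		/*
		 * The ICT table stores interrupt causes in a compacted
		 * layout; rebuild the INT register layout from it.
		 */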
2787 		r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
2788 		r2 = 0;	/* Unused. */
2789 	} else {
2790 		r1 = IWN_READ(sc, IWN_INT);
2791 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
2792 			return 0;	/* Hardware gone! */
2793 		r2 = IWN_READ(sc, IWN_FH_INT);
2794 	}
2795 	if (r1 == 0 && r2 == 0) {
2796 		if (ifp->if_flags & IFF_UP)
2797 			IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2798 		return 0;	/* Interrupt not for us. */
2799 	}
2800 
2801 	/* Acknowledge interrupts. */
2802 	IWN_WRITE(sc, IWN_INT, r1);
2803 	if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
2804 		IWN_WRITE(sc, IWN_FH_INT, r2);
2805 
2806 	if (r1 & IWN_INT_RF_TOGGLED) {
2807 		tmp = IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL;
2808 		printf("%s: RF switch: radio %s\n", sc->sc_dev.dv_xname,
2809 		    tmp ? "enabled" : "disabled");
2810 		if (tmp)
2811 			task_add(systq, &sc->init_task);
2812 	}
2813 	if (r1 & IWN_INT_CT_REACHED) {
2814 		printf("%s: critical temperature reached!\n",
2815 		    sc->sc_dev.dv_xname);
2816 	}
2817 	if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
2818 		printf("%s: fatal firmware error\n", sc->sc_dev.dv_xname);
2819 
2820 		/* Force a complete recalibration on next init. */
2821 		sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
2822 
2823 		/* Dump firmware error log and stop. */
2824 #ifdef IWN_DEBUG
2825 		iwn_fatal_intr(sc);
2826 #endif
2827 		iwn_stop(ifp, 1);
2828 		task_add(systq, &sc->init_task);
2829 		return 1;
2830 	}
2831 	if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
2832 	    (r2 & IWN_FH_INT_RX)) {
2833 		if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2834 			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
2835 				IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
2836 			IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2837 			    IWN_INT_PERIODIC_DIS);
2838 			iwn_notif_intr(sc);
2839 			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
2840 				IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2841 				    IWN_INT_PERIODIC_ENA);
2842 			}
2843 		} else
2844 			iwn_notif_intr(sc);
2845 	}
2846 
2847 	if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
2848 		if (sc->sc_flags & IWN_FLAG_USE_ICT)
2849 			IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
2850 		wakeup(sc);	/* FH DMA transfer completed. */
2851 	}
2852 
2853 	if (r1 & IWN_INT_ALIVE)
2854 		wakeup(sc);	/* Firmware is alive. */
2855 
2856 	if (r1 & IWN_INT_WAKEUP)
2857 		iwn_wakeup_intr(sc);
2858 
2859 	/* Re-enable interrupts. */
2860 	if (ifp->if_flags & IFF_UP)
2861 		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2862 
2863 	return 1;
2864 }
2865 
2866 /*
2867  * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
2868  * 5000 adapters use slightly different formats).
2869  */
2870 void
2871 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
2872     uint16_t len)
2873 {
2874 	uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
2875 
2876 	*w = htole16(len + 8);
2877 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2878 	    (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t),
2879 	    BUS_DMASYNC_PREWRITE);
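	/*
	 * Entries within the first scheduler window are duplicated at the
	 * end of the array so the scheduler always reads a contiguous
	 * window, even across the ring wrap-around.
	 */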
2880 	if (idx < IWN_SCHED_WINSZ) {
2881 		*(w + IWN_TX_RING_COUNT) = *w;
2882 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2883 		    (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr,
2884 		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
2885 	}
2886 }
2887 
2888 void
2889 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
2890     uint16_t len)
2891 {
2892 	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
2893 
2894 	*w = htole16(id << 12 | (len + 8));
2895 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2896 	    (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t),
2897 	    BUS_DMASYNC_PREWRITE);
2898 	if (idx < IWN_SCHED_WINSZ) {
2899 		*(w + IWN_TX_RING_COUNT) = *w;
2900 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2901 		    (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr,
2902 		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
2903 	}
2904 }
2905 
2906 void
2907 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
2908 {
2909 	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
2910 
2911 	*w = (*w & htole16(0xf000)) | htole16(1);
2912 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2913 	    (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t),
2914 	    BUS_DMASYNC_PREWRITE);
2915 	if (idx < IWN_SCHED_WINSZ) {
2916 		*(w + IWN_TX_RING_COUNT) = *w;
2917 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2918 		    (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr,
2919 		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
2920 	}
2921 }
2922 
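/*
 * Map a net80211 rate value (in 500 kb/s units) to its index in the
 * iwn_rates[] table; returns nitems(iwn_rates) if there is no match.
 */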
2923 int
2924 iwn_rval2ridx(int rval)
2925 {
2926 	int ridx;
2927 
2928 	for (ridx = 0; ridx < nitems(iwn_rates); ridx++) {
2929 		if (rval == iwn_rates[ridx].rate)
2930 			break;
2931 	}
2932 
2933 	return ridx;
2934 }
2935 
2936 int
2937 iwn_tx(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
2938 {
2939 	struct ieee80211com *ic = &sc->sc_ic;
2940 	struct iwn_node *wn = (void *)ni;
2941 	struct iwn_tx_ring *ring;
2942 	struct iwn_tx_desc *desc;
2943 	struct iwn_tx_data *data;
2944 	struct iwn_tx_cmd *cmd;
2945 	struct iwn_cmd_data *tx;
2946 	const struct iwn_rate *rinfo;
2947 	struct ieee80211_frame *wh;
2948 	struct ieee80211_key *k = NULL;
2949 	enum ieee80211_edca_ac ac;
2950 	uint32_t flags;
2951 	uint16_t qos;
2952 	u_int hdrlen;
2953 	bus_dma_segment_t *seg;
2954 	uint8_t *ivp, tid, ridx, txant, type;
2955 	int i, totlen, hasqos, error, pad;
2956 
2957 	wh = mtod(m, struct ieee80211_frame *);
2958 	hdrlen = ieee80211_get_hdrlen(wh);
2959 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2960 
2961 	/* Select EDCA Access Category and TX ring for this frame. */
2962 	if ((hasqos = ieee80211_has_qos(wh))) {
2963 		qos = ieee80211_get_qos(wh);
2964 		tid = qos & IEEE80211_QOS_TID;
2965 		ac = ieee80211_up_to_ac(ic, tid);
2966 	} else {
2967 		qos = 0;
2968 		tid = 0;
2969 		ac = EDCA_AC_BE;
2970 	}
2971 
2972 	ring = &sc->txq[ac];
2973 	desc = &ring->desc[ring->cur];
2974 	data = &ring->data[ring->cur];
2975 
2976 	/* Choose a TX rate index. */
2977 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
2978 	    type != IEEE80211_FC0_TYPE_DATA)
2979 		ridx = iwn_rval2ridx(ieee80211_min_basic_rate(ic));
2980 	else if (ic->ic_fixed_mcs != -1)
2981 		ridx = sc->fixed_ridx;
2982 	else if (ic->ic_fixed_rate != -1)
2983 		ridx = sc->fixed_ridx;
2984 	else {
2985 		if (ni->ni_flags & IEEE80211_NODE_HT)
2986 			ridx = iwn_mcs2ridx[ni->ni_txmcs];
2987 		else
2988 			ridx = wn->ridx[ni->ni_txrate];
2989 	}
2990 	rinfo = &iwn_rates[ridx];
2991 #if NBPFILTER > 0
2992 	if (sc->sc_drvbpf != NULL) {
2993 		struct mbuf mb;
2994 		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
2995 		uint16_t chan_flags;
2996 
2997 		tap->wt_flags = 0;
2998 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
2999 		chan_flags = ni->ni_chan->ic_flags;
3000 		if (ic->ic_curmode != IEEE80211_MODE_11N)
3001 			chan_flags &= ~IEEE80211_CHAN_HT;
3002 		tap->wt_chan_flags = htole16(chan_flags);
3003 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3004 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3005 		    type == IEEE80211_FC0_TYPE_DATA) {
3006 			tap->wt_rate = (0x80 | ni->ni_txmcs);
3007 		} else
3008 			tap->wt_rate = rinfo->rate;
3009 		tap->wt_hwqueue = ac;
3010 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
3011 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
3012 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3013 
3014 		mb.m_data = (caddr_t)tap;
3015 		mb.m_len = sc->sc_txtap_len;
3016 		mb.m_next = m;
3017 		mb.m_nextpkt = NULL;
3018 		mb.m_type = 0;
3019 		mb.m_flags = 0;
3020 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
3021 	}
3022 #endif
3023 
3024 	totlen = m->m_pkthdr.len;
3025 
3026 	/* Encrypt the frame if need be. */
3027 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3028 		/* Retrieve key for TX. */
3029 		k = ieee80211_get_txkey(ic, wh, ni);
3030 		if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
3031 			/* Do software encryption. */
3032 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
3033 				return ENOBUFS;
3034 			/* 802.11 header may have moved. */
3035 			wh = mtod(m, struct ieee80211_frame *);
3036 			totlen = m->m_pkthdr.len;
3037 
3038 		} else	/* HW appends CCMP MIC. */
3039 			totlen += IEEE80211_CCMP_HDRLEN;
3040 	}
3041 
3042 	data->totlen = totlen;
3043 
3044 	/* Prepare TX firmware command. */
3045 	cmd = &ring->cmd[ring->cur];
3046 	cmd->code = IWN_CMD_TX_DATA;
3047 	cmd->flags = 0;
3048 	cmd->qid = ring->qid;
3049 	cmd->idx = ring->cur;
3050 
3051 	tx = (struct iwn_cmd_data *)cmd->data;
3052 	/* NB: No need to clear tx, all fields are reinitialized here. */
3053 	tx->scratch = 0;	/* clear "scratch" area */
3054 
3055 	flags = 0;
3056 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3057 		/* Unicast frame, check if an ACK is expected. */
3058 		if (!hasqos || (qos & IEEE80211_QOS_ACK_POLICY_MASK) !=
3059 		    IEEE80211_QOS_ACK_POLICY_NOACK)
3060 			flags |= IWN_TX_NEED_ACK;
3061 	}
3062 	if ((wh->i_fc[0] &
3063 	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
3064 	    (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
3065 		flags |= IWN_TX_IMM_BA;		/* Cannot happen yet. */
3066 
3067 	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
3068 		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */
3069 
3070 	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
3071 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3072 		/* NB: Group frames are sent using CCK in 802.11b/g/n (2GHz). */
3073 		if (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) {
3074 			flags |= IWN_TX_NEED_RTS;
3075 		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
3076 		    ridx >= IWN_RIDX_OFDM6) {
3077 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
3078 				flags |= IWN_TX_NEED_CTS;
3079 			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
3080 				flags |= IWN_TX_NEED_RTS;
3081 		}
3082 
3083 		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
3084 			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3085 				/* 5000 autoselects RTS/CTS or CTS-to-self. */
3086 				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
3087 				flags |= IWN_TX_NEED_PROTECTION;
3088 			} else
3089 				flags |= IWN_TX_FULL_TXOP;
3090 		}
3091 	}
3092 
3093 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3094 	    type != IEEE80211_FC0_TYPE_DATA)
3095 		tx->id = sc->broadcast_id;
3096 	else
3097 		tx->id = wn->id;
3098 
3099 	if (type == IEEE80211_FC0_TYPE_MGT) {
3100 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3101 
3102 #ifndef IEEE80211_STA_ONLY
3103 		/* Tell HW to set timestamp in probe responses. */
3104 		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3105 			flags |= IWN_TX_INSERT_TSTAMP;
3106 #endif
3107 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3108 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3109 			tx->timeout = htole16(3);
3110 		else
3111 			tx->timeout = htole16(2);
3112 	} else
3113 		tx->timeout = htole16(0);
3114 
3115 	if (hdrlen & 3) {
3116 		/* First segment length must be a multiple of 4. */
3117 		flags |= IWN_TX_NEED_PADDING;
3118 		pad = 4 - (hdrlen & 3);
3119 	} else
3120 		pad = 0;
3121 
3122 	tx->len = htole16(totlen);
3123 	tx->tid = tid;
3124 	tx->rts_ntries = 60;
3125 	tx->data_ntries = 15;
3126 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3127 
3128 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3129 	    tx->id != sc->broadcast_id)
3130 		tx->plcp = rinfo->ht_plcp;
3131 	else
3132 		tx->plcp = rinfo->plcp;
3133 
3134 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3135 	    tx->id != sc->broadcast_id) {
3136 		tx->rflags = rinfo->ht_flags;
3137 		if (ni->ni_htcaps & IEEE80211_HTCAP_SGI20)
3138 			tx->rflags |= IWN_RFLAG_SGI;
3139 	} else
3141 		tx->rflags = rinfo->flags;
3142 	if (tx->id == sc->broadcast_id) {
3143 		/* Group or management frame. */
3144 		tx->linkq = 0;
3145 		/* XXX Alternate between antenna A and B? */
3146 		txant = IWN_LSB(sc->txchainmask);
3147 		tx->rflags |= IWN_RFLAG_ANT(txant);
3148 	} else {
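		/*
		 * linkq indexes the link quality (retry rate) table, which
		 * is ordered from highest to lowest rate; start at the
		 * node's current TX rate.
		 */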
3149 		if (ni->ni_flags & IEEE80211_NODE_HT)
3150 			tx->linkq = 7 - ni->ni_txmcs; /* XXX revisit for MIMO */
3151 		else
3152 			tx->linkq = ni->ni_rates.rs_nrates - ni->ni_txrate - 1;
3153 		flags |= IWN_TX_LINKQ;	/* enable MRR */
3154 	}
3155 	/* Set physical address of "scratch area". */
3156 	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3157 	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3158 
3159 	/* Copy 802.11 header in TX command. */
3160 	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3161 
3162 	if (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) {
3163 		/* Trim 802.11 header and prepend CCMP IV. */
3164 		m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN);
3165 		ivp = mtod(m, uint8_t *);
3166 		k->k_tsc++;
3167 		ivp[0] = k->k_tsc;
3168 		ivp[1] = k->k_tsc >> 8;
3169 		ivp[2] = 0;
3170 		ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV;
3171 		ivp[4] = k->k_tsc >> 16;
3172 		ivp[5] = k->k_tsc >> 24;
3173 		ivp[6] = k->k_tsc >> 32;
3174 		ivp[7] = k->k_tsc >> 40;
3175 
3176 		tx->security = IWN_CIPHER_CCMP;
3177 		/* XXX flags |= IWN_TX_AMPDU_CCMP; */
3178 		memcpy(tx->key, k->k_key, k->k_len);
3179 
3180 		/* TX scheduler includes CCMP MIC len w/5000 Series. */
3181 		if (sc->hw_type != IWN_HW_REV_TYPE_4965)
3182 			totlen += IEEE80211_CCMP_MICLEN;
3183 	} else {
3184 		/* Trim 802.11 header. */
3185 		m_adj(m, hdrlen);
3186 		tx->security = 0;
3187 	}
3188 	tx->flags = htole32(flags);
3189 
3190 	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3191 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3192 	if (error != 0 && error != EFBIG) {
3193 		printf("%s: can't map mbuf (error %d)\n",
3194 		    sc->sc_dev.dv_xname, error);
3195 		m_freem(m);
3196 		return error;
3197 	}
3198 	if (error != 0) {
3199 		/* Too many DMA segments, linearize mbuf. */
3200 		if (m_defrag(m, M_DONTWAIT)) {
3201 			m_freem(m);
3202 			return ENOBUFS;
3203 		}
3204 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3205 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3206 		if (error != 0) {
3207 			printf("%s: can't map mbuf (error %d)\n",
3208 			    sc->sc_dev.dv_xname, error);
3209 			m_freem(m);
3210 			return error;
3211 		}
3212 	}
3213 
3214 	data->m = m;
3215 	data->ni = ni;
3216 
3217 	DPRINTFN(4, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
3218 	    ring->qid, ring->cur, m->m_pkthdr.len, data->map->dm_nsegs));
3219 
3220 	/* Fill TX descriptor. */
3221 	desc->nsegs = 1 + data->map->dm_nsegs;
3222 	/* First DMA segment is used by the TX command. */
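	/*
	 * Each segment entry packs extra (high) DMA address bits into the
	 * low 4 bits of 'len' and the byte count shifted left by 4.
	 */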
3223 	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3224 	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
3225 	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
3226 	/* Other DMA segments are for data payload. */
3227 	seg = data->map->dm_segs;
3228 	for (i = 1; i <= data->map->dm_nsegs; i++) {
3229 		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
3230 		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
3231 		    seg->ds_len << 4);
3232 		seg++;
3233 	}
3234 
3235 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
3236 	    BUS_DMASYNC_PREWRITE);
3237 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
3238 	    (caddr_t)cmd - ring->cmd_dma.vaddr, sizeof (*cmd),
3239 	    BUS_DMASYNC_PREWRITE);
3240 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3241 	    (caddr_t)desc - ring->desc_dma.vaddr, sizeof (*desc),
3242 	    BUS_DMASYNC_PREWRITE);
3243 
3244 #ifdef notyet
3245 	/* Update TX scheduler. */
3246 	ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3247 #endif
3248 
3249 	/* Kick TX ring. */
3250 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3251 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3252 
3253 	/* Mark TX ring as full if we reach a certain threshold. */
3254 	if (++ring->queued > IWN_TX_RING_HIMARK)
3255 		sc->qfullmsk |= 1 << ring->qid;
3256 
3257 	return 0;
3258 }
3259 
3260 void
3261 iwn_start(struct ifnet *ifp)
3262 {
3263 	struct iwn_softc *sc = ifp->if_softc;
3264 	struct ieee80211com *ic = &sc->sc_ic;
3265 	struct ieee80211_node *ni;
3266 	struct mbuf *m;
3267 
3268 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
3269 		return;
3270 
3271 	for (;;) {
3272 		if (sc->qfullmsk != 0) {
3273 			ifq_set_oactive(&ifp->if_snd);
3274 			break;
3275 		}
3276 		/* Send pending management frames first. */
3277 		m = mq_dequeue(&ic->ic_mgtq);
3278 		if (m != NULL) {
3279 			ni = m->m_pkthdr.ph_cookie;
3280 			goto sendit;
3281 		}
3282 		if (ic->ic_state != IEEE80211_S_RUN ||
3283 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
3284 			break;
3285 
3286 		/* Encapsulate and send data frames. */
3287 		IFQ_DEQUEUE(&ifp->if_snd, m);
3288 		if (m == NULL)
3289 			break;
3290 #if NBPFILTER > 0
3291 		if (ifp->if_bpf != NULL)
3292 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
3293 #endif
3294 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL)
3295 			continue;
3296 sendit:
3297 #if NBPFILTER > 0
3298 		if (ic->ic_rawbpf != NULL)
3299 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
3300 #endif
3301 		if (iwn_tx(sc, m, ni) != 0) {
3302 			ieee80211_release_node(ic, ni);
3303 			ifp->if_oerrors++;
3304 			continue;
3305 		}
3306 
3307 		sc->sc_tx_timer = 5;
3308 		ifp->if_timer = 1;
3309 	}
3310 }
3311 
3312 void
3313 iwn_watchdog(struct ifnet *ifp)
3314 {
3315 	struct iwn_softc *sc = ifp->if_softc;
3316 
3317 	ifp->if_timer = 0;
3318 
3319 	if (sc->sc_tx_timer > 0) {
3320 		if (--sc->sc_tx_timer == 0) {
3321 			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
3322 			iwn_stop(ifp, 1);
3323 			ifp->if_oerrors++;
3324 			return;
3325 		}
3326 		ifp->if_timer = 1;
3327 	}
3328 
3329 	ieee80211_watchdog(ifp);
3330 }
3331 
3332 int
3333 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
3334 {
3335 	struct iwn_softc *sc = ifp->if_softc;
3336 	struct ieee80211com *ic = &sc->sc_ic;
3337 	int s, error = 0;
3338 
3339 	error = rw_enter(&sc->sc_rwlock, RW_WRITE | RW_INTR);
3340 	if (error)
3341 		return error;
3342 	s = splnet();
3343 
3344 	switch (cmd) {
3345 	case SIOCSIFADDR:
3346 		ifp->if_flags |= IFF_UP;
3347 		/* FALLTHROUGH */
3348 	case SIOCSIFFLAGS:
3349 		if (ifp->if_flags & IFF_UP) {
3350 			if (!(ifp->if_flags & IFF_RUNNING))
3351 				error = iwn_init(ifp);
3352 		} else {
3353 			if (ifp->if_flags & IFF_RUNNING)
3354 				iwn_stop(ifp, 1);
3355 		}
3356 		break;
3357 
3358 	case SIOCS80211POWER:
3359 		error = ieee80211_ioctl(ifp, cmd, data);
3360 		if (error != ENETRESET)
3361 			break;
3362 		if (ic->ic_state == IEEE80211_S_RUN &&
3363 		    sc->calib.state == IWN_CALIB_STATE_RUN) {
3364 			if (ic->ic_flags & IEEE80211_F_PMGTON)
3365 				error = iwn_set_pslevel(sc, 0, 3, 0);
3366 			else	/* back to CAM */
3367 				error = iwn_set_pslevel(sc, 0, 0, 0);
3368 		} else {
3369 			/* Defer until transition to IWN_CALIB_STATE_RUN. */
3370 			error = 0;
3371 		}
3372 		break;
3373 
3374 	default:
3375 		error = ieee80211_ioctl(ifp, cmd, data);
3376 	}
3377 
3378 	if (error == ENETRESET) {
3379 		error = 0;
3380 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
3381 		    (IFF_UP | IFF_RUNNING)) {
3382 			iwn_stop(ifp, 0);
3383 			error = iwn_init(ifp);
3384 		}
3385 	}
3386 
3387 	splx(s);
3388 	rw_exit_write(&sc->sc_rwlock);
3389 	return error;
3390 }
3391 
3392 /*
3393  * Send a command to the firmware.
3394  */
3395 int
3396 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
3397 {
3398 	struct iwn_tx_ring *ring = &sc->txq[4];
3399 	struct iwn_tx_desc *desc;
3400 	struct iwn_tx_data *data;
3401 	struct iwn_tx_cmd *cmd;
3402 	struct mbuf *m;
3403 	bus_addr_t paddr;
3404 	int totlen, error;
3405 
3406 	desc = &ring->desc[ring->cur];
3407 	data = &ring->data[ring->cur];
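	/* 4 bytes of command header (code, flags, idx, qid) plus payload. */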
3408 	totlen = 4 + size;
3409 
3410 	if (size > sizeof cmd->data) {
3411 		/* Command is too large to fit in a descriptor. */
3412 		if (totlen > MCLBYTES)
3413 			return EINVAL;
3414 		MGETHDR(m, M_DONTWAIT, MT_DATA);
3415 		if (m == NULL)
3416 			return ENOMEM;
3417 		if (totlen > MHLEN) {
3418 			MCLGET(m, M_DONTWAIT);
3419 			if (!(m->m_flags & M_EXT)) {
3420 				m_freem(m);
3421 				return ENOMEM;
3422 			}
3423 		}
3424 		cmd = mtod(m, struct iwn_tx_cmd *);
3425 		error = bus_dmamap_load(sc->sc_dmat, data->map, cmd, totlen,
3426 		    NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3427 		if (error != 0) {
3428 			m_freem(m);
3429 			return error;
3430 		}
3431 		data->m = m;
3432 		paddr = data->map->dm_segs[0].ds_addr;
3433 	} else {
3434 		cmd = &ring->cmd[ring->cur];
3435 		paddr = data->cmd_paddr;
3436 	}
3437 
3438 	cmd->code = code;
3439 	cmd->flags = 0;
3440 	cmd->qid = ring->qid;
3441 	cmd->idx = ring->cur;
3442 	memcpy(cmd->data, buf, size);
3443 
3444 	desc->nsegs = 1;
3445 	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
3446 	desc->segs[0].len  = htole16(IWN_HIADDR(paddr) | totlen << 4);
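	/*
	 * Note: the low nibble of the 16-bit length field carries the
	 * high-order DMA address bits (IWN_HIADDR), hence "totlen << 4".
	 */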
3447 
3448 	if (size > sizeof cmd->data) {
3449 		bus_dmamap_sync(sc->sc_dmat, data->map, 0, totlen,
3450 		    BUS_DMASYNC_PREWRITE);
3451 	} else {
3452 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
3453 		    (caddr_t)cmd - ring->cmd_dma.vaddr, totlen,
3454 		    BUS_DMASYNC_PREWRITE);
3455 	}
3456 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3457 	    (caddr_t)desc - ring->desc_dma.vaddr, sizeof (*desc),
3458 	    BUS_DMASYNC_PREWRITE);
3459 
3460 #ifdef notyet
3461 	/* Update TX scheduler. */
3462 	ops->update_sched(sc, ring->qid, ring->cur, 0, 0);
3463 #endif
3464 
3465 	/* Kick command ring. */
3466 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3467 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3468 
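	/*
	 * Synchronous commands sleep on the descriptor address, presumably
	 * woken from the command-completion path, with a timeout of hz
	 * ticks (one second).
	 */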
3469 	return async ? 0 : tsleep(desc, PCATCH, "iwncmd", hz);
3470 }
3471 
3472 int
3473 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3474 {
3475 	struct iwn4965_node_info hnode;
3476 	caddr_t src, dst;
3477 
3478 	/*
3479 	 * We use the node structure for 5000 Series internally (it is
3480 	 * a superset of the one for 4965AGN). We thus copy the common
3481 	 * fields before sending the command.
3482 	 */
3483 	src = (caddr_t)node;
3484 	dst = (caddr_t)&hnode;
3485 	memcpy(dst, src, 48);
3486 	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
3487 	memcpy(dst + 48, src + 72, 20);
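	/*
	 * Illustrative note: bytes 0-47 are common to both layouts; the 24
	 * bytes at offset 48 of the 5000 layout (TSC and MIC fields) have
	 * no 4965 counterpart, so copying resumes at source offset 72.
	 */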
3488 	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
3489 }
3490 
3491 int
3492 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3493 {
3494 	/* Direct mapping. */
3495 	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
3496 }
3497 
3498 int
3499 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
3500 {
3501 	struct ieee80211com *ic = &sc->sc_ic;
3502 	struct iwn_node *wn = (void *)ni;
3503 	struct ieee80211_rateset *rs = &ni->ni_rates;
3504 	struct iwn_cmd_link_quality linkq;
3505 	const struct iwn_rate *rinfo;
3506 	uint8_t txant;
3507 	int i, txrate;
3508 
3509 	/* Use the first valid TX antenna. */
3510 	txant = IWN_LSB(sc->txchainmask);
3511 
3512 	memset(&linkq, 0, sizeof linkq);
3513 	linkq.id = wn->id;
3514 	linkq.antmsk_1stream = txant;
3515 	linkq.antmsk_2stream = IWN_ANT_AB;
3516 	linkq.ampdu_max = IWN_AMPDU_MAX;
3517 	linkq.ampdu_threshold = 3;
3518 	linkq.ampdu_limit = htole16(4000);	/* 4ms */
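	/*
	 * The firmware steps through retry[] on successive retransmissions,
	 * so the table below is filled from the preferred rate downwards.
	 */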
3519 
3520 	if (ni->ni_flags & IEEE80211_NODE_HT) {
3521 		/* Fill LQ table with MCS 7 - 0 (XXX revisit for MIMO) */
3522 		i = 0;
3523 		for (txrate = 7; txrate >= 0; txrate--) {
3524 			rinfo = &iwn_rates[iwn_mcs2ridx[txrate]];
3525 			linkq.retry[i].plcp = rinfo->ht_plcp;
3526 			linkq.retry[i].rflags = rinfo->ht_flags;
3527 
3528 			if (ni->ni_htcaps & IEEE80211_HTCAP_SGI20)
3529 				linkq.retry[i].rflags |= IWN_RFLAG_SGI;
3530 
3531 			/* XXX set correct ant mask for MIMO rates here */
3532 			linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
3533 
3534 			if (++i >= IWN_MAX_TX_RETRIES)
3535 				break;
3536 		}
3537 
3538 		/* Fill the rest with the lowest basic rate. */
3539 		rinfo = &iwn_rates[iwn_rval2ridx(ieee80211_min_basic_rate(ic))];
3540 		while (i < IWN_MAX_TX_RETRIES) {
3541 			linkq.retry[i].plcp = rinfo->plcp;
3542 			linkq.retry[i].rflags = rinfo->flags;
3543 			linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
3544 			i++;
3545 		}
3546 	} else {
3547 		/* Start at highest available bit-rate. */
3548 		txrate = rs->rs_nrates - 1;
3549 		for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
3550 			rinfo = &iwn_rates[wn->ridx[txrate]];
3551 			linkq.retry[i].plcp = rinfo->plcp;
3552 			linkq.retry[i].rflags = rinfo->flags;
3553 			linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
3554 			/* Next retry at immediate lower bit-rate. */
3555 			if (txrate > 0)
3556 				txrate--;
3557 		}
3558 	}
3559 
3560 	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
3561 }
3562 
3563 /*
3564  * Broadcast node is used to send group-addressed and management frames.
3565  */
3566 int
3567 iwn_add_broadcast_node(struct iwn_softc *sc, int async, int ridx)
3568 {
3569 	struct iwn_ops *ops = &sc->ops;
3570 	struct iwn_node_info node;
3571 	struct iwn_cmd_link_quality linkq;
3572 	const struct iwn_rate *rinfo;
3573 	uint8_t txant;
3574 	int i, error;
3575 
3576 	memset(&node, 0, sizeof node);
3577 	IEEE80211_ADDR_COPY(node.macaddr, etherbroadcastaddr);
3578 	node.id = sc->broadcast_id;
3579 	DPRINTF(("adding broadcast node\n"));
3580 	if ((error = ops->add_node(sc, &node, async)) != 0)
3581 		return error;
3582 
3583 	/* Use the first valid TX antenna. */
3584 	txant = IWN_LSB(sc->txchainmask);
3585 
3586 	memset(&linkq, 0, sizeof linkq);
3587 	linkq.id = sc->broadcast_id;
3588 	linkq.antmsk_1stream = txant;
3589 	linkq.antmsk_2stream = IWN_ANT_AB;
3590 	linkq.ampdu_max = IWN_AMPDU_MAX_NO_AGG;
3591 	linkq.ampdu_threshold = 3;
3592 	linkq.ampdu_limit = htole16(4000);	/* 4ms */
3593 
3594 	/* Use lowest mandatory bit-rate. */
3595 	rinfo = &iwn_rates[ridx];
3596 	linkq.retry[0].plcp = rinfo->plcp;
3597 	linkq.retry[0].rflags = rinfo->flags;
3598 	linkq.retry[0].rflags |= IWN_RFLAG_ANT(txant);
3599 	/* Use same bit-rate for all TX retries. */
3600 	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
3601 		linkq.retry[i].plcp = linkq.retry[0].plcp;
3602 		linkq.retry[i].rflags = linkq.retry[0].rflags;
3603 	}
3604 	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
3605 }
3606 
3607 void
3608 iwn_updateedca(struct ieee80211com *ic)
3609 {
3610 #define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
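/* e.g. an ECWmin of 4 gives IWN_EXP2(4) = (1 << 4) - 1 = 15 for CWmin. */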
3611 	struct iwn_softc *sc = ic->ic_softc;
3612 	struct iwn_edca_params cmd;
3613 	int aci;
3614 
3615 	memset(&cmd, 0, sizeof cmd);
3616 	cmd.flags = htole32(IWN_EDCA_UPDATE);
3617 	for (aci = 0; aci < EDCA_NUM_AC; aci++) {
3618 		const struct ieee80211_edca_ac_params *ac =
3619 		    &ic->ic_edca_ac[aci];
3620 		cmd.ac[aci].aifsn = ac->ac_aifsn;
3621 		cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->ac_ecwmin));
3622 		cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->ac_ecwmax));
3623 		cmd.ac[aci].txoplimit =
3624 		    htole16(IEEE80211_TXOP_TO_US(ac->ac_txoplimit));
3625 	}
3626 	(void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
3627 #undef IWN_EXP2
3628 }
3629 
3630 void
3631 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
3632 {
3633 	struct iwn_cmd_led led;
3634 
3635 	/* Clear microcode LED ownership. */
3636 	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
3637 
3638 	led.which = which;
3639 	led.unit = htole32(10000);	/* on/off in unit of 100ms */
3640 	led.off = off;
3641 	led.on = on;
3642 	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
3643 }
3644 
3645 /*
3646  * Set the critical temperature at which the firmware will stop the radio
3647  * and notify us.
3648  */
3649 int
3650 iwn_set_critical_temp(struct iwn_softc *sc)
3651 {
3652 	struct iwn_critical_temp crit;
3653 	int32_t temp;
3654 
3655 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
3656 
3657 	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
3658 		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
3659 	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
3660 		temp = IWN_CTOK(110);
3661 	else
3662 		temp = 110;
3663 	memset(&crit, 0, sizeof crit);
3664 	crit.tempR = htole32(temp);
3665 	DPRINTF(("setting critical temperature to %d\n", temp));
3666 	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
3667 }
3668 
3669 int
3670 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
3671 {
3672 	struct iwn_cmd_timing cmd;
3673 	uint64_t val, mod;
3674 
3675 	memset(&cmd, 0, sizeof cmd);
3676 	memcpy(&cmd.tstamp, ni->ni_tstamp, sizeof (uint64_t));
3677 	cmd.bintval = htole16(ni->ni_intval);
3678 	cmd.lintval = htole16(10);
3679 
3680 	/* Compute remaining time until next beacon. */
3681 	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
3682 	mod = letoh64(cmd.tstamp) % val;
3683 	cmd.binitval = htole32((uint32_t)(val - mod));
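	/*
	 * Illustration: with a 100 TU beacon interval, val is 102400 usec;
	 * "mod" is the time already spent in the current beacon period, so
	 * val - mod is the time left until the next expected beacon.
	 */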
3684 
3685 	DPRINTF(("timing bintval=%u, tstamp=%llu, init=%u\n",
3686 	    ni->ni_intval, letoh64(cmd.tstamp), (uint32_t)(val - mod)));
3687 
3688 	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
3689 }
3690 
3691 void
3692 iwn4965_power_calibration(struct iwn_softc *sc, int temp)
3693 {
3694 	/* Adjust TX power if need be (delta >= 3 degC). */
3695 	DPRINTF(("temperature %d->%d\n", sc->temp, temp));
3696 	if (abs(temp - sc->temp) >= 3) {
3697 		/* Record temperature of last calibration. */
3698 		sc->temp = temp;
3699 		(void)iwn4965_set_txpower(sc, 1);
3700 	}
3701 }
3702 
3703 /*
3704  * Set TX power for current channel (each rate has its own power settings).
3705  * This function takes into account the regulatory information from EEPROM,
3706  * the current temperature and the current voltage.
3707  */
3708 int
3709 iwn4965_set_txpower(struct iwn_softc *sc, int async)
3710 {
3711 /* Fixed-point arithmetic division using an n-bit fractional part. */
3712 #define fdivround(a, b, n)	\
3713 	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
3714 /* Linear interpolation. */
3715 #define interpolate(x, x1, y1, x2, y2, n)	\
3716 	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
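/*
 * Worked example: fdivround(7, 2, 1) = ((2 * 7) / 2 + 1) / 2 = 4, i.e.
 * 7 / 2 rounded to the nearest integer.  interpolate() applies the same
 * rounded division to the slope term of a linear interpolation between
 * (x1, y1) and (x2, y2).
 */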
3717 
3718 	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
3719 	struct ieee80211com *ic = &sc->sc_ic;
3720 	struct iwn_ucode_info *uc = &sc->ucode_info;
3721 	struct ieee80211_channel *ch;
3722 	struct iwn4965_cmd_txpower cmd;
3723 	struct iwn4965_eeprom_chan_samples *chans;
3724 	const uint8_t *rf_gain, *dsp_gain;
3725 	int32_t vdiff, tdiff;
3726 	int i, c, grp, maxpwr;
3727 	uint8_t chan;
3728 
3729 	/* Retrieve current channel from last RXON. */
3730 	chan = sc->rxon.chan;
3731 	DPRINTF(("setting TX power for channel %d\n", chan));
3732 	ch = &ic->ic_channels[chan];
3733 
3734 	memset(&cmd, 0, sizeof cmd);
3735 	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
3736 	cmd.chan = chan;
3737 
3738 	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
3739 		maxpwr   = sc->maxpwr5GHz;
3740 		rf_gain  = iwn4965_rf_gain_5ghz;
3741 		dsp_gain = iwn4965_dsp_gain_5ghz;
3742 	} else {
3743 		maxpwr   = sc->maxpwr2GHz;
3744 		rf_gain  = iwn4965_rf_gain_2ghz;
3745 		dsp_gain = iwn4965_dsp_gain_2ghz;
3746 	}
3747 
3748 	/* Compute voltage compensation. */
3749 	vdiff = ((int32_t)letoh32(uc->volt) - sc->eeprom_voltage) / 7;
3750 	if (vdiff > 0)
3751 		vdiff *= 2;
3752 	if (abs(vdiff) > 2)
3753 		vdiff = 0;
3754 	DPRINTF(("voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
3755 	    vdiff, letoh32(uc->volt), sc->eeprom_voltage));
3756 
3757 	/* Get channel attenuation group. */
3758 	if (chan <= 20)		/* 1-20 */
3759 		grp = 4;
3760 	else if (chan <= 43)	/* 34-43 */
3761 		grp = 0;
3762 	else if (chan <= 70)	/* 44-70 */
3763 		grp = 1;
3764 	else if (chan <= 124)	/* 71-124 */
3765 		grp = 2;
3766 	else			/* 125-200 */
3767 		grp = 3;
3768 	DPRINTF(("chan %d, attenuation group=%d\n", chan, grp));
3769 
3770 	/* Get channel sub-band. */
3771 	for (i = 0; i < IWN_NBANDS; i++)
3772 		if (sc->bands[i].lo != 0 &&
3773 		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
3774 			break;
3775 	if (i == IWN_NBANDS)	/* Can't happen in real-life. */
3776 		return EINVAL;
3777 	chans = sc->bands[i].chans;
3778 	DPRINTF(("chan %d sub-band=%d\n", chan, i));
3779 
3780 	for (c = 0; c < 2; c++) {
3781 		uint8_t power, gain, temp;
3782 		int maxchpwr, pwr, ridx, idx;
3783 
3784 		power = interpolate(chan,
3785 		    chans[0].num, chans[0].samples[c][1].power,
3786 		    chans[1].num, chans[1].samples[c][1].power, 1);
3787 		gain  = interpolate(chan,
3788 		    chans[0].num, chans[0].samples[c][1].gain,
3789 		    chans[1].num, chans[1].samples[c][1].gain, 1);
3790 		temp  = interpolate(chan,
3791 		    chans[0].num, chans[0].samples[c][1].temp,
3792 		    chans[1].num, chans[1].samples[c][1].temp, 1);
3793 		DPRINTF(("TX chain %d: power=%d gain=%d temp=%d\n",
3794 		    c, power, gain, temp));
3795 
3796 		/* Compute temperature compensation. */
3797 		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
3798 		DPRINTF(("temperature compensation=%d (current=%d, "
3799 		    "EEPROM=%d)\n", tdiff, sc->temp, temp));
3800 
3801 		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
3802 			/* Convert dBm to half-dBm. */
3803 			maxchpwr = sc->maxpwr[chan] * 2;
3804 #ifdef notyet
3805 			if (ridx > iwn_mcs2ridx[7] && ridx < iwn_mcs2ridx[16])
3806 				maxchpwr -= 6;	/* MIMO 2T: -3dB */
3807 #endif
3808 
3809 			pwr = maxpwr;
3810 
3811 			/* Adjust TX power based on rate. */
3812 			if ((ridx % 8) == 5)
3813 				pwr -= 15;	/* OFDM48: -7.5dB */
3814 			else if ((ridx % 8) == 6)
3815 				pwr -= 17;	/* OFDM54: -8.5dB */
3816 			else if ((ridx % 8) == 7)
3817 				pwr -= 20;	/* OFDM60: -10dB */
3818 			else
3819 				pwr -= 10;	/* Others: -5dB */
3820 
3821 			/* Do not exceed channel max TX power. */
3822 			if (pwr > maxchpwr)
3823 				pwr = maxchpwr;
3824 
3825 			idx = gain - (pwr - power) - tdiff - vdiff;
3826 			if (ridx > iwn_mcs2ridx[7]) /* MIMO */
3827 				idx += (int32_t)letoh32(uc->atten[grp][c]);
3828 
3829 			if (cmd.band == 0)
3830 				idx += 9;	/* 5GHz */
3831 			if (ridx == IWN_RIDX_MAX)
3832 				idx += 5;	/* CCK */
3833 
3834 			/* Make sure idx stays in a valid range. */
3835 			if (idx < 0)
3836 				idx = 0;
3837 			else if (idx > IWN4965_MAX_PWR_INDEX)
3838 				idx = IWN4965_MAX_PWR_INDEX;
3839 
3840 			DPRINTF(("TX chain %d, rate idx %d: power=%d\n",
3841 			    c, ridx, idx));
3842 			cmd.power[ridx].rf_gain[c] = rf_gain[idx];
3843 			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
3844 		}
3845 	}
3846 
3847 	DPRINTF(("setting TX power for chan %d\n", chan));
3848 	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
3849 
3850 #undef interpolate
3851 #undef fdivround
3852 }
3853 
3854 int
3855 iwn5000_set_txpower(struct iwn_softc *sc, int async)
3856 {
3857 	struct iwn5000_cmd_txpower cmd;
3858 
3859 	/*
3860 	 * TX power calibration is handled automatically by the firmware
3861 	 * for 5000 Series.
3862 	 */
3863 	memset(&cmd, 0, sizeof cmd);
3864 	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
3865 	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
3866 	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
3867 	DPRINTF(("setting TX power\n"));
3868 	return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
3869 }
3870 
3871 /*
3872  * Retrieve the maximum RSSI (in dBm) among receivers.
3873  */
3874 int
3875 iwn4965_get_rssi(const struct iwn_rx_stat *stat)
3876 {
3877 	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
3878 	uint8_t mask, agc;
3879 	int rssi;
3880 
3881 	mask = (letoh16(phy->antenna) >> 4) & IWN_ANT_ABC;
3882 	agc  = (letoh16(phy->agc) >> 7) & 0x7f;
3883 
3884 	rssi = 0;
3885 	if (mask & IWN_ANT_A)
3886 		rssi = MAX(rssi, phy->rssi[0]);
3887 	if (mask & IWN_ANT_B)
3888 		rssi = MAX(rssi, phy->rssi[2]);
3889 	if (mask & IWN_ANT_C)
3890 		rssi = MAX(rssi, phy->rssi[4]);
3891 
3892 	return rssi - agc - IWN_RSSI_TO_DBM;
3893 }
3894 
3895 int
3896 iwn5000_get_rssi(const struct iwn_rx_stat *stat)
3897 {
3898 	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
3899 	uint8_t agc;
3900 	int rssi;
3901 
3902 	agc = (letoh32(phy->agc) >> 9) & 0x7f;
3903 
3904 	rssi = MAX(letoh16(phy->rssi[0]) & 0xff,
3905 		   letoh16(phy->rssi[1]) & 0xff);
3906 	rssi = MAX(letoh16(phy->rssi[2]) & 0xff, rssi);
3907 
3908 	return rssi - agc - IWN_RSSI_TO_DBM;
3909 }
3910 
3911 /*
3912  * Retrieve the average noise (in dBm) among receivers.
3913  */
3914 int
3915 iwn_get_noise(const struct iwn_rx_general_stats *stats)
3916 {
3917 	int i, total, nbant, noise;
3918 
3919 	total = nbant = 0;
3920 	for (i = 0; i < 3; i++) {
3921 		if ((noise = letoh32(stats->noise[i]) & 0xff) == 0)
3922 			continue;
3923 		total += noise;
3924 		nbant++;
3925 	}
3926 	/* There should be at least one antenna but check anyway. */
3927 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3928 }
3929 
3930 /*
3931  * Compute temperature (in degC) from last received statistics.
3932  */
3933 int
3934 iwn4965_get_temperature(struct iwn_softc *sc)
3935 {
3936 	struct iwn_ucode_info *uc = &sc->ucode_info;
3937 	int32_t r1, r2, r3, r4, temp;
3938 
3939 	r1 = letoh32(uc->temp[0].chan20MHz);
3940 	r2 = letoh32(uc->temp[1].chan20MHz);
3941 	r3 = letoh32(uc->temp[2].chan20MHz);
3942 	r4 = letoh32(sc->rawtemp);
3943 
3944 	if (r1 == r3)	/* Prevents division by 0 (should not happen). */
3945 		return 0;
3946 
3947 	/* Sign-extend 23-bit R4 value to 32-bit. */
3948 	r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
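	/*
	 * Illustration: a raw value of 0x7fffff stays positive, while
	 * 0x800000 becomes -0x800000 after the XOR/subtract above.
	 */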
3949 	/* Compute temperature in Kelvin. */
3950 	temp = (259 * (r4 - r2)) / (r3 - r1);
3951 	temp = (temp * 97) / 100 + 8;
3952 
3953 	DPRINTF(("temperature %dK/%dC\n", temp, IWN_KTOC(temp)));
3954 	return IWN_KTOC(temp);
3955 }
3956 
3957 int
3958 iwn5000_get_temperature(struct iwn_softc *sc)
3959 {
3960 	int32_t temp;
3961 
3962 	/*
3963 	 * Temperature is not used by the driver for 5000 Series because
3964 	 * TX power calibration is handled by firmware.
3965 	 */
3966 	temp = letoh32(sc->rawtemp);
3967 	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
3968 		temp = (temp / -5) + sc->temp_off;
3969 		temp = IWN_KTOC(temp);
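		/*
		 * Note: this is the inverse of the 5150 conversion used in
		 * iwn_set_critical_temp(): raw = (K - temp_off) * -5.
		 */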
3970 	}
3971 	return temp;
3972 }
3973 
3974 /*
3975  * Initialize sensitivity calibration state machine.
3976  */
3977 int
3978 iwn_init_sensitivity(struct iwn_softc *sc)
3979 {
3980 	struct iwn_ops *ops = &sc->ops;
3981 	struct iwn_calib_state *calib = &sc->calib;
3982 	uint32_t flags;
3983 	int error;
3984 
3985 	/* Reset calibration state machine. */
3986 	memset(calib, 0, sizeof (*calib));
3987 	calib->state = IWN_CALIB_STATE_INIT;
3988 	calib->cck_state = IWN_CCK_STATE_HIFA;
3989 	/* Set initial correlation values. */
3990 	calib->ofdm_x1     = sc->limits->min_ofdm_x1;
3991 	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
3992 	calib->ofdm_x4     = sc->limits->min_ofdm_x4;
3993 	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
3994 	calib->cck_x4      = 125;
3995 	calib->cck_mrc_x4  = sc->limits->min_cck_mrc_x4;
3996 	calib->energy_cck  = sc->limits->energy_cck;
3997 
3998 	/* Write initial sensitivity. */
3999 	if ((error = iwn_send_sensitivity(sc)) != 0)
4000 		return error;
4001 
4002 	/* Write initial gains. */
4003 	if ((error = ops->init_gains(sc)) != 0)
4004 		return error;
4005 
4006 	/* Request statistics at each beacon interval. */
4007 	flags = 0;
4008 	DPRINTFN(2, ("sending request for statistics\n"));
4009 	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
4010 }
4011 
4012 /*
4013  * Collect noise and RSSI statistics for the first 20 beacons received
4014  * after association and use them to determine connected antennas and
4015  * to set differential gains.
4016  */
4017 void
4018 iwn_collect_noise(struct iwn_softc *sc,
4019     const struct iwn_rx_general_stats *stats)
4020 {
4021 	struct iwn_ops *ops = &sc->ops;
4022 	struct iwn_calib_state *calib = &sc->calib;
4023 	uint32_t val;
4024 	int i;
4025 
4026 	/* Accumulate RSSI and noise for all 3 antennas. */
4027 	for (i = 0; i < 3; i++) {
4028 		calib->rssi[i] += letoh32(stats->rssi[i]) & 0xff;
4029 		calib->noise[i] += letoh32(stats->noise[i]) & 0xff;
4030 	}
4031 	/* NB: We update differential gains only once after 20 beacons. */
4032 	if (++calib->nbeacons < 20)
4033 		return;
4034 
4035 	/* Determine highest average RSSI. */
4036 	val = MAX(calib->rssi[0], calib->rssi[1]);
4037 	val = MAX(calib->rssi[2], val);
4038 
4039 	/* Determine which antennas are connected. */
4040 	sc->chainmask = sc->rxchainmask;
4041 	for (i = 0; i < 3; i++)
4042 		if (val - calib->rssi[i] > 15 * 20)
4043 			sc->chainmask &= ~(1 << i);
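	/*
	 * Note: calib->rssi[] holds sums over 20 beacons, so this drops an
	 * antenna whose average RSSI is more than 15 units below the
	 * strongest one.
	 */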
4044 	DPRINTF(("RX chains mask: theoretical=0x%x, actual=0x%x\n",
4045 	    sc->rxchainmask, sc->chainmask));
4046 
4047 	/* If none of the TX antennas are connected, keep at least one. */
4048 	if ((sc->chainmask & sc->txchainmask) == 0)
4049 		sc->chainmask |= IWN_LSB(sc->txchainmask);
4050 
4051 	(void)ops->set_gains(sc);
4052 	calib->state = IWN_CALIB_STATE_RUN;
4053 
4054 #ifdef notyet
4055 	/* XXX Disable RX chains with no antennas connected. */
4056 	sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
4057 	(void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
4058 #endif
4059 
4060 	/* Enable power-saving mode if requested by user. */
4061 	if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON)
4062 		(void)iwn_set_pslevel(sc, 0, 3, 1);
4063 }
4064 
4065 int
4066 iwn4965_init_gains(struct iwn_softc *sc)
4067 {
4068 	struct iwn_phy_calib_gain cmd;
4069 
4070 	memset(&cmd, 0, sizeof cmd);
4071 	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4072 	/* Differential gains initially set to 0 for all 3 antennas. */
4073 	DPRINTF(("setting initial differential gains\n"));
4074 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4075 }
4076 
4077 int
4078 iwn5000_init_gains(struct iwn_softc *sc)
4079 {
4080 	struct iwn_phy_calib cmd;
4081 
4082 	memset(&cmd, 0, sizeof cmd);
4083 	cmd.code = sc->reset_noise_gain;
4084 	cmd.ngroups = 1;
4085 	cmd.isvalid = 1;
4086 	DPRINTF(("setting initial differential gains\n"));
4087 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4088 }
4089 
4090 int
4091 iwn4965_set_gains(struct iwn_softc *sc)
4092 {
4093 	struct iwn_calib_state *calib = &sc->calib;
4094 	struct iwn_phy_calib_gain cmd;
4095 	int i, delta, noise;
4096 
4097 	/* Get the minimum noise among connected antennas. */
4098 	noise = INT_MAX;	/* NB: There's at least one antenna. */
4099 	for (i = 0; i < 3; i++)
4100 		if (sc->chainmask & (1 << i))
4101 			noise = MIN(calib->noise[i], noise);
4102 
4103 	memset(&cmd, 0, sizeof cmd);
4104 	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4105 	/* Set differential gains for connected antennas. */
4106 	for (i = 0; i < 3; i++) {
4107 		if (sc->chainmask & (1 << i)) {
4108 			/* Compute attenuation (in unit of 1.5dB). */
4109 			delta = (noise - (int32_t)calib->noise[i]) / 30;
4110 			/* NB: delta <= 0 */
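			/*
			 * noise[] is a sum over 20 beacons, so dividing by
			 * 30 (20 * 1.5) gives the average difference in
			 * 1.5dB steps.
			 */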
4111 			/* Limit to [-4.5dB,0]. */
4112 			cmd.gain[i] = MIN(abs(delta), 3);
4113 			if (delta < 0)
4114 				cmd.gain[i] |= 1 << 2;	/* sign bit */
4115 		}
4116 	}
4117 	DPRINTF(("setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
4118 	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask));
4119 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4120 }
4121 
4122 int
4123 iwn5000_set_gains(struct iwn_softc *sc)
4124 {
4125 	struct iwn_calib_state *calib = &sc->calib;
4126 	struct iwn_phy_calib_gain cmd;
4127 	int i, ant, div, delta;
4128 
4129 	/* We collected 20 beacons, and non-6050 chips need an extra 1.5 factor. */
4130 	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
4131 
4132 	memset(&cmd, 0, sizeof cmd);
4133 	cmd.code = sc->noise_gain;
4134 	cmd.ngroups = 1;
4135 	cmd.isvalid = 1;
4136 	/* Use the first available RX antenna as the reference. */
4137 	ant = IWN_LSB(sc->rxchainmask);
4138 	/* Set differential gains for other antennas. */
4139 	for (i = ant + 1; i < 3; i++) {
4140 		if (sc->chainmask & (1 << i)) {
4141 			/* The delta is relative to antenna "ant". */
4142 			delta = ((int32_t)calib->noise[ant] -
4143 			    (int32_t)calib->noise[i]) / div;
4144 			/* Limit to [-4.5dB,+4.5dB]. */
4145 			cmd.gain[i - 1] = MIN(abs(delta), 3);
4146 			if (delta < 0)
4147 				cmd.gain[i - 1] |= 1 << 2;	/* sign bit */
4148 		}
4149 	}
4150 	DPRINTF(("setting differential gains: %x/%x (%x)\n",
4151 	    cmd.gain[0], cmd.gain[1], sc->chainmask));
4152 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4153 }
4154 
4155 /*
4156  * Tune RF RX sensitivity based on the number of false alarms detected
4157  * during the last beacon period.
4158  */
4159 void
4160 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
4161 {
4162 #define inc(val, inc, max)			\
4163 	if ((val) < (max)) {			\
4164 		if ((val) < (max) - (inc))	\
4165 			(val) += (inc);		\
4166 		else				\
4167 			(val) = (max);		\
4168 		needs_update = 1;		\
4169 	}
4170 #define dec(val, dec, min)			\
4171 	if ((val) > (min)) {			\
4172 		if ((val) > (min) + (dec))	\
4173 			(val) -= (dec);		\
4174 		else				\
4175 			(val) = (min);		\
4176 		needs_update = 1;		\
4177 	}
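/*
 * For example, inc(calib->ofdm_x1, 1, limits->max_ofdm_x1) raises the
 * OFDM correlation threshold by one, never past its limit, and flags
 * needs_update so the new values are sent to the firmware below.
 */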
4178 
4179 	const struct iwn_sensitivity_limits *limits = sc->limits;
4180 	struct iwn_calib_state *calib = &sc->calib;
4181 	uint32_t val, rxena, fa;
4182 	uint32_t energy[3], energy_min;
4183 	uint8_t noise[3], noise_ref;
4184 	int i, needs_update = 0;
4185 
4186 	/* Check that we've been enabled long enough. */
4187 	if ((rxena = letoh32(stats->general.load)) == 0)
4188 		return;
4189 
4190 	/* Compute number of false alarms since last call for OFDM. */
4191 	fa  = letoh32(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
4192 	fa += letoh32(stats->ofdm.fa) - calib->fa_ofdm;
4193 	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */
4194 
4195 	/* Save counters values for next call. */
4196 	calib->bad_plcp_ofdm = letoh32(stats->ofdm.bad_plcp);
4197 	calib->fa_ofdm = letoh32(stats->ofdm.fa);
4198 
4199 	if (fa > 50 * rxena) {
4200 		/* High false alarm count, decrease sensitivity. */
4201 		DPRINTFN(2, ("OFDM high false alarm count: %u\n", fa));
4202 		inc(calib->ofdm_x1,     1, limits->max_ofdm_x1);
4203 		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
4204 		inc(calib->ofdm_x4,     1, limits->max_ofdm_x4);
4205 		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
4206 
4207 	} else if (fa < 5 * rxena) {
4208 		/* Low false alarm count, increase sensitivity. */
4209 		DPRINTFN(2, ("OFDM low false alarm count: %u\n", fa));
4210 		dec(calib->ofdm_x1,     1, limits->min_ofdm_x1);
4211 		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
4212 		dec(calib->ofdm_x4,     1, limits->min_ofdm_x4);
4213 		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
4214 	}
4215 
4216 	/* Compute maximum noise among 3 receivers. */
4217 	for (i = 0; i < 3; i++)
4218 		noise[i] = (letoh32(stats->general.noise[i]) >> 8) & 0xff;
4219 	val = MAX(noise[0], noise[1]);
4220 	val = MAX(noise[2], val);
4221 	/* Insert it into our samples table. */
4222 	calib->noise_samples[calib->cur_noise_sample] = val;
4223 	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
4224 
4225 	/* Compute maximum noise among last 20 samples. */
4226 	noise_ref = calib->noise_samples[0];
4227 	for (i = 1; i < 20; i++)
4228 		noise_ref = MAX(noise_ref, calib->noise_samples[i]);
4229 
4230 	/* Compute maximum energy (lowest reported value) among 3 receivers. */
4231 	for (i = 0; i < 3; i++)
4232 		energy[i] = letoh32(stats->general.energy[i]);
4233 	val = MIN(energy[0], energy[1]);
4234 	val = MIN(energy[2], val);
4235 	/* Insert it into our samples table. */
4236 	calib->energy_samples[calib->cur_energy_sample] = val;
4237 	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
4238 
4239 	/* Compute minimum energy (highest reported value) among last 10 samples. */
4240 	energy_min = calib->energy_samples[0];
4241 	for (i = 1; i < 10; i++)
4242 		energy_min = MAX(energy_min, calib->energy_samples[i]);
4243 	energy_min += 6;
4244 
4245 	/* Compute number of false alarms since last call for CCK. */
4246 	fa  = letoh32(stats->cck.bad_plcp) - calib->bad_plcp_cck;
4247 	fa += letoh32(stats->cck.fa) - calib->fa_cck;
4248 	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */
4249 
4250 	/* Save counters values for next call. */
4251 	calib->bad_plcp_cck = letoh32(stats->cck.bad_plcp);
4252 	calib->fa_cck = letoh32(stats->cck.fa);
4253 
4254 	if (fa > 50 * rxena) {
4255 		/* High false alarm count, decrease sensitivity. */
4256 		DPRINTFN(2, ("CCK high false alarm count: %u\n", fa));
4257 		calib->cck_state = IWN_CCK_STATE_HIFA;
4258 		calib->low_fa = 0;
4259 
4260 		if (calib->cck_x4 > 160) {
4261 			calib->noise_ref = noise_ref;
4262 			if (calib->energy_cck > 2)
4263 				dec(calib->energy_cck, 2, energy_min);
4264 		}
4265 		if (calib->cck_x4 < 160) {
4266 			calib->cck_x4 = 161;
4267 			needs_update = 1;
4268 		} else
4269 			inc(calib->cck_x4, 3, limits->max_cck_x4);
4270 
4271 		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
4272 
4273 	} else if (fa < 5 * rxena) {
4274 		/* Low false alarm count, increase sensitivity. */
4275 		DPRINTFN(2, ("CCK low false alarm count: %u\n", fa));
4276 		calib->cck_state = IWN_CCK_STATE_LOFA;
4277 		calib->low_fa++;
4278 
4279 		if (calib->cck_state != IWN_CCK_STATE_INIT &&
4280 		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
4281 		     calib->low_fa > 100)) {
4282 			inc(calib->energy_cck, 2, limits->min_energy_cck);
4283 			dec(calib->cck_x4,     3, limits->min_cck_x4);
4284 			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
4285 		}
4286 	} else {
4287 		/* Not worth increasing or decreasing sensitivity. */
4288 		DPRINTFN(2, ("CCK normal false alarm count: %u\n", fa));
4289 		calib->low_fa = 0;
4290 		calib->noise_ref = noise_ref;
4291 
4292 		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
4293 			/* Previous interval had many false alarms. */
4294 			dec(calib->energy_cck, 8, energy_min);
4295 		}
4296 		calib->cck_state = IWN_CCK_STATE_INIT;
4297 	}
4298 
4299 	if (needs_update)
4300 		(void)iwn_send_sensitivity(sc);
4301 #undef dec
4302 #undef inc
4303 }
4304 
4305 int
4306 iwn_send_sensitivity(struct iwn_softc *sc)
4307 {
4308 	struct iwn_calib_state *calib = &sc->calib;
4309 	struct iwn_enhanced_sensitivity_cmd cmd;
4310 	int len;
4311 
4312 	memset(&cmd, 0, sizeof cmd);
4313 	len = sizeof (struct iwn_sensitivity_cmd);
4314 	cmd.which = IWN_SENSITIVITY_WORKTBL;
4315 	/* OFDM modulation. */
4316 	cmd.corr_ofdm_x1       = htole16(calib->ofdm_x1);
4317 	cmd.corr_ofdm_mrc_x1   = htole16(calib->ofdm_mrc_x1);
4318 	cmd.corr_ofdm_x4       = htole16(calib->ofdm_x4);
4319 	cmd.corr_ofdm_mrc_x4   = htole16(calib->ofdm_mrc_x4);
4320 	cmd.energy_ofdm        = htole16(sc->limits->energy_ofdm);
4321 	cmd.energy_ofdm_th     = htole16(62);
4322 	/* CCK modulation. */
4323 	cmd.corr_cck_x4        = htole16(calib->cck_x4);
4324 	cmd.corr_cck_mrc_x4    = htole16(calib->cck_mrc_x4);
4325 	cmd.energy_cck         = htole16(calib->energy_cck);
4326 	/* Barker modulation: use default values. */
4327 	cmd.corr_barker        = htole16(190);
4328 	cmd.corr_barker_mrc    = htole16(390);
4329 	if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
4330 		goto send;
4331 	/* Enhanced sensitivity settings. */
4332 	len = sizeof (struct iwn_enhanced_sensitivity_cmd);
4333 	cmd.ofdm_det_slope_mrc = htole16(668);
4334 	cmd.ofdm_det_icept_mrc = htole16(4);
4335 	cmd.ofdm_det_slope     = htole16(486);
4336 	cmd.ofdm_det_icept     = htole16(37);
4337 	cmd.cck_det_slope_mrc  = htole16(853);
4338 	cmd.cck_det_icept_mrc  = htole16(4);
4339 	cmd.cck_det_slope      = htole16(476);
4340 	cmd.cck_det_icept      = htole16(99);
4341 send:
4342 	return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
4343 }
4344 
4345 /*
4346  * Set STA mode power saving level (between 0 and 5).
4347  * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
4348  */
4349 int
4350 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
4351 {
4352 	struct iwn_pmgt_cmd cmd;
4353 	const struct iwn_pmgt *pmgt;
4354 	uint32_t max, skip_dtim;
4355 	pcireg_t reg;
4356 	int i;
4357 
4358 	/* Select which PS parameters to use. */
4359 	if (dtim <= 2)
4360 		pmgt = &iwn_pmgt[0][level];
4361 	else if (dtim <= 10)
4362 		pmgt = &iwn_pmgt[1][level];
4363 	else
4364 		pmgt = &iwn_pmgt[2][level];
4365 
4366 	memset(&cmd, 0, sizeof cmd);
4367 	if (level != 0)	/* not CAM */
4368 		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
4369 	if (level == 5)
4370 		cmd.flags |= htole16(IWN_PS_FAST_PD);
4371 	/* Retrieve PCIe Active State Power Management (ASPM). */
4372 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
4373 	    sc->sc_cap_off + PCI_PCIE_LCSR);
4374 	if (!(reg & PCI_PCIE_LCSR_ASPM_L0S))	/* L0s Entry disabled. */
4375 		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
4376 	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
4377 	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
4378 
4379 	if (dtim == 0) {
4380 		dtim = 1;
4381 		skip_dtim = 0;
4382 	} else
4383 		skip_dtim = pmgt->skip_dtim;
4384 	if (skip_dtim != 0) {
4385 		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
4386 		max = pmgt->intval[4];
4387 		if (max == (uint32_t)-1)
4388 			max = dtim * (skip_dtim + 1);
4389 		else if (max > dtim)
4390 			max = (max / dtim) * dtim;
4391 	} else
4392 		max = dtim;
4393 	for (i = 0; i < 5; i++)
4394 		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
4395 
4396 	DPRINTF(("setting power saving level to %d\n", level));
4397 	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
4398 }
4399 
4400 int
4401 iwn_send_btcoex(struct iwn_softc *sc)
4402 {
4403 	struct iwn_bluetooth cmd;
4404 
4405 	memset(&cmd, 0, sizeof cmd);
4406 	cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
4407 	cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
4408 	cmd.max_kill = IWN_BT_MAX_KILL_DEF;
4409 	DPRINTF(("configuring bluetooth coexistence\n"));
4410 	return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
4411 }
4412 
4413 int
4414 iwn_send_advanced_btcoex(struct iwn_softc *sc)
4415 {
4416 	static const uint32_t btcoex_3wire[12] = {
4417 		0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
4418 		0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
4419 		0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
4420 	};
4421 	struct iwn_btcoex_priotable btprio;
4422 	struct iwn_btcoex_prot btprot;
4423 	int error, i;
4424 
4425 	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
4426 	    sc->hw_type == IWN_HW_REV_TYPE_135) {
4427 		struct iwn2000_btcoex_config btconfig;
4428 
4429 		memset(&btconfig, 0, sizeof btconfig);
4430 		btconfig.flags = IWN_BT_COEX6000_CHAN_INHIBITION |
4431 		    (IWN_BT_COEX6000_MODE_3W << IWN_BT_COEX6000_MODE_SHIFT) |
4432 		    IWN_BT_SYNC_2_BT_DISABLE;
4433 		btconfig.max_kill = 5;
4434 		btconfig.bt3_t7_timer = 1;
4435 		btconfig.kill_ack = htole32(0xffff0000);
4436 		btconfig.kill_cts = htole32(0xffff0000);
4437 		btconfig.sample_time = 2;
4438 		btconfig.bt3_t2_timer = 0xc;
4439 		for (i = 0; i < 12; i++)
4440 			btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
4441 		btconfig.valid = htole16(0xff);
4442 		btconfig.prio_boost = htole32(0xf0);
4443 		DPRINTF(("configuring advanced bluetooth coexistence\n"));
4444 		error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig,
4445 		    sizeof(btconfig), 1);
4446 		if (error != 0)
4447 			return (error);
4448 	} else {
4449 		struct iwn6000_btcoex_config btconfig;
4450 
4451 		memset(&btconfig, 0, sizeof btconfig);
4452 		btconfig.flags = IWN_BT_COEX6000_CHAN_INHIBITION |
4453 		    (IWN_BT_COEX6000_MODE_3W << IWN_BT_COEX6000_MODE_SHIFT) |
4454 		    IWN_BT_SYNC_2_BT_DISABLE;
4455 		btconfig.max_kill = 5;
4456 		btconfig.bt3_t7_timer = 1;
4457 		btconfig.kill_ack = htole32(0xffff0000);
4458 		btconfig.kill_cts = htole32(0xffff0000);
4459 		btconfig.sample_time = 2;
4460 		btconfig.bt3_t2_timer = 0xc;
4461 		for (i = 0; i < 12; i++)
4462 			btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
4463 		btconfig.valid = htole16(0xff);
4464 		btconfig.prio_boost = 0xf0;
4465 		DPRINTF(("configuring advanced bluetooth coexistence\n"));
4466 		error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig,
4467 		    sizeof(btconfig), 1);
4468 		if (error != 0)
4469 			return (error);
4470 	}
4471 
4472 	memset(&btprio, 0, sizeof btprio);
4473 	btprio.calib_init1 = 0x6;
4474 	btprio.calib_init2 = 0x7;
4475 	btprio.calib_periodic_low1 = 0x2;
4476 	btprio.calib_periodic_low2 = 0x3;
4477 	btprio.calib_periodic_high1 = 0x4;
4478 	btprio.calib_periodic_high2 = 0x5;
4479 	btprio.dtim = 0x6;
4480 	btprio.scan52 = 0x8;
4481 	btprio.scan24 = 0xa;
4482 	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
4483 	    1);
4484 	if (error != 0)
4485 		return (error);
4486 
4487 	/* Force BT state machine change */
4488 	memset(&btprot, 0, sizeof btprot);
4489 	btprot.open = 1;
4490 	btprot.type = 1;
4491 	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
4492 	if (error != 0)
4493 		return (error);
4494 
4495 	btprot.open = 0;
4496 	return (iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1));
4497 }
4498 
4499 int
4500 iwn5000_runtime_calib(struct iwn_softc *sc)
4501 {
4502 	struct iwn5000_calib_config cmd;
4503 
4504 	memset(&cmd, 0, sizeof cmd);
4505 	cmd.ucode.once.enable = 0xffffffff;
4506 	cmd.ucode.once.start = IWN5000_CALIB_DC;
4507 	DPRINTF(("configuring runtime calibration\n"));
4508 	return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
4509 }
4510 
4511 int
4512 iwn_config(struct iwn_softc *sc)
4513 {
4514 	struct iwn_ops *ops = &sc->ops;
4515 	struct ieee80211com *ic = &sc->sc_ic;
4516 	struct ifnet *ifp = &ic->ic_if;
4517 	uint32_t txmask;
4518 	uint16_t rxchain;
4519 	int error, ridx;
4520 
4521 	/* Set radio temperature sensor offset. */
4522 	if (sc->hw_type == IWN_HW_REV_TYPE_6005) {
4523 		error = iwn6000_temp_offset_calib(sc);
4524 		if (error != 0) {
4525 			printf("%s: could not set temperature offset\n",
4526 			    sc->sc_dev.dv_xname);
4527 			return error;
4528 		}
4529 	}
4530 
4531 	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
4532 	    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
4533 	    sc->hw_type == IWN_HW_REV_TYPE_135 ||
4534 	    sc->hw_type == IWN_HW_REV_TYPE_105) {
4535 		error = iwn2000_temp_offset_calib(sc);
4536 		if (error != 0) {
4537 			printf("%s: could not set temperature offset\n",
4538 			    sc->sc_dev.dv_xname);
4539 			return error;
4540 		}
4541 	}
4542 
4543 	if (sc->hw_type == IWN_HW_REV_TYPE_6050 ||
4544 	    sc->hw_type == IWN_HW_REV_TYPE_6005) {
4545 		/* Configure runtime DC calibration. */
4546 		error = iwn5000_runtime_calib(sc);
4547 		if (error != 0) {
4548 			printf("%s: could not configure runtime calibration\n",
4549 			    sc->sc_dev.dv_xname);
4550 			return error;
4551 		}
4552 	}
4553 
4554 	/* Configure valid TX chains for >=5000 Series. */
4555 	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4556 		txmask = htole32(sc->txchainmask);
4557 		DPRINTF(("configuring valid TX chains 0x%x\n", txmask));
4558 		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
4559 		    sizeof txmask, 0);
4560 		if (error != 0) {
4561 			printf("%s: could not configure valid TX chains\n",
4562 			    sc->sc_dev.dv_xname);
4563 			return error;
4564 		}
4565 	}
4566 
4567 	/* Configure bluetooth coexistence. */
4568 	if (sc->sc_flags & IWN_FLAG_ADV_BT_COEX)
4569 		error = iwn_send_advanced_btcoex(sc);
4570 	else
4571 		error = iwn_send_btcoex(sc);
4572 	if (error != 0) {
4573 		printf("%s: could not configure bluetooth coexistence\n",
4574 		    sc->sc_dev.dv_xname);
4575 		return error;
4576 	}
4577 
4578 	/* Set mode, channel, RX filter and enable RX. */
4579 	memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
4580 	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
4581 	IEEE80211_ADDR_COPY(sc->rxon.myaddr, ic->ic_myaddr);
4582 	IEEE80211_ADDR_COPY(sc->rxon.wlap, ic->ic_myaddr);
4583 	sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4584 	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4585 	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_ibss_chan)) {
4586 		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4587 		if (ic->ic_flags & IEEE80211_F_USEPROT)
4588 			sc->rxon.flags |= htole32(IWN_RXON_TGG_PROT);
4589 		DPRINTF(("%s: 2ghz prot 0x%x\n", __func__,
4590 		    le32toh(sc->rxon.flags)));
4591 	}
4592 	switch (ic->ic_opmode) {
4593 	case IEEE80211_M_STA:
4594 		sc->rxon.mode = IWN_MODE_STA;
4595 		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
4596 		break;
4597 	case IEEE80211_M_MONITOR:
4598 		sc->rxon.mode = IWN_MODE_MONITOR;
4599 		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
4600 		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
4601 		break;
4602 	default:
4603 		/* Should not get here. */
4604 		break;
4605 	}
4606 	sc->rxon.cck_mask  = 0x0f;	/* not yet negotiated */
4607 	sc->rxon.ofdm_mask = 0xff;	/* not yet negotiated */
4608 	sc->rxon.ht_single_mask = 0xff;
4609 	sc->rxon.ht_dual_mask = 0xff;
4610 	sc->rxon.ht_triple_mask = 0xff;
4611 	rxchain =
4612 	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
4613 	    IWN_RXCHAIN_MIMO_COUNT(sc->nrxchains) |
4614 	    IWN_RXCHAIN_IDLE_COUNT(sc->nrxchains);
4615 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4616 		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
4617 		rxchain |= IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask);
4618 		rxchain |= (IWN_RXCHAIN_DRIVER_FORCE | IWN_RXCHAIN_MIMO_FORCE);
4619 	}
4620 	sc->rxon.rxchain = htole16(rxchain);
4621 	DPRINTF(("setting configuration\n"));
4622 	DPRINTF(("%s: rxon chan %d flags %x cck %x ofdm %x rxchain %x\n",
4623 	    __func__, sc->rxon.chan, le32toh(sc->rxon.flags), sc->rxon.cck_mask,
4624 	    sc->rxon.ofdm_mask, sc->rxon.rxchain));
4625 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0);
4626 	if (error != 0) {
4627 		printf("%s: RXON command failed\n", sc->sc_dev.dv_xname);
4628 		return error;
4629 	}
4630 
4631 	ridx = (sc->sc_ic.ic_curmode == IEEE80211_MODE_11A) ?
4632 	    IWN_RIDX_OFDM6 : IWN_RIDX_CCK1;
4633 	if ((error = iwn_add_broadcast_node(sc, 0, ridx)) != 0) {
4634 		printf("%s: could not add broadcast node\n",
4635 		    sc->sc_dev.dv_xname);
4636 		return error;
4637 	}
4638 
4639 	/* Configuration has changed, set TX power accordingly. */
4640 	if ((error = ops->set_txpower(sc, 0)) != 0) {
4641 		printf("%s: could not set TX power\n", sc->sc_dev.dv_xname);
4642 		return error;
4643 	}
4644 
4645 	if ((error = iwn_set_critical_temp(sc)) != 0) {
4646 		printf("%s: could not set critical temperature\n",
4647 		    sc->sc_dev.dv_xname);
4648 		return error;
4649 	}
4650 
4651 	/* Set power saving level to CAM during initialization. */
4652 	if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
4653 		printf("%s: could not set power saving level\n",
4654 		    sc->sc_dev.dv_xname);
4655 		return error;
4656 	}
4657 	return 0;
4658 }
4659 
4660 uint16_t
4661 iwn_get_active_dwell_time(struct iwn_softc *sc,
4662     uint16_t flags, uint8_t n_probes)
4663 {
4664 	/* 2GHz channels use the 2GHz dwell settings. */
4665 	if (flags & IEEE80211_CHAN_2GHZ) {
4666 		return (IWN_ACTIVE_DWELL_TIME_2GHZ +
4667 		    IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
4668 	}
4669 
4670 	/* 5GHz dwell time */
4671 	return (IWN_ACTIVE_DWELL_TIME_5GHZ +
4672 	    IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1));
4673 }
4674 
4675 /*
4676  * Limit the total dwell time to 85% of the beacon interval.
4677  *
4678  * Returns the dwell time in milliseconds.
4679  */
4680 uint16_t
4681 iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time)
4682 {
4683 	struct ieee80211com *ic = &sc->sc_ic;
4684 	struct ieee80211_node *ni = ic->ic_bss;
4685 	int bintval = 0;
4686 
4687 	/* bintval is in TU (1.024 ms). */
4688 	if (ni != NULL)
4689 		bintval = ni->ni_intval;
4690 
4691 	/*
4692 	 * If the beacon interval is non-zero, use the smaller of
4693 	 * IWN_PASSIVE_DWELL_BASE and 85% of the beacon interval.
4694 	 *
4695 	 * XXX Yes, the math should take into account that bintval
4696 	 * is in units of 1.024 ms, not 1 ms.
4697 	 */
4698 	if (ic->ic_state == IEEE80211_S_RUN && bintval > 0)
4699 		return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100)));
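	/*
	 * Example: with a typical 100 TU beacon interval this caps the
	 * passive dwell at 85 (treating TU as ms, per the XXX note above).
	 */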
4700 
4701 	/* No association context? Default */
4702 	return (IWN_PASSIVE_DWELL_BASE);
4703 }
4704 
4705 uint16_t
4706 iwn_get_passive_dwell_time(struct iwn_softc *sc, uint16_t flags)
4707 {
4708 	uint16_t passive;
4709 	if (flags & IEEE80211_CHAN_2GHZ) {
4710 		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ;
4711 	} else {
4712 		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ;
4713 	}
4714 
4715 	/* Clamp to the beacon interval if we're associated */
4716 	return (iwn_limit_dwell(sc, passive));
4717 }
4718 
4719 int
4720 iwn_scan(struct iwn_softc *sc, uint16_t flags, int bgscan)
4721 {
4722 	struct ieee80211com *ic = &sc->sc_ic;
4723 	struct iwn_scan_hdr *hdr;
4724 	struct iwn_cmd_data *tx;
4725 	struct iwn_scan_essid *essid;
4726 	struct iwn_scan_chan *chan;
4727 	struct ieee80211_frame *wh;
4728 	struct ieee80211_rateset *rs;
4729 	struct ieee80211_channel *c;
4730 	struct ifnet *ifp = &ic->ic_if;
4731 	uint8_t *buf, *frm;
4732 	uint16_t rxchain, dwell_active, dwell_passive;
4733 	uint8_t txant;
4734 	int buflen, error, is_active;
4735 
4736 	buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
4737 	if (buf == NULL) {
4738 		printf("%s: could not allocate buffer for scan command\n",
4739 		    sc->sc_dev.dv_xname);
4740 		return ENOMEM;
4741 	}
4742 	hdr = (struct iwn_scan_hdr *)buf;
4743 	/*
4744 	 * Move to the next channel if no frames are received within 10ms
4745 	 * after sending the probe request.
4746 	 */
4747 	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
4748 	hdr->quiet_threshold = htole16(1);	/* min # of packets */
4749 
4750 	if (bgscan) {
4751 		int bintval;
4752 
4753 		/* Set maximum off-channel time. */
4754 		hdr->max_out = htole32(200 * 1024);
4755 
4756 		/* Configure scan pauses which service on-channel traffic. */
4757 		bintval = ic->ic_bss->ni_intval ? ic->ic_bss->ni_intval : 100;
4758 		hdr->pause_scan = htole32(((100 / bintval) << 22) |
4759 		    ((100 % bintval) * 1024));
4760 	}
4761 
4762 	/* Select antennas for scanning. */
4763 	rxchain =
4764 	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
4765 	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
4766 	    IWN_RXCHAIN_DRIVER_FORCE;
4767 	if ((flags & IEEE80211_CHAN_5GHZ) &&
4768 	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
4769 		/*
4770 		 * On 4965, ant A and C must be avoided in 5GHz because of a
4771 		 * HW bug which causes very weak RSSI values to be reported.
4772 		 */
4773 		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
4774 	} else	/* Use all available RX antennas. */
4775 		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
4776 	hdr->rxchain = htole16(rxchain);
4777 	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
4778 
4779 	tx = (struct iwn_cmd_data *)(hdr + 1);
4780 	tx->flags = htole32(IWN_TX_AUTO_SEQ);
4781 	tx->id = sc->broadcast_id;
4782 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
4783 
4784 	if (flags & IEEE80211_CHAN_5GHZ) {
4785 		/* Send probe requests at 6Mbps. */
4786 		tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp;
4787 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
4788 	} else {
4789 		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
4790 		if (bgscan && sc->hw_type == IWN_HW_REV_TYPE_4965 &&
4791 		    sc->rxon.chan > 14) {
4792 			/*
4793 			 * 4965 firmware can crash when sending probe requests
4794 			 * with CCK rates while associated to a 5GHz AP.
4795 			 * Send probe requests at 6Mbps OFDM as a workaround.
4796 			 */
4797 			tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp;
4798 		} else {
4799 			/* Send probe requests at 1Mbps. */
4800 			tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp;
4801 			tx->rflags = IWN_RFLAG_CCK;
4802 		}
4803 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
4804 	}
4805 	/* Use the first valid TX antenna. */
4806 	txant = IWN_LSB(sc->txchainmask);
4807 	tx->rflags |= IWN_RFLAG_ANT(txant);
4808 
4809 	/*
4810 	 * Only do active scanning if we're announcing a probe request
4811 	 * for a given SSID (or more, if we ever add it to the driver.)
4812 	 */
4813 	is_active = 0;
4814 
4815 	/*
4816 	 * If we're scanning for a specific SSID, add it to the command.
4817 	 */
4818 	essid = (struct iwn_scan_essid *)(tx + 1);
4819 	if (ic->ic_des_esslen != 0) {
4820 		essid[0].id = IEEE80211_ELEMID_SSID;
4821 		essid[0].len = ic->ic_des_esslen;
4822 		memcpy(essid[0].data, ic->ic_des_essid, ic->ic_des_esslen);
4823 
4824 		is_active = 1;
4825 	}
4826 	/*
4827 	 * Build a probe request frame.  Most of the following code is a
4828 	 * copy & paste of what is done in net80211.
4829 	 */
4830 	wh = (struct ieee80211_frame *)(essid + 20);
4831 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4832 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4833 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4834 	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
4835 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
4836 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
4837 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
4838 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
4839 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
4840 
4841 	frm = (uint8_t *)(wh + 1);
4842 	frm = ieee80211_add_ssid(frm, NULL, 0);
4843 	frm = ieee80211_add_rates(frm, rs);
4844 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4845 		frm = ieee80211_add_xrates(frm, rs);
4846 	if (ic->ic_flags & IEEE80211_F_HTON)
4847 		frm = ieee80211_add_htcaps(frm, ic);
4848 
4849 	/* Set length of probe request. */
4850 	tx->len = htole16(frm - (uint8_t *)wh);
4851 
4852 	/*
4853 	 * If active scanning is requested but a certain channel is
4854 	 * marked passive, we can do active scanning if we detect
4855 	 * transmissions.
4856 	 *
4857 	 * There is an issue with some firmware versions that triggers
4858 	 * a sysassert on a "good CRC threshold" of zero (== disabled),
4859 	 * on a radar channel even though this means that we should NOT
4860 	 * send probes.
4861 	 *
4862 	 * The "good CRC threshold" is the number of frames that we
4863 	 * need to receive during our dwell time on a channel before
4864 	 * sending out probes -- setting this to a huge value will
4865 	 * mean we never reach it, but at the same time work around
4866 	 * the aforementioned issue. Thus use IWN_GOOD_CRC_TH_NEVER
4867 	 * here instead of IWN_GOOD_CRC_TH_DISABLED.
4868 	 *
4869 	 * This was fixed in later versions along with some other
4870 	 * scan changes, and the threshold behaves as a flag in those
4871 	 * versions.
4872 	 */
4873 
4874 	/*
4875 	 * If we're doing active scanning, set the crc_threshold
4876 	 * to a suitable value.  This differs for active versus
4877 	 * passive scanning depending upon the channel flags; the
4878 	 * firmware will obey that particular check for us.
4879 	 */
4880 	if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN)
4881 		hdr->crc_threshold = is_active ?
4882 		    IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED;
4883 	else
4884 		hdr->crc_threshold = is_active ?
4885 		    IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER;
4886 
4887 	chan = (struct iwn_scan_chan *)frm;
4888 	for (c  = &ic->ic_channels[1];
4889 	     c <= &ic->ic_channels[IEEE80211_CHAN_MAX]; c++) {
4890 		if ((c->ic_flags & flags) != flags)
4891 			continue;
4892 
4893 		chan->chan = htole16(ieee80211_chan2ieee(ic, c));
4894 		DPRINTFN(2, ("adding channel %d\n", chan->chan));
4895 		chan->flags = 0;
4896 		if (ic->ic_des_esslen != 0)
4897 			chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
4898 
4899 		if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
4900 			chan->flags |= htole32(IWN_CHAN_PASSIVE);
4901 		else
4902 			chan->flags |= htole32(IWN_CHAN_ACTIVE);
4903 
4904 		/*
4905 		 * Calculate the active/passive dwell times.
4906 		 */
4907 
4908 		dwell_active = iwn_get_active_dwell_time(sc, flags, is_active);
4909 		dwell_passive = iwn_get_passive_dwell_time(sc, flags);
4910 
4911 		/* Make sure they're valid */
4912 		if (dwell_passive <= dwell_active)
4913 			dwell_passive = dwell_active + 1;
4914 
4915 		chan->active = htole16(dwell_active);
4916 		chan->passive = htole16(dwell_passive);
4917 
4918 		chan->dsp_gain = 0x6e;
4919 		if (IEEE80211_IS_CHAN_5GHZ(c)) {
4920 			chan->rf_gain = 0x3b;
4921 		} else {
4922 			chan->rf_gain = 0x28;
4923 		}
4924 		hdr->nchan++;
4925 		chan++;
4926 	}
4927 
4928 	buflen = (uint8_t *)chan - buf;
4929 	hdr->len = htole16(buflen);
4930 
4931 	DPRINTF(("sending scan command nchan=%d\n", hdr->nchan));
4932 	error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
4933 	if (error == 0) {
4934 		sc->sc_flags |= IWN_FLAG_SCANNING;
4935 		if (bgscan)
4936 			sc->sc_flags |= IWN_FLAG_BGSCAN;
4937 	}
4938 	free(buf, M_DEVBUF, IWN_SCAN_MAXSZ);
4939 	return error;
4940 }
4941 
4942 void
4943 iwn_scan_abort(struct iwn_softc *sc)
4944 {
4945 	iwn_cmd(sc, IWN_CMD_SCAN_ABORT, NULL, 0, 1);
4946 
4947 	/* XXX Cannot wait for status response in interrupt context. */
4948 	DELAY(100);
4949 
4950 	sc->sc_flags &= ~IWN_FLAG_SCANNING;
4951 	sc->sc_flags &= ~IWN_FLAG_BGSCAN;
4952 }
4953 
4954 int
4955 iwn_bgscan(struct ieee80211com *ic)
4956 {
4957 	struct iwn_softc *sc = ic->ic_softc;
4958 	int error;
4959 
4960 	if (sc->sc_flags & IWN_FLAG_SCANNING)
4961 		return 0;
4962 
4963 	error = iwn_scan(sc, IEEE80211_CHAN_2GHZ, 1);
4964 	if (error)
4965 		printf("%s: could not initiate background scan\n",
4966 		    sc->sc_dev.dv_xname);
4967 	return error;
4968 }
4969 
4970 int
4971 iwn_auth(struct iwn_softc *sc, int arg)
4972 {
4973 	struct iwn_ops *ops = &sc->ops;
4974 	struct ieee80211com *ic = &sc->sc_ic;
4975 	struct ieee80211_node *ni = ic->ic_bss;
4976 	int error, ridx;
4977 	int bss_switch =
4978 	    (!IEEE80211_ADDR_EQ(sc->bss_node_addr, etheranyaddr) &&
4979 	    !IEEE80211_ADDR_EQ(sc->bss_node_addr, ni->ni_macaddr));
4980 
4981 	/* Update adapter configuration. */
4982 	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
4983 	sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
4984 	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4985 	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
4986 		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4987 		if (ic->ic_flags & IEEE80211_F_USEPROT)
4988 			sc->rxon.flags |= htole32(IWN_RXON_TGG_PROT);
4989 		DPRINTF(("%s: 2ghz prot 0x%x\n", __func__,
4990 		    le32toh(sc->rxon.flags)));
4991 	}
4992 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
4993 		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
4994 	else
4995 		sc->rxon.flags &= ~htole32(IWN_RXON_SHSLOT);
4996 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4997 		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
4998 	else
4999 		sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE);
5000 	switch (ic->ic_curmode) {
5001 	case IEEE80211_MODE_11A:
5002 		sc->rxon.cck_mask  = 0;
5003 		sc->rxon.ofdm_mask = 0x15;
5004 		break;
5005 	case IEEE80211_MODE_11B:
5006 		sc->rxon.cck_mask  = 0x03;
5007 		sc->rxon.ofdm_mask = 0;
5008 		break;
5009 	default:	/* Assume 802.11b/g/n. */
5010 		sc->rxon.cck_mask  = 0x0f;
5011 		sc->rxon.ofdm_mask = 0x15;
5012 	}
5013 	DPRINTF(("%s: rxon chan %d flags %x cck %x ofdm %x\n", __func__,
5014 	    sc->rxon.chan, le32toh(sc->rxon.flags), sc->rxon.cck_mask,
5015 	    sc->rxon.ofdm_mask));
5016 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5017 	if (error != 0) {
5018 		printf("%s: RXON command failed\n", sc->sc_dev.dv_xname);
5019 		return error;
5020 	}
5021 
5022 	/* Configuration has changed, set TX power accordingly. */
5023 	if ((error = ops->set_txpower(sc, 1)) != 0) {
5024 		printf("%s: could not set TX power\n", sc->sc_dev.dv_xname);
5025 		return error;
5026 	}
5027 	/*
5028 	 * Reconfiguring RXON clears the firmware nodes table so we must
5029 	 * add the broadcast node again.
5030 	 */
5031 	ridx = IEEE80211_IS_CHAN_5GHZ(ni->ni_chan) ?
5032 	    IWN_RIDX_OFDM6 : IWN_RIDX_CCK1;
5033 	if ((error = iwn_add_broadcast_node(sc, 1, ridx)) != 0) {
5034 		printf("%s: could not add broadcast node\n",
5035 		    sc->sc_dev.dv_xname);
5036 		return error;
5037 	}
5038 
5039 	/*
5040 	 * Make sure the firmware gets to see a beacon before we send
5041 	 * the auth request. Otherwise the Tx attempt can fail due to
5042 	 * the firmware's built-in regulatory domain enforcement.
5043 	 * Delaying here for every incoming deauth frame can result in a DoS.
5044 	 * Don't delay if we're here because of an incoming frame (arg != -1)
5045 	 * or if we're already waiting for a response (ic_mgt_timer != 0).
5046 	 * If we are switching APs after a background scan then net80211 has
5047 	 * just faked the reception of a deauth frame from our old AP, so it
5048 	 * is safe to delay in that case.
5049 	 */
5050 	if ((arg == -1 || bss_switch) && ic->ic_mgt_timer == 0)
5051 		DELAY(ni->ni_intval * 3 * IEEE80211_DUR_TU);
5052 
5053 	/* We can now clear the cached address of our previous AP. */
5054 	memset(sc->bss_node_addr, 0, sizeof(sc->bss_node_addr));
5055 
5056 	return 0;
5057 }
5058 
5059 int
5060 iwn_run(struct iwn_softc *sc)
5061 {
5062 	struct iwn_ops *ops = &sc->ops;
5063 	struct ieee80211com *ic = &sc->sc_ic;
5064 	struct ieee80211_node *ni = ic->ic_bss;
5065 	struct iwn_node *wn = (void *)ni;
5066 	struct iwn_node_info node;
5067 	int error;
5068 
5069 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5070 		/* Link LED blinks while monitoring. */
5071 		iwn_set_led(sc, IWN_LED_LINK, 50, 50);
5072 		return 0;
5073 	}
5074 	if ((error = iwn_set_timing(sc, ni)) != 0) {
5075 		printf("%s: could not set timing\n", sc->sc_dev.dv_xname);
5076 		return error;
5077 	}
5078 
5079 	/* Update adapter configuration. */
5080 	sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd));
5081 	/* Short preamble and slot time are negotiated when associating. */
5082 	sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT);
5083 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
5084 		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
5085 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5086 		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
5087 	sc->rxon.filter |= htole32(IWN_FILTER_BSS);
5088 
5089 	/* HT is negotiated when associating. */
5090 	if (ni->ni_flags & IEEE80211_NODE_HT) {
5091 		enum ieee80211_htprot htprot =
5092 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
5093 		DPRINTF(("%s: htprot = %d\n", __func__, htprot));
5094 		sc->rxon.flags |= htole32(IWN_RXON_HT_PROTMODE(htprot));
5095 	} else
5096 		sc->rxon.flags &= ~htole32(IWN_RXON_HT_PROTMODE(3));
5097 
5098 	if (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) {
5099 		/* 11a or 11n 5GHz */
5100 		sc->rxon.cck_mask  = 0;
5101 		sc->rxon.ofdm_mask = 0x15;
5102 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
5103 		/* 11n 2GHz */
5104 		sc->rxon.cck_mask  = 0x0f;
5105 		sc->rxon.ofdm_mask = 0x15;
5106 	} else {
5107 		if (ni->ni_rates.rs_nrates == 4) {
5108 			/* 11b */
5109 			sc->rxon.cck_mask  = 0x03;
5110 			sc->rxon.ofdm_mask = 0;
5111 		} else {
5112 			/* assume 11g */
5113 			sc->rxon.cck_mask  = 0x0f;
5114 			sc->rxon.ofdm_mask = 0x15;
5115 		}
5116 	}
5117 	DPRINTF(("%s: rxon chan %d flags %x cck %x ofdm %x\n", __func__,
5118 	    sc->rxon.chan, le32toh(sc->rxon.flags), sc->rxon.cck_mask,
5119 	    sc->rxon.ofdm_mask));
5120 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5121 	if (error != 0) {
5122 		printf("%s: could not update configuration\n",
5123 		    sc->sc_dev.dv_xname);
5124 		return error;
5125 	}
5126 
5127 	/* Configuration has changed, set TX power accordingly. */
5128 	if ((error = ops->set_txpower(sc, 1)) != 0) {
5129 		printf("%s: could not set TX power\n", sc->sc_dev.dv_xname);
5130 		return error;
5131 	}
5132 
5133 	/* Fake a join to initialize the TX rate. */
5134 	((struct iwn_node *)ni)->id = IWN_ID_BSS;
5135 	iwn_newassoc(ic, ni, 1);
5136 
5137 	/* Add BSS node. */
5138 	memset(&node, 0, sizeof node);
5139 	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
5140 	node.id = IWN_ID_BSS;
5141 	if (ni->ni_flags & IEEE80211_NODE_HT) {
5142 		node.htmask = (IWN_AMDPU_SIZE_FACTOR_MASK |
5143 		    IWN_AMDPU_DENSITY_MASK);
5144 		node.htflags = htole32(
5145 		    IWN_AMDPU_SIZE_FACTOR(
5146 			(ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_LE)) |
5147 		    IWN_AMDPU_DENSITY(
5148 			(ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) >> 2));
5149 	}
5150 	DPRINTF(("adding BSS node\n"));
5151 	error = ops->add_node(sc, &node, 1);
5152 	if (error != 0) {
5153 		printf("%s: could not add BSS node\n", sc->sc_dev.dv_xname);
5154 		return error;
5155 	}
5156 
5157 	/* Cache address of AP in case it changes after a background scan. */
5158 	IEEE80211_ADDR_COPY(sc->bss_node_addr, ni->ni_macaddr);
5159 
5160 	DPRINTF(("setting link quality for node %d\n", node.id));
5161 	if ((error = iwn_set_link_quality(sc, ni)) != 0) {
5162 		printf("%s: could not set up link quality for node %d\n",
5163 		    sc->sc_dev.dv_xname, node.id);
5164 		return error;
5165 	}
5166 
5167 	if ((error = iwn_init_sensitivity(sc)) != 0) {
5168 		printf("%s: could not set sensitivity\n",
5169 		    sc->sc_dev.dv_xname);
5170 		return error;
5171 	}
5172 	/* Start periodic calibration timer. */
5173 	sc->calib.state = IWN_CALIB_STATE_ASSOC;
5174 	sc->calib_cnt = 0;
5175 	timeout_add_msec(&sc->calib_to, 500);
5176 
5177 	ieee80211_mira_node_init(&wn->mn);
5178 
5179 	/* Link LED always on while associated. */
5180 	iwn_set_led(sc, IWN_LED_LINK, 0, 1);
5181 	return 0;
5182 }
5183 
5184 /*
5185  * We support CCMP hardware encryption/decryption of unicast frames only.
5186  * HW support for TKIP really sucks.  We should let TKIP die anyway.
5187  */
5188 int
5189 iwn_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
5190     struct ieee80211_key *k)
5191 {
5192 	struct iwn_softc *sc = ic->ic_softc;
5193 	struct iwn_ops *ops = &sc->ops;
5194 	struct iwn_node *wn = (void *)ni;
5195 	struct iwn_node_info node;
5196 	uint16_t kflags;
5197 
5198 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
5199 	    k->k_cipher != IEEE80211_CIPHER_CCMP)
5200 		return ieee80211_set_key(ic, ni, k);
5201 
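	/*
	 * Only pairwise CCMP keys reach this point; net80211 handles
	 * group keys and other ciphers in software above.
	 */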
5202 	kflags = IWN_KFLAG_CCMP | IWN_KFLAG_MAP | IWN_KFLAG_KID(k->k_id);
5203 	if (k->k_flags & IEEE80211_KEY_GROUP)
5204 		kflags |= IWN_KFLAG_GROUP;
5205 
5206 	memset(&node, 0, sizeof node);
5207 	node.id = (k->k_flags & IEEE80211_KEY_GROUP) ?
5208 	    sc->broadcast_id : wn->id;
5209 	node.control = IWN_NODE_UPDATE;
5210 	node.flags = IWN_FLAG_SET_KEY;
5211 	node.kflags = htole16(kflags);
5212 	node.kid = k->k_id;
5213 	memcpy(node.key, k->k_key, k->k_len);
5214 	DPRINTF(("set key id=%d for node %d\n", k->k_id, node.id));
5215 	return ops->add_node(sc, &node, 1);
5216 }
5217 
5218 void
5219 iwn_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
5220     struct ieee80211_key *k)
5221 {
5222 	struct iwn_softc *sc = ic->ic_softc;
5223 	struct iwn_ops *ops = &sc->ops;
5224 	struct iwn_node *wn = (void *)ni;
5225 	struct iwn_node_info node;
5226 
5227 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
5228 	    k->k_cipher != IEEE80211_CIPHER_CCMP) {
5229 		/* See comment about other ciphers above. */
5230 		ieee80211_delete_key(ic, ni, k);
5231 		return;
5232 	}
5233 	if (ic->ic_state != IEEE80211_S_RUN)
5234 		return;	/* Nothing to do. */
5235 	memset(&node, 0, sizeof node);
5236 	node.id = (k->k_flags & IEEE80211_KEY_GROUP) ?
5237 	    sc->broadcast_id : wn->id;
5238 	node.control = IWN_NODE_UPDATE;
5239 	node.flags = IWN_FLAG_SET_KEY;
5240 	node.kflags = htole16(IWN_KFLAG_INVALID);
5241 	node.kid = 0xff;
5242 	DPRINTF(("delete keys for node %d\n", node.id));
5243 	(void)ops->add_node(sc, &node, 1);
5244 }
5245 
5246 /*
5247  * This function is called by upper layer when HT protection settings in
5248  * beacons have changed.
5249  */
5250 void
5251 iwn_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
5252 {
5253 	struct iwn_softc *sc = ic->ic_softc;
5254 	struct iwn_ops *ops = &sc->ops;
5255 	enum ieee80211_htprot htprot;
5256 	struct iwn_rxon_assoc rxon_assoc;
5257 	int s, error;
5258 
5259 	/* Update HT protection mode setting. */
5260 	htprot = (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK) >>
5261 	    IEEE80211_HTOP1_PROT_SHIFT;
5262 	sc->rxon.flags &= ~htole32(IWN_RXON_HT_PROTMODE(3));
5263 	sc->rxon.flags |= htole32(IWN_RXON_HT_PROTMODE(htprot));
5264 
5265 	/* Update RXON config. */
5266 	memset(&rxon_assoc, 0, sizeof(rxon_assoc));
5267 	rxon_assoc.flags = sc->rxon.flags;
5268 	rxon_assoc.filter = sc->rxon.filter;
5269 	rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask;
5270 	rxon_assoc.cck_mask = sc->rxon.cck_mask;
5271 	rxon_assoc.ht_single_mask = sc->rxon.ht_single_mask;
5272 	rxon_assoc.ht_dual_mask = sc->rxon.ht_dual_mask;
5273 	rxon_assoc.ht_triple_mask = sc->rxon.ht_triple_mask;
5274 	rxon_assoc.rxchain = sc->rxon.rxchain;
5275 	rxon_assoc.acquisition = sc->rxon.acquisition;
5276 
5277 	s = splnet();
5278 
5279 	error = iwn_cmd(sc, IWN_CMD_RXON_ASSOC, &rxon_assoc,
5280 	    sizeof(rxon_assoc), 1);
5281 	if (error != 0)
5282 		printf("%s: RXON_ASSOC command failed\n", sc->sc_dev.dv_xname);
5283 
5284 	DELAY(100);
5285 
5286 	/* All RXONs wipe the firmware's txpower table. Restore it. */
5287 	error = ops->set_txpower(sc, 1);
5288 	if (error != 0)
5289 		printf("%s: could not set TX power\n", sc->sc_dev.dv_xname);
5290 
5291 	DELAY(100);
5292 
5293 	/* Restore power saving level. */
5294 	if (ic->ic_flags & IEEE80211_F_PMGTON)
5295 		error = iwn_set_pslevel(sc, 0, 3, 1);
5296 	else
5297 		error = iwn_set_pslevel(sc, 0, 0, 1);
5298 	if (error != 0)
5299 		printf("%s: could not set PS level\n", sc->sc_dev.dv_xname);
5300 
5301 	splx(s);
5302 }
5303 
5304 /*
5305  * This function is called by upper layer when an ADDBA request is received
5306  * from another STA and before the ADDBA response is sent.
5307  */
5308 int
5309 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
5310     uint8_t tid)
5311 {
5312 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
5313 	struct iwn_softc *sc = ic->ic_softc;
5314 	struct iwn_ops *ops = &sc->ops;
5315 	struct iwn_node *wn = (void *)ni;
5316 	struct iwn_node_info node;
5317 
5318 	memset(&node, 0, sizeof node);
5319 	node.id = wn->id;
5320 	node.control = IWN_NODE_UPDATE;
5321 	node.flags = IWN_FLAG_SET_ADDBA;
5322 	node.addba_tid = tid;
5323 	node.addba_ssn = htole16(ba->ba_winstart);
5324 	DPRINTF(("ADDBA RA=%d TID=%d SSN=%d\n", wn->id, tid,
5325 	    ba->ba_winstart));
5326 	/* XXX async command, so firmware may still fail to add BA agreement */
5327 	return ops->add_node(sc, &node, 1);
5328 }
5329 
5330 /*
5331  * This function is called by upper layer on teardown of an HT-immediate
5332 	 * Block Ack agreement (e.g. upon receipt of a DELBA frame).
5333  */
5334 void
5335 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
5336     uint8_t tid)
5337 {
5338 	struct iwn_softc *sc = ic->ic_softc;
5339 	struct iwn_ops *ops = &sc->ops;
5340 	struct iwn_node *wn = (void *)ni;
5341 	struct iwn_node_info node;
5342 
5343 	memset(&node, 0, sizeof node);
5344 	node.id = wn->id;
5345 	node.control = IWN_NODE_UPDATE;
5346 	node.flags = IWN_FLAG_SET_DELBA;
5347 	node.delba_tid = tid;
5348 	DPRINTF(("DELBA RA=%d TID=%d\n", wn->id, tid));
5349 	(void)ops->add_node(sc, &node, 1);
5350 }
5351 
5352 /*
5353  * This function is called by upper layer when an ADDBA response is received
5354  * from another STA.
5355  */
5356 int
5357 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
5358     uint8_t tid)
5359 {
5360 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
5361 	struct iwn_softc *sc = ic->ic_softc;
5362 	struct iwn_ops *ops = &sc->ops;
5363 	struct iwn_node *wn = (void *)ni;
5364 	struct iwn_node_info node;
5365 	int error;
5366 
5367 	/* Enable TX for the specified RA/TID. */
5368 	wn->disable_tid &= ~(1 << tid);
5369 	memset(&node, 0, sizeof node);
5370 	node.id = wn->id;
5371 	node.control = IWN_NODE_UPDATE;
5372 	node.flags = IWN_FLAG_SET_DISABLE_TID;
5373 	node.disable_tid = htole16(wn->disable_tid);
5374 	error = ops->add_node(sc, &node, 1);
5375 	if (error != 0)
5376 		return error;
5377 
5378 	if ((error = iwn_nic_lock(sc)) != 0)
5379 		return error;
5380 	ops->ampdu_tx_start(sc, ni, tid, ba->ba_winstart);
5381 	iwn_nic_unlock(sc);
5382 	return 0;
5383 }
5384 
5385 void
5386 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
5387     uint8_t tid)
5388 {
5389 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
5390 	struct iwn_softc *sc = ic->ic_softc;
5391 	struct iwn_ops *ops = &sc->ops;
5392 
5393 	if (iwn_nic_lock(sc) != 0)
5394 		return;
5395 	ops->ampdu_tx_stop(sc, tid, ba->ba_winstart);
5396 	iwn_nic_unlock(sc);
5397 }
5398 
5399 void
5400 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5401     uint8_t tid, uint16_t ssn)
5402 {
5403 	struct iwn_node *wn = (void *)ni;
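	/* Aggregation queues on 4965 start right after the 7 legacy queues. */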
5404 	int qid = 7 + tid;
5405 
5406 	/* Stop TX scheduler while we're changing its configuration. */
5407 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5408 	    IWN4965_TXQ_STATUS_CHGACT);
5409 
5410 	/* Assign RA/TID translation to the queue. */
5411 	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
5412 	    wn->id << 4 | tid);
5413 
5414 	/* Enable chain-building mode for the queue. */
5415 	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
5416 
5417 	/* Set starting sequence number from the ADDBA request. */
5418 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5419 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5420 
5421 	/* Set scheduler window size. */
5422 	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
5423 	    IWN_SCHED_WINSZ);
5424 	/* Set scheduler frame limit. */
5425 	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5426 	    IWN_SCHED_LIMIT << 16);
5427 
5428 	/* Enable interrupts for the queue. */
5429 	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5430 
5431 	/* Mark the queue as active. */
5432 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5433 	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
5434 	    iwn_tid2fifo[tid] << 1);
5435 }
5436 
5437 void
5438 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
5439 {
5440 	int qid = 7 + tid;
5441 
5442 	/* Stop TX scheduler while we're changing its configuration. */
5443 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5444 	    IWN4965_TXQ_STATUS_CHGACT);
5445 
5446 	/* Set starting sequence number from the ADDBA request. */
5447 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5448 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5449 
5450 	/* Disable interrupts for the queue. */
5451 	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5452 
5453 	/* Mark the queue as inactive. */
5454 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5455 	    IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
5456 }
5457 
5458 void
5459 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5460     uint8_t tid, uint16_t ssn)
5461 {
5462 	struct iwn_node *wn = (void *)ni;
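	/* Aggregation queues on 5000-series devices start at queue 10. */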
5463 	int qid = 10 + tid;
5464 
5465 	/* Stop TX scheduler while we're changing its configuration. */
5466 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5467 	    IWN5000_TXQ_STATUS_CHGACT);
5468 
5469 	/* Assign RA/TID translation to the queue. */
5470 	iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
5471 	    wn->id << 4 | tid);
5472 
5473 	/* Enable chain-building mode for the queue. */
5474 	iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
5475 
5476 	/* Enable aggregation for the queue. */
5477 	iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5478 
5479 	/* Set starting sequence number from the ADDBA request. */
5480 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5481 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5482 
5483 	/* Set scheduler window size and frame limit. */
5484 	iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5485 	    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5486 
5487 	/* Enable interrupts for the queue. */
5488 	iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5489 
5490 	/* Mark the queue as active. */
5491 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5492 	    IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
5493 }
5494 
5495 void
5496 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
5497 {
5498 	int qid = 10 + tid;
5499 
5500 	/* Stop TX scheduler while we're changing its configuration. */
5501 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5502 	    IWN5000_TXQ_STATUS_CHGACT);
5503 
5504 	/* Disable aggregation for the queue. */
5505 	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5506 
5507 	/* Set starting sequence number from the ADDBA request. */
5508 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5509 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5510 
5511 	/* Disable interrupts for the queue. */
5512 	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5513 
5514 	/* Mark the queue as inactive. */
5515 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5516 	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
5517 }
5518 
5519 /*
5520  * Query calibration tables from the initialization firmware.  We do this
5521  * only once at first boot.  Called from a process context.
5522  */
5523 int
5524 iwn5000_query_calibration(struct iwn_softc *sc)
5525 {
5526 	struct iwn5000_calib_config cmd;
5527 	int error;
5528 
5529 	memset(&cmd, 0, sizeof cmd);
5530 	cmd.ucode.once.enable = 0xffffffff;
5531 	cmd.ucode.once.start  = 0xffffffff;
5532 	cmd.ucode.once.send   = 0xffffffff;
5533 	cmd.ucode.flags       = 0xffffffff;
5534 	DPRINTF(("sending calibration query\n"));
5535 	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
5536 	if (error != 0)
5537 		return error;
5538 
5539 	/* Wait at most two seconds for calibration to complete. */
5540 	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
5541 		error = tsleep(sc, PCATCH, "iwncal", 2 * hz);
5542 	return error;
5543 }
5544 
5545 /*
5546  * Send calibration results to the runtime firmware.  These results were
5547  * obtained on first boot from the initialization firmware.
5548  */
5549 int
5550 iwn5000_send_calibration(struct iwn_softc *sc)
5551 {
5552 	int idx, error;
5553 
5554 	for (idx = 0; idx < 5; idx++) {
5555 		if (sc->calibcmd[idx].buf == NULL)
5556 			continue;	/* No results available. */
5557 		DPRINTF(("send calibration result idx=%d len=%d\n",
5558 		    idx, sc->calibcmd[idx].len));
5559 		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
5560 		    sc->calibcmd[idx].len, 0);
5561 		if (error != 0) {
5562 			printf("%s: could not send calibration result\n",
5563 			    sc->sc_dev.dv_xname);
5564 			return error;
5565 		}
5566 	}
5567 	return 0;
5568 }
5569 
5570 int
5571 iwn5000_send_wimax_coex(struct iwn_softc *sc)
5572 {
5573 	struct iwn5000_wimax_coex wimax;
5574 
5575 #ifdef notyet
5576 	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
5577 		/* Enable WiMAX coexistence for combo adapters. */
5578 		wimax.flags =
5579 		    IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
5580 		    IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
5581 		    IWN_WIMAX_COEX_STA_TABLE_VALID |
5582 		    IWN_WIMAX_COEX_ENABLE;
5583 		memcpy(wimax.events, iwn6050_wimax_events,
5584 		    sizeof iwn6050_wimax_events);
5585 	} else
5586 #endif
5587 	{
5588 		/* Disable WiMAX coexistence. */
5589 		wimax.flags = 0;
5590 		memset(wimax.events, 0, sizeof wimax.events);
5591 	}
5592 	DPRINTF(("Configuring WiMAX coexistence\n"));
5593 	return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
5594 }
5595 
5596 int
5597 iwn5000_crystal_calib(struct iwn_softc *sc)
5598 {
5599 	struct iwn5000_phy_calib_crystal cmd;
5600 
5601 	memset(&cmd, 0, sizeof cmd);
5602 	cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
5603 	cmd.ngroups = 1;
5604 	cmd.isvalid = 1;
5605 	cmd.cap_pin[0] = letoh32(sc->eeprom_crystal) & 0xff;
5606 	cmd.cap_pin[1] = (letoh32(sc->eeprom_crystal) >> 16) & 0xff;
5607 	DPRINTF(("sending crystal calibration %d, %d\n",
5608 	    cmd.cap_pin[0], cmd.cap_pin[1]));
5609 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
5610 }
5611 
5612 int
5613 iwn6000_temp_offset_calib(struct iwn_softc *sc)
5614 {
5615 	struct iwn6000_phy_calib_temp_offset cmd;
5616 
5617 	memset(&cmd, 0, sizeof cmd);
5618 	cmd.code = IWN6000_PHY_CALIB_TEMP_OFFSET;
5619 	cmd.ngroups = 1;
5620 	cmd.isvalid = 1;
5621 	if (sc->eeprom_temp != 0)
5622 		cmd.offset = htole16(sc->eeprom_temp);
5623 	else
5624 		cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
5625 	DPRINTF(("setting radio sensor offset to %d\n", letoh16(cmd.offset)));
5626 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
5627 }
5628 
5629 int
5630 iwn2000_temp_offset_calib(struct iwn_softc *sc)
5631 {
5632 	struct iwn2000_phy_calib_temp_offset cmd;
5633 
5634 	memset(&cmd, 0, sizeof cmd);
5635 	cmd.code = IWN2000_PHY_CALIB_TEMP_OFFSET;
5636 	cmd.ngroups = 1;
5637 	cmd.isvalid = 1;
5638 	if (sc->eeprom_rawtemp != 0) {
5639 		cmd.offset_low = htole16(sc->eeprom_rawtemp);
5640 		cmd.offset_high = htole16(sc->eeprom_temp);
5641 	} else {
5642 		cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET);
5643 		cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET);
5644 	}
5645 	cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage);
5646 	DPRINTF(("setting radio sensor offset to %d:%d, voltage to %d\n",
5647 	    letoh16(cmd.offset_low), letoh16(cmd.offset_high),
5648 	    letoh16(cmd.burnt_voltage_ref)));
5649 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
5650 }
5651 
5652 /*
5653  * This function is called after the runtime firmware notifies us of its
5654  * readiness (called in a process context).
5655  */
5656 int
5657 iwn4965_post_alive(struct iwn_softc *sc)
5658 {
5659 	int error, qid;
5660 
5661 	if ((error = iwn_nic_lock(sc)) != 0)
5662 		return error;
5663 
5664 	/* Clear TX scheduler state in SRAM. */
5665 	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5666 	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
5667 	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
5668 
5669 	/* Set physical address of TX scheduler rings (1KB aligned). */
5670 	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5671 
5672 	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5673 
5674 	/* Disable chain mode for all our 16 queues. */
5675 	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
5676 
5677 	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
5678 		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
5679 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5680 
5681 		/* Set scheduler window size. */
5682 		iwn_mem_write(sc, sc->sched_base +
5683 		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
5684 		/* Set scheduler frame limit. */
5685 		iwn_mem_write(sc, sc->sched_base +
5686 		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5687 		    IWN_SCHED_LIMIT << 16);
5688 	}
5689 
5690 	/* Enable interrupts for all our 16 queues. */
5691 	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
5692 	/* Identify TX FIFO rings (0-7). */
5693 	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
5694 
5695 	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
5696 	for (qid = 0; qid < 7; qid++) {
5697 		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
5698 		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5699 		    IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
5700 	}
5701 	iwn_nic_unlock(sc);
5702 	return 0;
5703 }
5704 
5705 /*
5706  * This function is called after the initialization or runtime firmware
5707  * notifies us of its readiness (called in a process context).
5708  */
5709 int
5710 iwn5000_post_alive(struct iwn_softc *sc)
5711 {
5712 	int error, qid;
5713 
5714 	/* Switch to using ICT interrupt mode. */
5715 	iwn5000_ict_reset(sc);
5716 
5717 	if ((error = iwn_nic_lock(sc)) != 0)
5718 		return error;
5719 
5720 	/* Clear TX scheduler state in SRAM. */
5721 	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5722 	iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
5723 	    IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
5724 
5725 	/* Set physical address of TX scheduler rings (1KB aligned). */
5726 	iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5727 
5728 	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5729 
5730 	/* Enable chain mode for all queues, except command queue. */
5731 	iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
5732 	iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
5733 
5734 	for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
5735 		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
5736 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5737 
5738 		iwn_mem_write(sc, sc->sched_base +
5739 		    IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
5740 		/* Set scheduler window size and frame limit. */
5741 		iwn_mem_write(sc, sc->sched_base +
5742 		    IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5743 		    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5744 	}
5745 
5746 	/* Enable interrupts for all our 20 queues. */
5747 	iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
5748 	/* Identify TX FIFO rings (0-7). */
5749 	iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
5750 
5751 	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
5752 	for (qid = 0; qid < 7; qid++) {
5753 		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
5754 		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5755 		    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
5756 	}
5757 	iwn_nic_unlock(sc);
5758 
5759 	/* Configure WiMAX coexistence for combo adapters. */
5760 	error = iwn5000_send_wimax_coex(sc);
5761 	if (error != 0) {
5762 		printf("%s: could not configure WiMAX coexistence\n",
5763 		    sc->sc_dev.dv_xname);
5764 		return error;
5765 	}
5766 	if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
5767 		/* Perform crystal calibration. */
5768 		error = iwn5000_crystal_calib(sc);
5769 		if (error != 0) {
5770 			printf("%s: crystal calibration failed\n",
5771 			    sc->sc_dev.dv_xname);
5772 			return error;
5773 		}
5774 	}
5775 	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
5776 		/* Query calibration from the initialization firmware. */
5777 		if ((error = iwn5000_query_calibration(sc)) != 0) {
5778 			printf("%s: could not query calibration\n",
5779 			    sc->sc_dev.dv_xname);
5780 			return error;
5781 		}
5782 		/*
5783 		 * We have the calibration results now; reboot with the
5784 		 * runtime firmware (calling ourselves recursively!).
5785 		 */
5786 		iwn_hw_stop(sc);
5787 		error = iwn_hw_init(sc);
5788 	} else {
5789 		/* Send calibration results to runtime firmware. */
5790 		error = iwn5000_send_calibration(sc);
5791 	}
5792 	return error;
5793 }
5794 
5795 /*
5796  * The firmware boot code is small and is intended to be copied directly into
5797  * the NIC internal memory (no DMA transfer).
5798  */
5799 int
5800 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
5801 {
5802 	int error, ntries;
5803 
5804 	size /= sizeof (uint32_t);
5805 
5806 	if ((error = iwn_nic_lock(sc)) != 0)
5807 		return error;
5808 
5809 	/* Copy microcode image into NIC memory. */
5810 	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
5811 	    (const uint32_t *)ucode, size);
5812 
5813 	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
5814 	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
5815 	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
5816 
5817 	/* Start boot load now. */
5818 	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
5819 
5820 	/* Wait for transfer to complete. */
5821 	for (ntries = 0; ntries < 1000; ntries++) {
5822 		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
5823 		    IWN_BSM_WR_CTRL_START))
5824 			break;
5825 		DELAY(10);
5826 	}
5827 	if (ntries == 1000) {
5828 		printf("%s: could not load boot firmware\n",
5829 		    sc->sc_dev.dv_xname);
5830 		iwn_nic_unlock(sc);
5831 		return ETIMEDOUT;
5832 	}
5833 
5834 	/* Enable boot after power up. */
5835 	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
5836 
5837 	iwn_nic_unlock(sc);
5838 	return 0;
5839 }
5840 
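/*
 * Load firmware into a 4965 adapter: stage the initialization image in
 * DMA-safe memory, upload and run the boot code via the Bootstrap
 * State Machine (BSM), wait for the init image to report alive, then
 * stage the runtime image for the BSM.
 */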
5841 int
5842 iwn4965_load_firmware(struct iwn_softc *sc)
5843 {
5844 	struct iwn_fw_info *fw = &sc->fw;
5845 	struct iwn_dma_info *dma = &sc->fw_dma;
5846 	int error;
5847 
5848 	/* Copy initialization sections into pre-allocated DMA-safe memory. */
5849 	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
5850 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->init.datasz,
5851 	    BUS_DMASYNC_PREWRITE);
5852 	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
5853 	    fw->init.text, fw->init.textsz);
5854 	bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ,
5855 	    fw->init.textsz, BUS_DMASYNC_PREWRITE);
5856 
5857 	/* Tell adapter where to find initialization sections. */
5858 	if ((error = iwn_nic_lock(sc)) != 0)
5859 		return error;
5860 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
5861 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
5862 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
5863 	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
5864 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
5865 	iwn_nic_unlock(sc);
5866 
5867 	/* Load firmware boot code. */
5868 	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
5869 	if (error != 0) {
5870 		printf("%s: could not load boot firmware\n",
5871 		    sc->sc_dev.dv_xname);
5872 		return error;
5873 	}
5874 	/* Now press "execute". */
5875 	IWN_WRITE(sc, IWN_RESET, 0);
5876 
5877 	/* Wait at most one second for first alive notification. */
5878 	if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) {
5879 		printf("%s: timeout waiting for adapter to initialize\n",
5880 		    sc->sc_dev.dv_xname);
5881 		return error;
5882 	}
5883 
5884 	/* Retrieve current temperature for initial TX power calibration. */
5885 	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
5886 	sc->temp = iwn4965_get_temperature(sc);
5887 
5888 	/* Copy runtime sections into pre-allocated DMA-safe memory. */
5889 	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
5890 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->main.datasz,
5891 	    BUS_DMASYNC_PREWRITE);
5892 	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
5893 	    fw->main.text, fw->main.textsz);
5894 	bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ,
5895 	    fw->main.textsz, BUS_DMASYNC_PREWRITE);
5896 
5897 	/* Tell adapter where to find runtime sections. */
5898 	if ((error = iwn_nic_lock(sc)) != 0)
5899 		return error;
5900 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
5901 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
5902 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
5903 	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
5904 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
5905 	    IWN_FW_UPDATED | fw->main.textsz);
5906 	iwn_nic_unlock(sc);
5907 
5908 	return 0;
5909 }
5910 
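/*
 * Upload one firmware section to NIC internal memory at offset "dst"
 * using the Flow Handler's service DMA channel.
 */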
5911 int
5912 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
5913     const uint8_t *section, int size)
5914 {
5915 	struct iwn_dma_info *dma = &sc->fw_dma;
5916 	int error;
5917 
5918 	/* Copy firmware section into pre-allocated DMA-safe memory. */
5919 	memcpy(dma->vaddr, section, size);
5920 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
5921 
5922 	if ((error = iwn_nic_lock(sc)) != 0)
5923 		return error;
5924 
5925 	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
5926 	    IWN_FH_TX_CONFIG_DMA_PAUSE);
5927 
5928 	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
5929 	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
5930 	    IWN_LOADDR(dma->paddr));
5931 	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
5932 	    IWN_HIADDR(dma->paddr) << 28 | size);
5933 	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
5934 	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
5935 	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
5936 	    IWN_FH_TXBUF_STATUS_TFBD_VALID);
5937 
5938 	/* Kick Flow Handler to start DMA transfer. */
5939 	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
5940 	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
5941 
5942 	iwn_nic_unlock(sc);
5943 
5944 	/* Wait at most five seconds for FH DMA transfer to complete. */
5945 	return tsleep(sc, PCATCH, "iwninit", 5 * hz);
5946 }
5947 
5948 int
5949 iwn5000_load_firmware(struct iwn_softc *sc)
5950 {
5951 	struct iwn_fw_part *fw;
5952 	int error;
5953 
5954 	/* Load the initialization firmware on first boot only. */
5955 	fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
5956 	    &sc->fw.main : &sc->fw.init;
5957 
5958 	error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
5959 	    fw->text, fw->textsz);
5960 	if (error != 0) {
5961 		printf("%s: could not load firmware %s section\n",
5962 		    sc->sc_dev.dv_xname, ".text");
5963 		return error;
5964 	}
5965 	error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
5966 	    fw->data, fw->datasz);
5967 	if (error != 0) {
5968 		printf("%s: could not load firmware %s section\n",
5969 		    sc->sc_dev.dv_xname, ".data");
5970 		return error;
5971 	}
5972 
5973 	/* Now press "execute". */
5974 	IWN_WRITE(sc, IWN_RESET, 0);
5975 	return 0;
5976 }
5977 
5978 /*
5979  * Extract text and data sections from a legacy firmware image.
5980  */
5981 int
5982 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
5983 {
5984 	const uint32_t *ptr;
5985 	size_t hdrlen = 24;
5986 	uint32_t rev;
5987 
5988 	ptr = (const uint32_t *)fw->data;
5989 	rev = letoh32(*ptr++);
5990 
5991 	/* Check firmware API version. */
5992 	if (IWN_FW_API(rev) <= 1) {
5993 		printf("%s: bad firmware, need API version >=2\n",
5994 		    sc->sc_dev.dv_xname);
5995 		return EINVAL;
5996 	}
5997 	if (IWN_FW_API(rev) >= 3) {
5998 		/* Skip build number (version 2 header). */
5999 		hdrlen += 4;
6000 		ptr++;
6001 	}
6002 	if (fw->size < hdrlen) {
6003 		printf("%s: firmware too short: %zu bytes\n",
6004 		    sc->sc_dev.dv_xname, fw->size);
6005 		return EINVAL;
6006 	}
6007 	fw->main.textsz = letoh32(*ptr++);
6008 	fw->main.datasz = letoh32(*ptr++);
6009 	fw->init.textsz = letoh32(*ptr++);
6010 	fw->init.datasz = letoh32(*ptr++);
6011 	fw->boot.textsz = letoh32(*ptr++);
6012 
6013 	/* Check that all firmware sections fit. */
6014 	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
6015 	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
6016 		printf("%s: firmware too short: %zu bytes\n",
6017 		    sc->sc_dev.dv_xname, fw->size);
6018 		return EINVAL;
6019 	}
6020 
6021 	/* Get pointers to firmware sections. */
6022 	fw->main.text = (const uint8_t *)ptr;
6023 	fw->main.data = fw->main.text + fw->main.textsz;
6024 	fw->init.text = fw->main.data + fw->main.datasz;
6025 	fw->init.data = fw->init.text + fw->init.textsz;
6026 	fw->boot.text = fw->init.data + fw->init.datasz;
6027 	return 0;
6028 }
6029 
6030 /*
6031  * Extract text and data sections from a TLV firmware image.
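 *
 * The image starts with an iwn_fw_tlv_hdr (signature, description,
 * build number and a mask of supported alternatives), followed by
 * type-length-value records, each padded to a 32-bit boundary.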
6032  */
6033 int
6034 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
6035     uint16_t alt)
6036 {
6037 	const struct iwn_fw_tlv_hdr *hdr;
6038 	const struct iwn_fw_tlv *tlv;
6039 	const uint8_t *ptr, *end;
6040 	uint64_t altmask;
6041 	uint32_t len;
6042 
6043 	if (fw->size < sizeof (*hdr)) {
6044 		printf("%s: firmware too short: %zu bytes\n",
6045 		    sc->sc_dev.dv_xname, fw->size);
6046 		return EINVAL;
6047 	}
6048 	hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
6049 	if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
6050 		printf("%s: bad firmware signature 0x%08x\n",
6051 		    sc->sc_dev.dv_xname, letoh32(hdr->signature));
6052 		return EINVAL;
6053 	}
6054 	DPRINTF(("FW: \"%.64s\", build 0x%x\n", hdr->descr,
6055 	    letoh32(hdr->build)));
6056 
6057 	/*
6058 	 * Select the closest supported alternative that is less than
6059 	 * or equal to the specified one.
6060 	 */
6061 	altmask = letoh64(hdr->altmask);
6062 	while (alt > 0 && !(altmask & (1ULL << alt)))
6063 		alt--;	/* Downgrade. */
6064 	DPRINTF(("using alternative %d\n", alt));
6065 
6066 	ptr = (const uint8_t *)(hdr + 1);
6067 	end = (const uint8_t *)(fw->data + fw->size);
6068 
6069 	/* Parse type-length-value fields. */
6070 	while (ptr + sizeof (*tlv) <= end) {
6071 		tlv = (const struct iwn_fw_tlv *)ptr;
6072 		len = letoh32(tlv->len);
6073 
6074 		ptr += sizeof (*tlv);
6075 		if (ptr + len > end) {
6076 			printf("%s: firmware too short: %zu bytes\n",
6077 			    sc->sc_dev.dv_xname, fw->size);
6078 			return EINVAL;
6079 		}
6080 		/* Skip other alternatives. */
6081 		if (tlv->alt != 0 && tlv->alt != htole16(alt))
6082 			goto next;
6083 
6084 		switch (letoh16(tlv->type)) {
6085 		case IWN_FW_TLV_MAIN_TEXT:
6086 			fw->main.text = ptr;
6087 			fw->main.textsz = len;
6088 			break;
6089 		case IWN_FW_TLV_MAIN_DATA:
6090 			fw->main.data = ptr;
6091 			fw->main.datasz = len;
6092 			break;
6093 		case IWN_FW_TLV_INIT_TEXT:
6094 			fw->init.text = ptr;
6095 			fw->init.textsz = len;
6096 			break;
6097 		case IWN_FW_TLV_INIT_DATA:
6098 			fw->init.data = ptr;
6099 			fw->init.datasz = len;
6100 			break;
6101 		case IWN_FW_TLV_BOOT_TEXT:
6102 			fw->boot.text = ptr;
6103 			fw->boot.textsz = len;
6104 			break;
6105 		case IWN_FW_TLV_ENH_SENS:
6106 			if (len != 0) {
6107 				printf("%s: TLV type %d has invalid size %u\n",
6108 				    sc->sc_dev.dv_xname, letoh16(tlv->type),
6109 				    len);
6110 				goto next;
6111 			}
6112 			sc->sc_flags |= IWN_FLAG_ENH_SENS;
6113 			break;
6114 		case IWN_FW_TLV_PHY_CALIB:
6115 			if (len != sizeof(uint32_t)) {
6116 				printf("%s: TLV type %d has invalid size %u\n",
6117 				    sc->sc_dev.dv_xname, letoh16(tlv->type),
6118 				    len);
6119 				goto next;
6120 			}
6121 			if (letoh32(*ptr) <= IWN5000_PHY_CALIB_MAX) {
6122 				sc->reset_noise_gain = letoh32(*ptr);
6123 				sc->noise_gain = letoh32(*ptr) + 1;
6124 			}
6125 			break;
6126 		case IWN_FW_TLV_FLAGS:
6127 			if (len < sizeof(uint32_t))
6128 				break;
6129 			if (len % sizeof(uint32_t))
6130 				break;
6131 			sc->tlv_feature_flags = letoh32(*ptr);
6132 			DPRINTF(("feature: 0x%08x\n", sc->tlv_feature_flags));
6133 			break;
6134 		default:
6135 			DPRINTF(("TLV type %d not handled\n",
6136 			    letoh16(tlv->type)));
6137 			break;
6138 		}
6139  next:		/* TLV fields are 32-bit aligned. */
6140 		ptr += (len + 3) & ~3;
6141 	}
6142 	return 0;
6143 }
6144 
6145 int
6146 iwn_read_firmware(struct iwn_softc *sc)
6147 {
6148 	struct iwn_fw_info *fw = &sc->fw;
6149 	int error;
6150 
6151 	/*
6152 	 * Some PHY calibration commands are firmware-dependent; these
6153 	 * are the default values that will be overridden if
6154 	 * necessary.
6155 	 */
6156 	sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
6157 	sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;
6158 
6159 	memset(fw, 0, sizeof (*fw));
6160 
6161 	/* Read firmware image from filesystem. */
6162 	if ((error = loadfirmware(sc->fwname, &fw->data, &fw->size)) != 0) {
6163 		printf("%s: could not read firmware %s (error %d)\n",
6164 		    sc->sc_dev.dv_xname, sc->fwname, error);
6165 		return error;
6166 	}
6167 	if (fw->size < sizeof (uint32_t)) {
6168 		printf("%s: firmware too short: %zu bytes\n",
6169 		    sc->sc_dev.dv_xname, fw->size);
6170 		free(fw->data, M_DEVBUF, fw->size);
6171 		return EINVAL;
6172 	}
6173 
6174 	/* Retrieve text and data sections. */
6175 	if (*(const uint32_t *)fw->data != 0)	/* Legacy image. */
6176 		error = iwn_read_firmware_leg(sc, fw);
6177 	else
6178 		error = iwn_read_firmware_tlv(sc, fw, 1);
6179 	if (error != 0) {
6180 		printf("%s: could not read firmware sections\n",
6181 		    sc->sc_dev.dv_xname);
6182 		free(fw->data, M_DEVBUF, fw->size);
6183 		return error;
6184 	}
6185 
6186 	/* Make sure text and data sections fit in hardware memory. */
6187 	if (fw->main.textsz > sc->fw_text_maxsz ||
6188 	    fw->main.datasz > sc->fw_data_maxsz ||
6189 	    fw->init.textsz > sc->fw_text_maxsz ||
6190 	    fw->init.datasz > sc->fw_data_maxsz ||
6191 	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
6192 	    (fw->boot.textsz & 3) != 0) {
6193 		printf("%s: firmware sections too large\n",
6194 		    sc->sc_dev.dv_xname);
6195 		free(fw->data, M_DEVBUF, fw->size);
6196 		return EINVAL;
6197 	}
6198 
6199 	/* We can proceed with loading the firmware. */
6200 	return 0;
6201 }
6202 
6203 int
6204 iwn_clock_wait(struct iwn_softc *sc)
6205 {
6206 	int ntries;
6207 
6208 	/* Set "initialization complete" bit. */
6209 	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6210 
6211 	/* Wait for clock stabilization. */
6212 	for (ntries = 0; ntries < 2500; ntries++) {
6213 		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
6214 			return 0;
6215 		DELAY(10);
6216 	}
6217 	printf("%s: timeout waiting for clock stabilization\n",
6218 	    sc->sc_dev.dv_xname);
6219 	return ETIMEDOUT;
6220 }
6221 
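/*
 * Power up the adapter: apply PCIe bug workarounds, wait for the
 * clock to stabilize and request the DMA (and, on 4965, BSM) clocks.
 */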
6222 int
6223 iwn_apm_init(struct iwn_softc *sc)
6224 {
6225 	pcireg_t reg;
6226 	int error;
6227 
6228 	/* Disable L0s exit timer (NMI bug workaround). */
6229 	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
6230 	/* Don't wait for ICH L0s (ICH bug workaround). */
6231 	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
6232 
6233 	/* Set FH wait threshold to max (HW bug under stress workaround). */
6234 	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
6235 
6236 	/* Enable HAP INTA to move adapter from L1a to L0s. */
6237 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
6238 
6239 	/* Retrieve PCIe Active State Power Management (ASPM). */
6240 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
6241 	    sc->sc_cap_off + PCI_PCIE_LCSR);
6242 	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
6243 	if (reg & PCI_PCIE_LCSR_ASPM_L1)	/* L1 Entry enabled. */
6244 		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6245 	else
6246 		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6247 
6248 	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
6249 	    sc->hw_type <= IWN_HW_REV_TYPE_1000)
6250 		IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
6251 
6252 	/* Wait for clock stabilization before accessing prph. */
6253 	if ((error = iwn_clock_wait(sc)) != 0)
6254 		return error;
6255 
6256 	if ((error = iwn_nic_lock(sc)) != 0)
6257 		return error;
6258 	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
6259 		/* Enable DMA and BSM (Bootstrap State Machine). */
6260 		iwn_prph_write(sc, IWN_APMG_CLK_EN,
6261 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
6262 		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
6263 	} else {
6264 		/* Enable DMA. */
6265 		iwn_prph_write(sc, IWN_APMG_CLK_EN,
6266 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6267 	}
6268 	DELAY(20);
6269 	/* Disable L1-Active. */
6270 	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
6271 	iwn_nic_unlock(sc);
6272 
6273 	return 0;
6274 }
6275 
6276 void
6277 iwn_apm_stop_master(struct iwn_softc *sc)
6278 {
6279 	int ntries;
6280 
6281 	/* Stop busmaster DMA activity. */
6282 	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
6283 	for (ntries = 0; ntries < 100; ntries++) {
6284 		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
6285 			return;
6286 		DELAY(10);
6287 	}
6288 	printf("%s: timeout waiting for master\n", sc->sc_dev.dv_xname);
6289 }
6290 
6291 void
6292 iwn_apm_stop(struct iwn_softc *sc)
6293 {
6294 	iwn_apm_stop_master(sc);
6295 
6296 	/* Reset the entire device. */
6297 	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
6298 	DELAY(10);
6299 	/* Clear "initialization complete" bit. */
6300 	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6301 }
6302 
6303 int
6304 iwn4965_nic_config(struct iwn_softc *sc)
6305 {
6306 	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
6307 		/*
6308 		 * I don't believe this to be correct but this is what the
6309 		 * vendor driver is doing. Probably the bits should not be
6310 		 * shifted in IWN_RFCFG_*.
6311 		 */
6312 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6313 		    IWN_RFCFG_TYPE(sc->rfcfg) |
6314 		    IWN_RFCFG_STEP(sc->rfcfg) |
6315 		    IWN_RFCFG_DASH(sc->rfcfg));
6316 	}
6317 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6318 	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6319 	return 0;
6320 }
6321 
6322 int
6323 iwn5000_nic_config(struct iwn_softc *sc)
6324 {
6325 	uint32_t tmp;
6326 	int error;
6327 
6328 	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
6329 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6330 		    IWN_RFCFG_TYPE(sc->rfcfg) |
6331 		    IWN_RFCFG_STEP(sc->rfcfg) |
6332 		    IWN_RFCFG_DASH(sc->rfcfg));
6333 	}
6334 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6335 	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6336 
6337 	if ((error = iwn_nic_lock(sc)) != 0)
6338 		return error;
6339 	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
6340 
6341 	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
6342 		/*
6343 		 * Select first Switching Voltage Regulator (1.32V) to
6344 		 * solve a stability issue related to noisy DC2DC line
6345 		 * in the silicon of 1000 Series.
6346 		 */
6347 		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
6348 		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
6349 		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
6350 		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
6351 	}
6352 	iwn_nic_unlock(sc);
6353 
6354 	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
6355 		/* Use internal power amplifier only. */
6356 		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
6357 	}
6358 	if ((sc->hw_type == IWN_HW_REV_TYPE_6050 ||
6359 	     sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) {
6360 		/* Indicate that ROM calibration version is >=6. */
6361 		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
6362 	}
6363 	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
6364 		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2);
6365 	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
6366 	    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
6367 	    sc->hw_type == IWN_HW_REV_TYPE_135 ||
6368 	    sc->hw_type == IWN_HW_REV_TYPE_105)
6369 		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_IQ_INVERT);
6370 	return 0;
6371 }
6372 
6373 /*
6374  * Take NIC ownership over Intel Active Management Technology (AMT).
6375  */
6376 int
6377 iwn_hw_prepare(struct iwn_softc *sc)
6378 {
6379 	int ntries;
6380 
6381 	/* Check if hardware is ready. */
6382 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6383 	for (ntries = 0; ntries < 5; ntries++) {
6384 		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6385 		    IWN_HW_IF_CONFIG_NIC_READY)
6386 			return 0;
6387 		DELAY(10);
6388 	}
6389 
6390 	/* Hardware not ready, force into ready state. */
6391 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
6392 	for (ntries = 0; ntries < 15000; ntries++) {
6393 		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
6394 		    IWN_HW_IF_CONFIG_PREPARE_DONE))
6395 			break;
6396 		DELAY(10);
6397 	}
6398 	if (ntries == 15000)
6399 		return ETIMEDOUT;
6400 
6401 	/* Hardware should be ready now. */
6402 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6403 	for (ntries = 0; ntries < 5; ntries++) {
6404 		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6405 		    IWN_HW_IF_CONFIG_NIC_READY)
6406 			return 0;
6407 		DELAY(10);
6408 	}
6409 	return ETIMEDOUT;
6410 }
6411 
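/*
 * Bring the hardware up: power on the adapter, program the RX ring,
 * TX scheduler and DMA channels, upload the firmware and wait for its
 * alive notification.
 */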
6412 int
6413 iwn_hw_init(struct iwn_softc *sc)
6414 {
6415 	struct iwn_ops *ops = &sc->ops;
6416 	int error, chnl, qid;
6417 
6418 	/* Clear pending interrupts. */
6419 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6420 
6421 	if ((error = iwn_apm_init(sc)) != 0) {
6422 		printf("%s: could not power on adapter\n",
6423 		    sc->sc_dev.dv_xname);
6424 		return error;
6425 	}
6426 
6427 	/* Select VMAIN power source. */
6428 	if ((error = iwn_nic_lock(sc)) != 0)
6429 		return error;
6430 	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
6431 	iwn_nic_unlock(sc);
6432 
6433 	/* Perform adapter-specific initialization. */
6434 	if ((error = ops->nic_config(sc)) != 0)
6435 		return error;
6436 
6437 	/* Initialize RX ring. */
6438 	if ((error = iwn_nic_lock(sc)) != 0)
6439 		return error;
6440 	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
6441 	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
6442 	/* Set physical address of RX ring (256-byte aligned). */
6443 	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
6444 	/* Set physical address of RX status (16-byte aligned). */
6445 	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
6446 	/* Enable RX. */
6447 	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
6448 	    IWN_FH_RX_CONFIG_ENA           |
6449 	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
6450 	    IWN_FH_RX_CONFIG_IRQ_DST_HOST  |
6451 	    IWN_FH_RX_CONFIG_SINGLE_FRAME  |
6452 	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0x11) | /* about 1/2 msec */
6453 	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
6454 	iwn_nic_unlock(sc);
6455 	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
6456 
6457 	if ((error = iwn_nic_lock(sc)) != 0)
6458 		return error;
6459 
6460 	/* Initialize TX scheduler. */
6461 	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
6462 
6463 	/* Set physical address of "keep warm" page (16-byte aligned). */
6464 	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
6465 
6466 	/* Initialize TX rings. */
6467 	for (qid = 0; qid < sc->ntxqs; qid++) {
6468 		struct iwn_tx_ring *txq = &sc->txq[qid];
6469 
6470 		/* Set physical address of TX ring (256-byte aligned). */
6471 		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
6472 		    txq->desc_dma.paddr >> 8);
6473 	}
6474 	iwn_nic_unlock(sc);
6475 
6476 	/* Enable DMA channels. */
6477 	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
6478 		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
6479 		    IWN_FH_TX_CONFIG_DMA_ENA |
6480 		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
6481 	}
6482 
6483 	/* Clear "radio off" and "commands blocked" bits. */
6484 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6485 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
6486 
6487 	/* Clear pending interrupts. */
6488 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6489 	/* Enable interrupt coalescing. */
6490 	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
6491 	/* Enable interrupts. */
6492 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6493 
6494 	/* _Really_ make sure "radio off" bit is cleared! */
6495 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6496 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6497 
6498 	/* Enable shadow registers. */
6499 	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
6500 		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
6501 
6502 	if ((error = ops->load_firmware(sc)) != 0) {
6503 		printf("%s: could not load firmware\n", sc->sc_dev.dv_xname);
6504 		return error;
6505 	}
6506 	/* Wait at most one second for firmware alive notification. */
6507 	if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) {
6508 		printf("%s: timeout waiting for adapter to initialize\n",
6509 		    sc->sc_dev.dv_xname);
6510 		return error;
6511 	}
6512 	/* Do post-firmware initialization. */
6513 	return ops->post_alive(sc);
6514 }
6515 
6516 void
6517 iwn_hw_stop(struct iwn_softc *sc)
6518 {
6519 	int chnl, qid, ntries;
6520 
6521 	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
6522 
6523 	/* Disable interrupts. */
6524 	IWN_WRITE(sc, IWN_INT_MASK, 0);
6525 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6526 	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
6527 	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6528 
6529 	/* Make sure we no longer hold the NIC lock. */
6530 	iwn_nic_unlock(sc);
6531 
6532 	/* Stop TX scheduler. */
6533 	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
6534 
6535 	/* Stop all DMA channels. */
6536 	if (iwn_nic_lock(sc) == 0) {
6537 		for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
6538 			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
6539 			for (ntries = 0; ntries < 200; ntries++) {
6540 				if (IWN_READ(sc, IWN_FH_TX_STATUS) &
6541 				    IWN_FH_TX_STATUS_IDLE(chnl))
6542 					break;
6543 				DELAY(10);
6544 			}
6545 		}
6546 		iwn_nic_unlock(sc);
6547 	}
6548 
6549 	/* Stop RX ring. */
6550 	iwn_reset_rx_ring(sc, &sc->rxq);
6551 
6552 	/* Reset all TX rings. */
6553 	for (qid = 0; qid < sc->ntxqs; qid++)
6554 		iwn_reset_tx_ring(sc, &sc->txq[qid]);
6555 
6556 	if (iwn_nic_lock(sc) == 0) {
6557 		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
6558 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6559 		iwn_nic_unlock(sc);
6560 	}
6561 	DELAY(5);
6562 	/* Power OFF adapter. */
6563 	iwn_apm_stop(sc);
6564 }
6565 
6566 int
6567 iwn_init(struct ifnet *ifp)
6568 {
6569 	struct iwn_softc *sc = ifp->if_softc;
6570 	struct ieee80211com *ic = &sc->sc_ic;
6571 	int error;
6572 
6573 	memset(sc->bss_node_addr, 0, sizeof(sc->bss_node_addr));
6574 
6575 	if ((error = iwn_hw_prepare(sc)) != 0) {
6576 		printf("%s: hardware not ready\n", sc->sc_dev.dv_xname);
6577 		goto fail;
6578 	}
6579 
6580 	/* Check that the radio is not disabled by hardware switch. */
6581 	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
6582 		printf("%s: radio is disabled by hardware switch\n",
6583 		    sc->sc_dev.dv_xname);
6584 		error = EPERM;	/* :-) */
6585 		goto fail;
6586 	}
6587 
6588 	/* Read firmware images from the filesystem. */
6589 	if ((error = iwn_read_firmware(sc)) != 0) {
6590 		printf("%s: could not read firmware\n", sc->sc_dev.dv_xname);
6591 		goto fail;
6592 	}
6593 
6594 	/* Initialize interrupt mask to default value. */
6595 	sc->int_mask = IWN_INT_MASK_DEF;
6596 	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6597 
6598 	/* Initialize hardware and upload firmware. */
6599 	error = iwn_hw_init(sc);
6600 	free(sc->fw.data, M_DEVBUF, sc->fw.size);
6601 	if (error != 0) {
6602 		printf("%s: could not initialize hardware\n",
6603 		    sc->sc_dev.dv_xname);
6604 		goto fail;
6605 	}
6606 
6607 	/* Configure adapter now that it is ready. */
6608 	if ((error = iwn_config(sc)) != 0) {
6609 		printf("%s: could not configure device\n",
6610 		    sc->sc_dev.dv_xname);
6611 		goto fail;
6612 	}
6613 
6614 	ifq_clr_oactive(&ifp->if_snd);
6615 	ifp->if_flags |= IFF_RUNNING;
6616 
6617 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6618 		ieee80211_begin_scan(ifp);
6619 	else
6620 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
6621 
6622 	return 0;
6623 
6624 fail:	iwn_stop(ifp, 1);
6625 	return error;
6626 }
6627 
6628 void
6629 iwn_stop(struct ifnet *ifp, int disable)
6630 {
6631 	struct iwn_softc *sc = ifp->if_softc;
6632 	struct ieee80211com *ic = &sc->sc_ic;
6633 
6634 	timeout_del(&sc->calib_to);
6635 	ifp->if_timer = sc->sc_tx_timer = 0;
6636 	ifp->if_flags &= ~IFF_RUNNING;
6637 	ifq_clr_oactive(&ifp->if_snd);
6638 
6639 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
6640 
6641 	/* Power OFF hardware. */
6642 	iwn_hw_stop(sc);
6643 }
6644