1 /*	$OpenBSD: if_iwn.c,v 1.244 2020/12/12 11:48:53 jan Exp $	*/
2 
3 /*-
4  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Driver for Intel WiFi Link 4965 and 1000/2000/5000/6000 Series 802.11
21  * network adapters.
22  */
23 
24 #include "bpfilter.h"
25 
26 #include <sys/param.h>
27 #include <sys/sockio.h>
28 #include <sys/mbuf.h>
29 #include <sys/kernel.h>
30 #include <sys/rwlock.h>
31 #include <sys/socket.h>
32 #include <sys/systm.h>
33 #include <sys/malloc.h>
34 #include <sys/conf.h>
35 #include <sys/device.h>
36 #include <sys/task.h>
37 #include <sys/endian.h>
38 
39 #include <machine/bus.h>
40 #include <machine/intr.h>
41 
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44 #include <dev/pci/pcidevs.h>
45 
46 #if NBPFILTER > 0
47 #include <net/bpf.h>
48 #endif
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 
53 #include <netinet/in.h>
54 #include <netinet/if_ether.h>
55 
56 #include <net80211/ieee80211_var.h>
57 #include <net80211/ieee80211_amrr.h>
58 #include <net80211/ieee80211_mira.h>
59 #include <net80211/ieee80211_radiotap.h>
60 #include <net80211/ieee80211_priv.h> /* for SEQ_LT */
61 #undef DPRINTF /* defined in ieee80211_priv.h */
62 
63 #include <dev/pci/if_iwnreg.h>
64 #include <dev/pci/if_iwnvar.h>
65 
66 static const struct pci_matchid iwn_devices[] = {
67 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_4965_1 },
68 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_4965_2 },
69 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5100_1 },
70 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5100_2 },
71 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5150_1 },
72 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5150_2 },
73 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5300_1 },
74 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5300_2 },
75 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5350_1 },
76 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5350_2 },
77 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1000_1 },
78 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1000_2 },
79 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6300_1 },
80 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6300_2 },
81 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6200_1 },
82 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6200_2 },
83 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6050_1 },
84 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6050_2 },
85 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6005_1 },
86 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6005_2 },
87 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6030_1 },
88 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6030_2 },
89 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1030_1 },
90 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1030_2 },
91 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_100_1 },
92 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_100_2 },
93 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_130_1 },
94 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_130_2 },
95 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6235_1 },
96 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6235_2 },
97 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2230_1 },
98 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2230_2 },
99 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2200_1 },
100 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2200_2 },
101 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_135_1 },
102 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_135_2 },
103 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_105_1 },
104 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_105_2 },
105 };
106 
107 int		iwn_match(struct device *, void *, void *);
108 void		iwn_attach(struct device *, struct device *, void *);
109 int		iwn4965_attach(struct iwn_softc *, pci_product_id_t);
110 int		iwn5000_attach(struct iwn_softc *, pci_product_id_t);
111 #if NBPFILTER > 0
112 void		iwn_radiotap_attach(struct iwn_softc *);
113 #endif
114 int		iwn_detach(struct device *, int);
115 int		iwn_activate(struct device *, int);
116 void		iwn_wakeup(struct iwn_softc *);
117 void		iwn_init_task(void *);
118 int		iwn_nic_lock(struct iwn_softc *);
119 int		iwn_eeprom_lock(struct iwn_softc *);
120 int		iwn_init_otprom(struct iwn_softc *);
121 int		iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
122 int		iwn_dma_contig_alloc(bus_dma_tag_t, struct iwn_dma_info *,
123 		    void **, bus_size_t, bus_size_t);
124 void		iwn_dma_contig_free(struct iwn_dma_info *);
125 int		iwn_alloc_sched(struct iwn_softc *);
126 void		iwn_free_sched(struct iwn_softc *);
127 int		iwn_alloc_kw(struct iwn_softc *);
128 void		iwn_free_kw(struct iwn_softc *);
129 int		iwn_alloc_ict(struct iwn_softc *);
130 void		iwn_free_ict(struct iwn_softc *);
131 int		iwn_alloc_fwmem(struct iwn_softc *);
132 void		iwn_free_fwmem(struct iwn_softc *);
133 int		iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
134 void		iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
135 void		iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
136 int		iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
137 		    int);
138 void		iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
139 void		iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
140 void		iwn5000_ict_reset(struct iwn_softc *);
141 int		iwn_read_eeprom(struct iwn_softc *);
142 void		iwn4965_read_eeprom(struct iwn_softc *);
143 void		iwn4965_print_power_group(struct iwn_softc *, int);
144 void		iwn5000_read_eeprom(struct iwn_softc *);
145 void		iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
146 void		iwn_read_eeprom_enhinfo(struct iwn_softc *);
147 struct		ieee80211_node *iwn_node_alloc(struct ieee80211com *);
148 void		iwn_newassoc(struct ieee80211com *, struct ieee80211_node *,
149 		    int);
150 int		iwn_media_change(struct ifnet *);
151 int		iwn_newstate(struct ieee80211com *, enum ieee80211_state, int);
152 void		iwn_iter_func(void *, struct ieee80211_node *);
153 void		iwn_calib_timeout(void *);
154 int		iwn_ccmp_decap(struct iwn_softc *, struct mbuf *,
155 		    struct ieee80211_node *);
156 void		iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
157 		    struct iwn_rx_data *);
158 void		iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
159 		    struct iwn_rx_data *, struct mbuf_list *);
160 void		iwn_mira_choose(struct iwn_softc *, struct ieee80211_node *);
161 void		iwn_ampdu_rate_control(struct iwn_softc *, struct ieee80211_node *,
162 		    struct iwn_tx_ring *, int, uint16_t, uint16_t);
163 void		iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
164 		    struct iwn_rx_data *);
165 void		iwn5000_rx_calib_results(struct iwn_softc *,
166 		    struct iwn_rx_desc *, struct iwn_rx_data *);
167 void		iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
168 		    struct iwn_rx_data *);
169 void		iwn_ampdu_txq_advance(struct iwn_softc *, struct iwn_tx_ring *,
170 		    int, int);
171 void		iwn_ampdu_tx_done(struct iwn_softc *, struct iwn_tx_ring *,
172 		    struct iwn_rx_desc *, uint16_t, uint8_t, uint8_t, uint8_t,
173 		    int, uint32_t, struct iwn_txagg_status *);
174 void		iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
175 		    struct iwn_rx_data *);
176 void		iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
177 		    struct iwn_rx_data *);
178 void		iwn_tx_done_free_txdata(struct iwn_softc *,
179 		    struct iwn_tx_data *);
180 void		iwn_clear_oactive(struct iwn_softc *, struct iwn_tx_ring *);
181 void		iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
182 		    uint8_t, uint8_t, int, int, uint16_t);
183 void		iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
184 void		iwn_notif_intr(struct iwn_softc *);
185 void		iwn_wakeup_intr(struct iwn_softc *);
186 void		iwn_fatal_intr(struct iwn_softc *);
187 int		iwn_intr(void *);
188 void		iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
189 		    uint16_t);
190 void		iwn4965_reset_sched(struct iwn_softc *, int, int);
191 void		iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
192 		    uint16_t);
193 void		iwn5000_reset_sched(struct iwn_softc *, int, int);
194 int		iwn_tx(struct iwn_softc *, struct mbuf *,
195 		    struct ieee80211_node *);
196 int		iwn_rval2ridx(int);
197 void		iwn_start(struct ifnet *);
198 void		iwn_watchdog(struct ifnet *);
199 int		iwn_ioctl(struct ifnet *, u_long, caddr_t);
200 int		iwn_cmd(struct iwn_softc *, int, const void *, int, int);
201 int		iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
202 		    int);
203 int		iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
204 		    int);
205 int		iwn_set_link_quality(struct iwn_softc *,
206 		    struct ieee80211_node *);
207 int		iwn_add_broadcast_node(struct iwn_softc *, int, int);
208 void		iwn_updateedca(struct ieee80211com *);
209 void		iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
210 int		iwn_set_critical_temp(struct iwn_softc *);
211 int		iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
212 void		iwn4965_power_calibration(struct iwn_softc *, int);
213 int		iwn4965_set_txpower(struct iwn_softc *, int);
214 int		iwn5000_set_txpower(struct iwn_softc *, int);
215 int		iwn4965_get_rssi(const struct iwn_rx_stat *);
216 int		iwn5000_get_rssi(const struct iwn_rx_stat *);
217 int		iwn_get_noise(const struct iwn_rx_general_stats *);
218 int		iwn4965_get_temperature(struct iwn_softc *);
219 int		iwn5000_get_temperature(struct iwn_softc *);
220 int		iwn_init_sensitivity(struct iwn_softc *);
221 void		iwn_collect_noise(struct iwn_softc *,
222 		    const struct iwn_rx_general_stats *);
223 int		iwn4965_init_gains(struct iwn_softc *);
224 int		iwn5000_init_gains(struct iwn_softc *);
225 int		iwn4965_set_gains(struct iwn_softc *);
226 int		iwn5000_set_gains(struct iwn_softc *);
227 void		iwn_tune_sensitivity(struct iwn_softc *,
228 		    const struct iwn_rx_stats *);
229 int		iwn_send_sensitivity(struct iwn_softc *);
230 int		iwn_set_pslevel(struct iwn_softc *, int, int, int);
231 int		iwn_send_temperature_offset(struct iwn_softc *);
232 int		iwn_send_btcoex(struct iwn_softc *);
233 int		iwn_send_advanced_btcoex(struct iwn_softc *);
234 int		iwn5000_runtime_calib(struct iwn_softc *);
235 int		iwn_config(struct iwn_softc *);
236 uint16_t	iwn_get_active_dwell_time(struct iwn_softc *, uint16_t, uint8_t);
237 uint16_t	iwn_limit_dwell(struct iwn_softc *, uint16_t);
238 uint16_t	iwn_get_passive_dwell_time(struct iwn_softc *, uint16_t);
239 int		iwn_scan(struct iwn_softc *, uint16_t, int);
240 void		iwn_scan_abort(struct iwn_softc *);
241 int		iwn_bgscan(struct ieee80211com *);
242 int		iwn_auth(struct iwn_softc *, int);
243 int		iwn_run(struct iwn_softc *);
244 int		iwn_set_key(struct ieee80211com *, struct ieee80211_node *,
245 		    struct ieee80211_key *);
246 void		iwn_delete_key(struct ieee80211com *, struct ieee80211_node *,
247 		    struct ieee80211_key *);
248 void		iwn_update_htprot(struct ieee80211com *,
249 		    struct ieee80211_node *);
250 int		iwn_ampdu_rx_start(struct ieee80211com *,
251 		    struct ieee80211_node *, uint8_t);
252 void		iwn_ampdu_rx_stop(struct ieee80211com *,
253 		    struct ieee80211_node *, uint8_t);
254 int		iwn_ampdu_tx_start(struct ieee80211com *,
255 		    struct ieee80211_node *, uint8_t);
256 void		iwn_ampdu_tx_stop(struct ieee80211com *,
257 		    struct ieee80211_node *, uint8_t);
258 void		iwn4965_ampdu_tx_start(struct iwn_softc *,
259 		    struct ieee80211_node *, uint8_t, uint16_t);
260 void		iwn4965_ampdu_tx_stop(struct iwn_softc *,
261 		    uint8_t, uint16_t);
262 void		iwn5000_ampdu_tx_start(struct iwn_softc *,
263 		    struct ieee80211_node *, uint8_t, uint16_t);
264 void		iwn5000_ampdu_tx_stop(struct iwn_softc *,
265 		    uint8_t, uint16_t);
266 int		iwn5000_query_calibration(struct iwn_softc *);
267 int		iwn5000_send_calibration(struct iwn_softc *);
268 int		iwn5000_send_wimax_coex(struct iwn_softc *);
269 int		iwn5000_crystal_calib(struct iwn_softc *);
270 int		iwn6000_temp_offset_calib(struct iwn_softc *);
271 int		iwn2000_temp_offset_calib(struct iwn_softc *);
272 int		iwn4965_post_alive(struct iwn_softc *);
273 int		iwn5000_post_alive(struct iwn_softc *);
274 int		iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
275 		    int);
276 int		iwn4965_load_firmware(struct iwn_softc *);
277 int		iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
278 		    const uint8_t *, int);
279 int		iwn5000_load_firmware(struct iwn_softc *);
280 int		iwn_read_firmware_leg(struct iwn_softc *,
281 		    struct iwn_fw_info *);
282 int		iwn_read_firmware_tlv(struct iwn_softc *,
283 		    struct iwn_fw_info *, uint16_t);
284 int		iwn_read_firmware(struct iwn_softc *);
285 int		iwn_clock_wait(struct iwn_softc *);
286 int		iwn_apm_init(struct iwn_softc *);
287 void		iwn_apm_stop_master(struct iwn_softc *);
288 void		iwn_apm_stop(struct iwn_softc *);
289 int		iwn4965_nic_config(struct iwn_softc *);
290 int		iwn5000_nic_config(struct iwn_softc *);
291 int		iwn_hw_prepare(struct iwn_softc *);
292 int		iwn_hw_init(struct iwn_softc *);
293 void		iwn_hw_stop(struct iwn_softc *);
294 int		iwn_init(struct ifnet *);
295 void		iwn_stop(struct ifnet *);
296 
297 #ifdef IWN_DEBUG
298 #define DPRINTF(x)	do { if (iwn_debug > 0) printf x; } while (0)
299 #define DPRINTFN(n, x)	do { if (iwn_debug >= (n)) printf x; } while (0)
300 int iwn_debug = 1;
301 #else
302 #define DPRINTF(x)
303 #define DPRINTFN(n, x)
304 #endif
305 
306 struct cfdriver iwn_cd = {
307 	NULL, "iwn", DV_IFNET
308 };
309 
310 struct cfattach iwn_ca = {
311 	sizeof (struct iwn_softc), iwn_match, iwn_attach, iwn_detach,
312 	iwn_activate
313 };
314 
315 int
316 iwn_match(struct device *parent, void *match, void *aux)
317 {
318 	return pci_matchbyid((struct pci_attach_args *)aux, iwn_devices,
319 	    nitems(iwn_devices));
320 }
321 
322 void
323 iwn_attach(struct device *parent, struct device *self, void *aux)
324 {
325 	struct iwn_softc *sc = (struct iwn_softc *)self;
326 	struct ieee80211com *ic = &sc->sc_ic;
327 	struct ifnet *ifp = &ic->ic_if;
328 	struct pci_attach_args *pa = aux;
329 	const char *intrstr;
330 	pci_intr_handle_t ih;
331 	pcireg_t memtype, reg;
332 	int i, error;
333 
334 	sc->sc_pct = pa->pa_pc;
335 	sc->sc_pcitag = pa->pa_tag;
336 	sc->sc_dmat = pa->pa_dmat;
337 
338 	/*
339 	 * Get the offset of the PCI Express Capability Structure in PCI
340 	 * Configuration Space.
341 	 */
342 	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
343 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
344 	if (error == 0) {
345 		printf(": PCIe capability structure not found!\n");
346 		return;
347 	}
348 
349 	/* Clear device-specific "PCI retry timeout" register (41h). */
350 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
351 	if (reg & 0xff00)
352 		pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
353 
354 	/* Hardware bug workaround: make sure the PCI INTx Disable bit is clear. */
355 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
356 	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
357 		DPRINTF(("PCIe INTx Disable set\n"));
358 		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
359 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
360 		    PCI_COMMAND_STATUS_REG, reg);
361 	}
362 
363 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IWN_PCI_BAR0);
364 	error = pci_mapreg_map(pa, IWN_PCI_BAR0, memtype, 0, &sc->sc_st,
365 	    &sc->sc_sh, NULL, &sc->sc_sz, 0);
366 	if (error != 0) {
367 		printf(": can't map mem space\n");
368 		return;
369 	}
370 
371 	/* Install interrupt handler. */
372 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
373 		printf(": can't map interrupt\n");
374 		return;
375 	}
376 	intrstr = pci_intr_string(sc->sc_pct, ih);
377 	sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwn_intr, sc,
378 	    sc->sc_dev.dv_xname);
379 	if (sc->sc_ih == NULL) {
380 		printf(": can't establish interrupt");
381 		if (intrstr != NULL)
382 			printf(" at %s", intrstr);
383 		printf("\n");
384 		return;
385 	}
386 	printf(": %s", intrstr);
387 
388 	/* Read hardware revision and attach. */
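	/* Bits 8:4 of the IWN_HW_REV register identify the adapter type. */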
389 	sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0x1f;
390 	if (sc->hw_type == IWN_HW_REV_TYPE_4965)
391 		error = iwn4965_attach(sc, PCI_PRODUCT(pa->pa_id));
392 	else
393 		error = iwn5000_attach(sc, PCI_PRODUCT(pa->pa_id));
394 	if (error != 0) {
395 		printf(": could not attach device\n");
396 		return;
397 	}
398 
399 	if ((error = iwn_hw_prepare(sc)) != 0) {
400 		printf(": hardware not ready\n");
401 		return;
402 	}
403 
404 	/* Read MAC address, channels, etc from EEPROM. */
405 	if ((error = iwn_read_eeprom(sc)) != 0) {
406 		printf(": could not read EEPROM\n");
407 		return;
408 	}
409 
410 	/* Allocate DMA memory for firmware transfers. */
411 	if ((error = iwn_alloc_fwmem(sc)) != 0) {
412 		printf(": could not allocate memory for firmware\n");
413 		return;
414 	}
415 
416 	/* Allocate "Keep Warm" page. */
417 	if ((error = iwn_alloc_kw(sc)) != 0) {
418 		printf(": could not allocate keep warm page\n");
419 		goto fail1;
420 	}
421 
422 	/* Allocate ICT table for 5000 Series. */
423 	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
424 	    (error = iwn_alloc_ict(sc)) != 0) {
425 		printf(": could not allocate ICT table\n");
426 		goto fail2;
427 	}
428 
429 	/* Allocate TX scheduler "rings". */
430 	if ((error = iwn_alloc_sched(sc)) != 0) {
431 		printf(": could not allocate TX scheduler rings\n");
432 		goto fail3;
433 	}
434 
435 	/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
436 	for (i = 0; i < sc->ntxqs; i++) {
437 		if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
438 			printf(": could not allocate TX ring %d\n", i);
439 			goto fail4;
440 		}
441 	}
442 
443 	/* Allocate RX ring. */
444 	if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
445 		printf(": could not allocate RX ring\n");
446 		goto fail4;
447 	}
448 
449 	/* Clear pending interrupts. */
450 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
451 
452 	/* Count the number of available chains. */
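	/* The chain masks have one bit per antenna (A, B and C). */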
453 	sc->ntxchains =
454 	    ((sc->txchainmask >> 2) & 1) +
455 	    ((sc->txchainmask >> 1) & 1) +
456 	    ((sc->txchainmask >> 0) & 1);
457 	sc->nrxchains =
458 	    ((sc->rxchainmask >> 2) & 1) +
459 	    ((sc->rxchainmask >> 1) & 1) +
460 	    ((sc->rxchainmask >> 0) & 1);
461 	printf(", MIMO %dT%dR, %.4s, address %s\n", sc->ntxchains,
462 	    sc->nrxchains, sc->eeprom_domain, ether_sprintf(ic->ic_myaddr));
463 
464 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
465 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
466 	ic->ic_state = IEEE80211_S_INIT;
467 
468 	/* Set device capabilities. */
469 	ic->ic_caps =
470 	    IEEE80211_C_WEP |		/* WEP */
471 	    IEEE80211_C_RSN |		/* WPA/RSN */
472 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
473 	    IEEE80211_C_SCANALLBAND |	/* driver scans all bands at once */
474 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
475 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
476 	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
477 	    IEEE80211_C_PMGT;		/* power saving supported */
478 
479 	/* No optional HT features supported for now. */
480 	ic->ic_htcaps = 0;
481 	ic->ic_htxcaps = 0;
482 	ic->ic_txbfcaps = 0;
483 	ic->ic_aselcaps = 0;
484 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
485 	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
486 		ic->ic_caps |= (IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU);
487 		/* Set HT capabilities. */
488 		ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
489 #ifdef notyet
490 		ic->ic_htcaps |=
491 #if IWN_RBUF_SIZE == 8192
492 		    IEEE80211_HTCAP_AMSDU7935 |
493 #endif
494 		    IEEE80211_HTCAP_CBW20_40 |
495 		    IEEE80211_HTCAP_SGI40;
496 		if (sc->hw_type != IWN_HW_REV_TYPE_4965)
497 			ic->ic_htcaps |= IEEE80211_HTCAP_GF;
498 		if (sc->hw_type == IWN_HW_REV_TYPE_6050)
499 			ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
500 		else
501 			ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
502 #endif	/* notyet */
503 	}
504 
505 	/* Set supported legacy rates. */
506 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
507 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
508 	if (sc->sc_flags & IWN_FLAG_HAS_5GHZ) {
509 		ic->ic_sup_rates[IEEE80211_MODE_11A] =
510 		    ieee80211_std_rateset_11a;
511 	}
512 	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
513 		/* Set supported HT rates. */
514 		ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
515 #ifdef notyet
516 		if (sc->nrxchains > 1)
517 			ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
518 		if (sc->nrxchains > 2)
519 			ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
520 #endif
521 	}
522 
523 	/* IBSS channel undefined for now. */
524 	ic->ic_ibss_chan = &ic->ic_channels[0];
525 
526 	ifp->if_softc = sc;
527 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
528 	ifp->if_ioctl = iwn_ioctl;
529 	ifp->if_start = iwn_start;
530 	ifp->if_watchdog = iwn_watchdog;
531 	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
532 
533 	if_attach(ifp);
534 	ieee80211_ifattach(ifp);
535 	ic->ic_node_alloc = iwn_node_alloc;
536 	ic->ic_bgscan_start = iwn_bgscan;
537 	ic->ic_newassoc = iwn_newassoc;
538 	ic->ic_updateedca = iwn_updateedca;
539 	ic->ic_set_key = iwn_set_key;
540 	ic->ic_delete_key = iwn_delete_key;
541 	ic->ic_update_htprot = iwn_update_htprot;
542 	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
543 	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
544 	ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
545 	ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
546 
547 	/* Override 802.11 state transition machine. */
548 	sc->sc_newstate = ic->ic_newstate;
549 	ic->ic_newstate = iwn_newstate;
550 	ieee80211_media_init(ifp, iwn_media_change, ieee80211_media_status);
551 
552 	sc->amrr.amrr_min_success_threshold =  1;
553 	sc->amrr.amrr_max_success_threshold = 15;
554 
555 #if NBPFILTER > 0
556 	iwn_radiotap_attach(sc);
557 #endif
558 	timeout_set(&sc->calib_to, iwn_calib_timeout, sc);
559 	rw_init(&sc->sc_rwlock, "iwnlock");
560 	task_set(&sc->init_task, iwn_init_task, sc);
561 	return;
562 
563 	/* Free allocated memory if something failed during attachment. */
564 fail4:	while (--i >= 0)
565 		iwn_free_tx_ring(sc, &sc->txq[i]);
566 	iwn_free_sched(sc);
567 fail3:	if (sc->ict != NULL)
568 		iwn_free_ict(sc);
569 fail2:	iwn_free_kw(sc);
570 fail1:	iwn_free_fwmem(sc);
571 }
572 
573 int
574 iwn4965_attach(struct iwn_softc *sc, pci_product_id_t pid)
575 {
576 	struct iwn_ops *ops = &sc->ops;
577 
578 	ops->load_firmware = iwn4965_load_firmware;
579 	ops->read_eeprom = iwn4965_read_eeprom;
580 	ops->post_alive = iwn4965_post_alive;
581 	ops->nic_config = iwn4965_nic_config;
582 	ops->reset_sched = iwn4965_reset_sched;
583 	ops->update_sched = iwn4965_update_sched;
584 	ops->get_temperature = iwn4965_get_temperature;
585 	ops->get_rssi = iwn4965_get_rssi;
586 	ops->set_txpower = iwn4965_set_txpower;
587 	ops->init_gains = iwn4965_init_gains;
588 	ops->set_gains = iwn4965_set_gains;
589 	ops->add_node = iwn4965_add_node;
590 	ops->tx_done = iwn4965_tx_done;
591 	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
592 	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
593 	sc->ntxqs = IWN4965_NTXQUEUES;
594 	sc->first_agg_txq = IWN4965_FIRST_AGG_TXQUEUE;
595 	sc->ndmachnls = IWN4965_NDMACHNLS;
596 	sc->broadcast_id = IWN4965_ID_BROADCAST;
597 	sc->rxonsz = IWN4965_RXONSZ;
598 	sc->schedsz = IWN4965_SCHEDSZ;
599 	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
600 	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
601 	sc->fwsz = IWN4965_FWSZ;
602 	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
603 	sc->limits = &iwn4965_sensitivity_limits;
604 	sc->fwname = "iwn-4965";
605 	/* Override chain masks; the ROM is known to be broken. */
606 	sc->txchainmask = IWN_ANT_AB;
607 	sc->rxchainmask = IWN_ANT_ABC;
608 
609 	return 0;
610 }
611 
612 int
613 iwn5000_attach(struct iwn_softc *sc, pci_product_id_t pid)
614 {
615 	struct iwn_ops *ops = &sc->ops;
616 
617 	ops->load_firmware = iwn5000_load_firmware;
618 	ops->read_eeprom = iwn5000_read_eeprom;
619 	ops->post_alive = iwn5000_post_alive;
620 	ops->nic_config = iwn5000_nic_config;
621 	ops->reset_sched = iwn5000_reset_sched;
622 	ops->update_sched = iwn5000_update_sched;
623 	ops->get_temperature = iwn5000_get_temperature;
624 	ops->get_rssi = iwn5000_get_rssi;
625 	ops->set_txpower = iwn5000_set_txpower;
626 	ops->init_gains = iwn5000_init_gains;
627 	ops->set_gains = iwn5000_set_gains;
628 	ops->add_node = iwn5000_add_node;
629 	ops->tx_done = iwn5000_tx_done;
630 	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
631 	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
632 	sc->ntxqs = IWN5000_NTXQUEUES;
633 	sc->first_agg_txq = IWN5000_FIRST_AGG_TXQUEUE;
634 	sc->ndmachnls = IWN5000_NDMACHNLS;
635 	sc->broadcast_id = IWN5000_ID_BROADCAST;
636 	sc->rxonsz = IWN5000_RXONSZ;
637 	sc->schedsz = IWN5000_SCHEDSZ;
638 	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
639 	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
640 	sc->fwsz = IWN5000_FWSZ;
641 	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
642 
643 	switch (sc->hw_type) {
644 	case IWN_HW_REV_TYPE_5100:
645 		sc->limits = &iwn5000_sensitivity_limits;
646 		sc->fwname = "iwn-5000";
647 		/* Override chain masks; the ROM is known to be broken. */
648 		sc->txchainmask = IWN_ANT_B;
649 		sc->rxchainmask = IWN_ANT_AB;
650 		break;
651 	case IWN_HW_REV_TYPE_5150:
652 		sc->limits = &iwn5150_sensitivity_limits;
653 		sc->fwname = "iwn-5150";
654 		break;
655 	case IWN_HW_REV_TYPE_5300:
656 	case IWN_HW_REV_TYPE_5350:
657 		sc->limits = &iwn5000_sensitivity_limits;
658 		sc->fwname = "iwn-5000";
659 		break;
660 	case IWN_HW_REV_TYPE_1000:
661 		sc->limits = &iwn1000_sensitivity_limits;
662 		sc->fwname = "iwn-1000";
663 		break;
664 	case IWN_HW_REV_TYPE_6000:
665 		sc->limits = &iwn6000_sensitivity_limits;
666 		sc->fwname = "iwn-6000";
667 		if (pid == PCI_PRODUCT_INTEL_WL_6200_1 ||
668 		    pid == PCI_PRODUCT_INTEL_WL_6200_2) {
669 			sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
670 			/* Override chain masks; the ROM is known to be broken. */
671 			sc->txchainmask = IWN_ANT_BC;
672 			sc->rxchainmask = IWN_ANT_BC;
673 		}
674 		break;
675 	case IWN_HW_REV_TYPE_6050:
676 		sc->limits = &iwn6000_sensitivity_limits;
677 		sc->fwname = "iwn-6050";
678 		break;
679 	case IWN_HW_REV_TYPE_6005:
680 		sc->limits = &iwn6000_sensitivity_limits;
681 		if (pid != PCI_PRODUCT_INTEL_WL_6005_1 &&
682 		    pid != PCI_PRODUCT_INTEL_WL_6005_2) {
683 			sc->fwname = "iwn-6030";
684 			sc->sc_flags |= IWN_FLAG_ADV_BT_COEX;
685 		} else
686 			sc->fwname = "iwn-6005";
687 		break;
688 	case IWN_HW_REV_TYPE_2030:
689 		sc->limits = &iwn2000_sensitivity_limits;
690 		sc->fwname = "iwn-2030";
691 		sc->sc_flags |= IWN_FLAG_ADV_BT_COEX;
692 		break;
693 	case IWN_HW_REV_TYPE_2000:
694 		sc->limits = &iwn2000_sensitivity_limits;
695 		sc->fwname = "iwn-2000";
696 		break;
697 	case IWN_HW_REV_TYPE_135:
698 		sc->limits = &iwn2000_sensitivity_limits;
699 		sc->fwname = "iwn-135";
700 		sc->sc_flags |= IWN_FLAG_ADV_BT_COEX;
701 		break;
702 	case IWN_HW_REV_TYPE_105:
703 		sc->limits = &iwn2000_sensitivity_limits;
704 		sc->fwname = "iwn-105";
705 		break;
706 	default:
707 		printf(": adapter type %d not supported\n", sc->hw_type);
708 		return ENOTSUP;
709 	}
710 	return 0;
711 }
712 
713 #if NBPFILTER > 0
714 /*
715  * Attach the interface to 802.11 radiotap.
716  */
717 void
718 iwn_radiotap_attach(struct iwn_softc *sc)
719 {
720 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
721 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
722 
723 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
724 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
725 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWN_RX_RADIOTAP_PRESENT);
726 
727 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
728 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
729 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWN_TX_RADIOTAP_PRESENT);
730 }
731 #endif
732 
733 int
734 iwn_detach(struct device *self, int flags)
735 {
736 	struct iwn_softc *sc = (struct iwn_softc *)self;
737 	struct ifnet *ifp = &sc->sc_ic.ic_if;
738 	int qid;
739 
740 	timeout_del(&sc->calib_to);
741 	task_del(systq, &sc->init_task);
742 
743 	/* Uninstall interrupt handler. */
744 	if (sc->sc_ih != NULL)
745 		pci_intr_disestablish(sc->sc_pct, sc->sc_ih);
746 
747 	/* Free DMA resources. */
748 	iwn_free_rx_ring(sc, &sc->rxq);
749 	for (qid = 0; qid < sc->ntxqs; qid++)
750 		iwn_free_tx_ring(sc, &sc->txq[qid]);
751 	iwn_free_sched(sc);
752 	iwn_free_kw(sc);
753 	if (sc->ict != NULL)
754 		iwn_free_ict(sc);
755 	iwn_free_fwmem(sc);
756 
757 	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
758 
759 	ieee80211_ifdetach(ifp);
760 	if_detach(ifp);
761 
762 	return 0;
763 }
764 
765 int
766 iwn_activate(struct device *self, int act)
767 {
768 	struct iwn_softc *sc = (struct iwn_softc *)self;
769 	struct ifnet *ifp = &sc->sc_ic.ic_if;
770 
771 	switch (act) {
772 	case DVACT_SUSPEND:
773 		if (ifp->if_flags & IFF_RUNNING)
774 			iwn_stop(ifp);
775 		break;
776 	case DVACT_WAKEUP:
777 		iwn_wakeup(sc);
778 		break;
779 	}
780 
781 	return 0;
782 }
783 
784 void
785 iwn_wakeup(struct iwn_softc *sc)
786 {
787 	pcireg_t reg;
788 
789 	/* Clear device-specific "PCI retry timeout" register (41h). */
790 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
791 	if (reg & 0xff00)
792 		pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
793 	iwn_init_task(sc);
794 }
795 
796 void
797 iwn_init_task(void *arg1)
798 {
799 	struct iwn_softc *sc = arg1;
800 	struct ifnet *ifp = &sc->sc_ic.ic_if;
801 	int s;
802 
803 	rw_enter_write(&sc->sc_rwlock);
804 	s = splnet();
805 
806 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
807 		iwn_init(ifp);
808 
809 	splx(s);
810 	rw_exit_write(&sc->sc_rwlock);
811 }
812 
813 int
814 iwn_nic_lock(struct iwn_softc *sc)
815 {
816 	int ntries;
817 
818 	/* Request exclusive access to NIC. */
819 	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
820 
821 	/* Spin until we actually get the lock. */
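	/* Done once MAC access is enabled and the sleep bit is clear. */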
822 	for (ntries = 0; ntries < 1000; ntries++) {
823 		if ((IWN_READ(sc, IWN_GP_CNTRL) &
824 		     (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
825 		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
826 			return 0;
827 		DELAY(10);
828 	}
829 	return ETIMEDOUT;
830 }
831 
832 static __inline void
833 iwn_nic_unlock(struct iwn_softc *sc)
834 {
835 	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
836 }
837 
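/*
 * Peripheral ("prph") registers are reached through an indirect access
 * window: the target address goes into IWN_PRPH_RADDR/WADDR and data is
 * transferred through IWN_PRPH_RDATA/WDATA.  Callers hold the NIC lock
 * (iwn_nic_lock) around these accesses.
 */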
838 static __inline uint32_t
839 iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
840 {
841 	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
842 	IWN_BARRIER_READ_WRITE(sc);
843 	return IWN_READ(sc, IWN_PRPH_RDATA);
844 }
845 
846 static __inline void
847 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
848 {
849 	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
850 	IWN_BARRIER_WRITE(sc);
851 	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
852 }
853 
854 static __inline void
855 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
856 {
857 	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
858 }
859 
860 static __inline void
861 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
862 {
863 	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
864 }
865 
866 static __inline void
867 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
868     const uint32_t *data, int count)
869 {
870 	for (; count > 0; count--, data++, addr += 4)
871 		iwn_prph_write(sc, addr, *data);
872 }
873 
874 static __inline uint32_t
875 iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
876 {
877 	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
878 	IWN_BARRIER_READ_WRITE(sc);
879 	return IWN_READ(sc, IWN_MEM_RDATA);
880 }
881 
882 static __inline void
883 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
884 {
885 	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
886 	IWN_BARRIER_WRITE(sc);
887 	IWN_WRITE(sc, IWN_MEM_WDATA, data);
888 }
889 
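/*
 * Write a 16-bit value into NIC memory by read-modify-write of the
 * enclosing 32-bit word; addr is expected to be 2-byte aligned.
 */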
890 static __inline void
891 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
892 {
893 	uint32_t tmp;
894 
895 	tmp = iwn_mem_read(sc, addr & ~3);
896 	if (addr & 3)
897 		tmp = (tmp & 0x0000ffff) | data << 16;
898 	else
899 		tmp = (tmp & 0xffff0000) | data;
900 	iwn_mem_write(sc, addr & ~3, tmp);
901 }
902 
903 #ifdef IWN_DEBUG
904 
905 static __inline void
906 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
907     int count)
908 {
909 	for (; count > 0; count--, addr += 4)
910 		*data++ = iwn_mem_read(sc, addr);
911 }
912 
913 #endif
914 
915 static __inline void
916 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
917     int count)
918 {
919 	for (; count > 0; count--, addr += 4)
920 		iwn_mem_write(sc, addr, val);
921 }
922 
923 int
924 iwn_eeprom_lock(struct iwn_softc *sc)
925 {
926 	int i, ntries;
927 
928 	for (i = 0; i < 100; i++) {
929 		/* Request exclusive access to EEPROM. */
930 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
931 		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);
932 
933 		/* Spin until we actually get the lock. */
934 		for (ntries = 0; ntries < 100; ntries++) {
935 			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
936 			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
937 				return 0;
938 			DELAY(10);
939 		}
940 	}
941 	return ETIMEDOUT;
942 }
943 
944 static __inline void
945 iwn_eeprom_unlock(struct iwn_softc *sc)
946 {
947 	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
948 }
949 
950 /*
951  * Initialize access by host to One Time Programmable ROM.
952  * NB: This kind of ROM is found on 1000 Series and newer adapters only.
953  */
954 int
955 iwn_init_otprom(struct iwn_softc *sc)
956 {
957 	uint16_t prev, base, next;
958 	int count, error;
959 
960 	/* Wait for clock stabilization before accessing prph. */
961 	if ((error = iwn_clock_wait(sc)) != 0)
962 		return error;
963 
964 	if ((error = iwn_nic_lock(sc)) != 0)
965 		return error;
966 	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
967 	DELAY(5);
968 	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
969 	iwn_nic_unlock(sc);
970 
971 	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
972 	if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
973 		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
974 		    IWN_RESET_LINK_PWR_MGMT_DIS);
975 	}
976 	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
977 	/* Clear ECC status. */
978 	IWN_SETBITS(sc, IWN_OTP_GP,
979 	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
980 
981 	/*
982 	 * The OTP is a linked list of blocks.  For HW without OTP shadow
983 	 * RAM, find the block before the last, which holds the EEPROM image.
984 	 */
985 	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
986 		/* Switch to absolute addressing mode. */
987 		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
988 		base = 0;
989 		for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
990 			error = iwn_read_prom_data(sc, base, &next, 2);
991 			if (error != 0)
992 				return error;
993 			if (next == 0)	/* End of linked-list. */
994 				break;
995 			prev = base;
996 			base = letoh16(next);
997 		}
998 		if (count == 0 || count == IWN1000_OTP_NBLOCKS)
999 			return EIO;
1000 		/* Skip "next" word. */
1001 		sc->prom_base = prev + 1;
1002 	}
1003 	return 0;
1004 }
1005 
1006 int
1007 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1008 {
1009 	uint8_t *out = data;
1010 	uint32_t val, tmp;
1011 	int ntries;
1012 
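	/*
	 * The ROM is read one 16-bit word at a time; "addr" counts 16-bit
	 * words and the data comes back in the upper half of the IWN_EEPROM
	 * register.
	 */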
1013 	addr += sc->prom_base;
1014 	for (; count > 0; count -= 2, addr++) {
1015 		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1016 		for (ntries = 0; ntries < 10; ntries++) {
1017 			val = IWN_READ(sc, IWN_EEPROM);
1018 			if (val & IWN_EEPROM_READ_VALID)
1019 				break;
1020 			DELAY(5);
1021 		}
1022 		if (ntries == 10) {
1023 			printf("%s: timeout reading ROM at 0x%x\n",
1024 			    sc->sc_dev.dv_xname, addr);
1025 			return ETIMEDOUT;
1026 		}
1027 		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1028 			/* OTPROM, check for ECC errors. */
1029 			tmp = IWN_READ(sc, IWN_OTP_GP);
1030 			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1031 				printf("%s: OTPROM ECC error at 0x%x\n",
1032 				    sc->sc_dev.dv_xname, addr);
1033 				return EIO;
1034 			}
1035 			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1036 				/* Correctable ECC error, clear bit. */
1037 				IWN_SETBITS(sc, IWN_OTP_GP,
1038 				    IWN_OTP_GP_ECC_CORR_STTS);
1039 			}
1040 		}
1041 		*out++ = val >> 16;
1042 		if (count > 1)
1043 			*out++ = val >> 24;
1044 	}
1045 	return 0;
1046 }
1047 
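/*
 * Allocate a physically contiguous, DMA-safe buffer: create a map,
 * allocate and map a single segment, then load it.  The physical address
 * is returned in dma->paddr and, if kvap is not NULL, the kernel virtual
 * address in *kvap.
 */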
1048 int
1049 iwn_dma_contig_alloc(bus_dma_tag_t tag, struct iwn_dma_info *dma, void **kvap,
1050     bus_size_t size, bus_size_t alignment)
1051 {
1052 	int nsegs, error;
1053 
1054 	dma->tag = tag;
1055 	dma->size = size;
1056 
1057 	error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1058 	    &dma->map);
1059 	if (error != 0)
1060 		goto fail;
1061 
1062 	error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1063 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1064 	if (error != 0)
1065 		goto fail;
1066 
1067 	error = bus_dmamem_map(tag, &dma->seg, 1, size, &dma->vaddr,
1068 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1069 	if (error != 0)
1070 		goto fail;
1071 
1072 	error = bus_dmamap_load_raw(tag, dma->map, &dma->seg, 1, size,
1073 	    BUS_DMA_NOWAIT);
1074 	if (error != 0)
1075 		goto fail;
1076 
1077 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1078 
1079 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1080 	if (kvap != NULL)
1081 		*kvap = dma->vaddr;
1082 
1083 	return 0;
1084 
1085 fail:	iwn_dma_contig_free(dma);
1086 	return error;
1087 }
1088 
1089 void
1090 iwn_dma_contig_free(struct iwn_dma_info *dma)
1091 {
1092 	if (dma->map != NULL) {
1093 		if (dma->vaddr != NULL) {
1094 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1095 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1096 			bus_dmamap_unload(dma->tag, dma->map);
1097 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1098 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1099 			dma->vaddr = NULL;
1100 		}
1101 		bus_dmamap_destroy(dma->tag, dma->map);
1102 		dma->map = NULL;
1103 	}
1104 }
1105 
1106 int
1107 iwn_alloc_sched(struct iwn_softc *sc)
1108 {
1109 	/* TX scheduler rings must be aligned on a 1KB boundary. */
1110 	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
1111 	    (void **)&sc->sched, sc->schedsz, 1024);
1112 }
1113 
1114 void
1115 iwn_free_sched(struct iwn_softc *sc)
1116 {
1117 	iwn_dma_contig_free(&sc->sched_dma);
1118 }
1119 
1120 int
1121 iwn_alloc_kw(struct iwn_softc *sc)
1122 {
1123 	/* "Keep Warm" page must be aligned on a 4KB boundary. */
1124 	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, NULL, 4096,
1125 	    4096);
1126 }
1127 
1128 void
1129 iwn_free_kw(struct iwn_softc *sc)
1130 {
1131 	iwn_dma_contig_free(&sc->kw_dma);
1132 }
1133 
1134 int
1135 iwn_alloc_ict(struct iwn_softc *sc)
1136 {
1137 	/* ICT table must be aligned on a 4KB boundary. */
1138 	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
1139 	    (void **)&sc->ict, IWN_ICT_SIZE, 4096);
1140 }
1141 
1142 void
1143 iwn_free_ict(struct iwn_softc *sc)
1144 {
1145 	iwn_dma_contig_free(&sc->ict_dma);
1146 }
1147 
1148 int
1149 iwn_alloc_fwmem(struct iwn_softc *sc)
1150 {
1151 	/* Must be aligned on a 16-byte boundary. */
1152 	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, NULL,
1153 	    sc->fwsz, 16);
1154 }
1155 
1156 void
1157 iwn_free_fwmem(struct iwn_softc *sc)
1158 {
1159 	iwn_dma_contig_free(&sc->fw_dma);
1160 }
1161 
1162 int
1163 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1164 {
1165 	bus_size_t size;
1166 	int i, error;
1167 
1168 	ring->cur = 0;
1169 
1170 	/* Allocate RX descriptors (256-byte aligned). */
1171 	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1172 	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
1173 	    (void **)&ring->desc, size, 256);
1174 	if (error != 0) {
1175 		printf("%s: could not allocate RX ring DMA memory\n",
1176 		    sc->sc_dev.dv_xname);
1177 		goto fail;
1178 	}
1179 
1180 	/* Allocate RX status area (16-byte aligned). */
1181 	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1182 	    (void **)&ring->stat, sizeof (struct iwn_rx_status), 16);
1183 	if (error != 0) {
1184 		printf("%s: could not allocate RX status DMA memory\n",
1185 		    sc->sc_dev.dv_xname);
1186 		goto fail;
1187 	}
1188 
1189 	/*
1190 	 * Allocate and map RX buffers.
1191 	 */
1192 	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1193 		struct iwn_rx_data *data = &ring->data[i];
1194 
1195 		error = bus_dmamap_create(sc->sc_dmat, IWN_RBUF_SIZE, 1,
1196 		    IWN_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1197 		    &data->map);
1198 		if (error != 0) {
1199 			printf("%s: could not create RX buf DMA map\n",
1200 			    sc->sc_dev.dv_xname);
1201 			goto fail;
1202 		}
1203 
1204 		data->m = MCLGETL(NULL, M_DONTWAIT, IWN_RBUF_SIZE);
1205 		if (data->m == NULL) {
1206 			printf("%s: could not allocate RX mbuf\n",
1207 			    sc->sc_dev.dv_xname);
1208 			error = ENOBUFS;
1209 			goto fail;
1210 		}
1211 
1212 		error = bus_dmamap_load(sc->sc_dmat, data->map,
1213 		    mtod(data->m, void *), IWN_RBUF_SIZE, NULL,
1214 		    BUS_DMA_NOWAIT | BUS_DMA_READ);
1215 		if (error != 0) {
1216 			printf("%s: can't map mbuf (error %d)\n",
1217 			    sc->sc_dev.dv_xname, error);
1218 			goto fail;
1219 		}
1220 
1221 		/* Set physical address of RX buffer (256-byte aligned). */
1222 		ring->desc[i] = htole32(data->map->dm_segs[0].ds_addr >> 8);
1223 	}
1224 
1225 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0, size,
1226 	    BUS_DMASYNC_PREWRITE);
1227 
1228 	return 0;
1229 
1230 fail:	iwn_free_rx_ring(sc, ring);
1231 	return error;
1232 }
1233 
1234 void
1235 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1236 {
1237 	int ntries;
1238 
1239 	if (iwn_nic_lock(sc) == 0) {
1240 		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1241 		for (ntries = 0; ntries < 1000; ntries++) {
1242 			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1243 			    IWN_FH_RX_STATUS_IDLE)
1244 				break;
1245 			DELAY(10);
1246 		}
1247 		iwn_nic_unlock(sc);
1248 	}
1249 	ring->cur = 0;
1250 	sc->last_rx_valid = 0;
1251 }
1252 
1253 void
1254 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1255 {
1256 	int i;
1257 
1258 	iwn_dma_contig_free(&ring->desc_dma);
1259 	iwn_dma_contig_free(&ring->stat_dma);
1260 
1261 	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1262 		struct iwn_rx_data *data = &ring->data[i];
1263 
1264 		if (data->m != NULL) {
1265 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1266 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1267 			bus_dmamap_unload(sc->sc_dmat, data->map);
1268 			m_freem(data->m);
1269 		}
1270 		if (data->map != NULL)
1271 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1272 	}
1273 }
1274 
1275 int
1276 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1277 {
1278 	bus_addr_t paddr;
1279 	bus_size_t size;
1280 	int i, error;
1281 
1282 	ring->qid = qid;
1283 	ring->queued = 0;
1284 	ring->cur = 0;
1285 
1286 	/* Allocate TX descriptors (256-byte aligned). */
1287 	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
1288 	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
1289 	    (void **)&ring->desc, size, 256);
1290 	if (error != 0) {
1291 		printf("%s: could not allocate TX ring DMA memory\n",
1292 		    sc->sc_dev.dv_xname);
1293 		goto fail;
1294 	}
1295 
1296 	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
1297 	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma,
1298 	    (void **)&ring->cmd, size, 4);
1299 	if (error != 0) {
1300 		printf("%s: could not allocate TX cmd DMA memory\n",
1301 		    sc->sc_dev.dv_xname);
1302 		goto fail;
1303 	}
1304 
1305 	paddr = ring->cmd_dma.paddr;
1306 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1307 		struct iwn_tx_data *data = &ring->data[i];
1308 
1309 		data->cmd_paddr = paddr;
1310 		data->scratch_paddr = paddr + 12;
1311 		paddr += sizeof (struct iwn_tx_cmd);
1312 
1313 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1314 		    IWN_MAX_SCATTER - 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
1315 		    &data->map);
1316 		if (error != 0) {
1317 			printf("%s: could not create TX buf DMA map\n",
1318 			    sc->sc_dev.dv_xname);
1319 			goto fail;
1320 		}
1321 	}
1322 	return 0;
1323 
1324 fail:	iwn_free_tx_ring(sc, ring);
1325 	return error;
1326 }
1327 
1328 void
1329 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1330 {
1331 	int i;
1332 
1333 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1334 		struct iwn_tx_data *data = &ring->data[i];
1335 
1336 		if (data->m != NULL) {
1337 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1338 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1339 			bus_dmamap_unload(sc->sc_dmat, data->map);
1340 			m_freem(data->m);
1341 			data->m = NULL;
1342 		}
1343 	}
1344 	/* Clear TX descriptors. */
1345 	memset(ring->desc, 0, ring->desc_dma.size);
1346 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1347 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1348 	sc->qfullmsk &= ~(1 << ring->qid);
1349 	ring->queued = 0;
1350 	ring->cur = 0;
1351 }
1352 
1353 void
1354 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1355 {
1356 	int i;
1357 
1358 	iwn_dma_contig_free(&ring->desc_dma);
1359 	iwn_dma_contig_free(&ring->cmd_dma);
1360 
1361 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1362 		struct iwn_tx_data *data = &ring->data[i];
1363 
1364 		if (data->m != NULL) {
1365 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1366 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1367 			bus_dmamap_unload(sc->sc_dmat, data->map);
1368 			m_freem(data->m);
1369 		}
1370 		if (data->map != NULL)
1371 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1372 	}
1373 }
1374 
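/*
 * The 5000 Series and newer adapters can report interrupt causes through
 * an in-memory interrupt cause table (ICT) instead of the IWN_INT
 * register.  Point the NIC at the 4KB-aligned ICT table and switch the
 * driver to ICT interrupt mode.
 */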
1375 void
1376 iwn5000_ict_reset(struct iwn_softc *sc)
1377 {
1378 	/* Disable interrupts. */
1379 	IWN_WRITE(sc, IWN_INT_MASK, 0);
1380 
1381 	/* Reset ICT table. */
1382 	memset(sc->ict, 0, IWN_ICT_SIZE);
1383 	sc->ict_cur = 0;
1384 
1385 	/* Set physical address of ICT table (4KB aligned). */
1386 	DPRINTF(("enabling ICT\n"));
1387 	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
1388 	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
1389 
1390 	/* Enable periodic RX interrupt. */
1391 	sc->int_mask |= IWN_INT_RX_PERIODIC;
1392 	/* Switch to ICT interrupt mode in driver. */
1393 	sc->sc_flags |= IWN_FLAG_USE_ICT;
1394 
1395 	/* Re-enable interrupts. */
1396 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
1397 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
1398 }
1399 
1400 int
1401 iwn_read_eeprom(struct iwn_softc *sc)
1402 {
1403 	struct iwn_ops *ops = &sc->ops;
1404 	struct ieee80211com *ic = &sc->sc_ic;
1405 	uint16_t val;
1406 	int error;
1407 
1408 	/* Check whether adapter has an EEPROM or an OTPROM. */
1409 	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
1410 	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
1411 		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
1412 	DPRINTF(("%s found\n", (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ?
1413 	    "OTPROM" : "EEPROM"));
1414 
1415 	/* Adapter has to be powered on for EEPROM access to work. */
1416 	if ((error = iwn_apm_init(sc)) != 0) {
1417 		printf("%s: could not power ON adapter\n",
1418 		    sc->sc_dev.dv_xname);
1419 		return error;
1420 	}
1421 
1422 	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
1423 		printf("%s: bad ROM signature\n", sc->sc_dev.dv_xname);
1424 		return EIO;
1425 	}
1426 	if ((error = iwn_eeprom_lock(sc)) != 0) {
1427 		printf("%s: could not lock ROM (error=%d)\n",
1428 		    sc->sc_dev.dv_xname, error);
1429 		return error;
1430 	}
1431 	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1432 		if ((error = iwn_init_otprom(sc)) != 0) {
1433 			printf("%s: could not initialize OTPROM\n",
1434 			    sc->sc_dev.dv_xname);
1435 			return error;
1436 		}
1437 	}
1438 
1439 	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
1440 	DPRINTF(("SKU capabilities=0x%04x\n", letoh16(val)));
1441 	/* Set the 11n flag unless HT support has been bonded out. */
1442 	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
1443 		sc->sc_flags |= IWN_FLAG_HAS_11N;
1444 
1445 	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
1446 	sc->rfcfg = letoh16(val);
1447 	DPRINTF(("radio config=0x%04x\n", sc->rfcfg));
1448 	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
1449 	if (sc->txchainmask == 0)
1450 		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
1451 	if (sc->rxchainmask == 0)
1452 		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
1453 
1454 	/* Read MAC address. */
1455 	iwn_read_prom_data(sc, IWN_EEPROM_MAC, ic->ic_myaddr, 6);
1456 
1457 	/* Read adapter-specific information from EEPROM. */
1458 	ops->read_eeprom(sc);
1459 
1460 	iwn_apm_stop(sc);	/* Power OFF adapter. */
1461 
1462 	iwn_eeprom_unlock(sc);
1463 	return 0;
1464 }
1465 
1466 void
1467 iwn4965_read_eeprom(struct iwn_softc *sc)
1468 {
1469 	uint32_t addr;
1470 	uint16_t val;
1471 	int i;
1472 
1473 	/* Read regulatory domain (4 ASCII characters). */
1474 	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
1475 
1476 	/* Read the list of authorized channels (20MHz ones only). */
1477 	for (i = 0; i < 5; i++) {
1478 		addr = iwn4965_regulatory_bands[i];
1479 		iwn_read_eeprom_channels(sc, i, addr);
1480 	}
1481 
1482 	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
1483 	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
1484 	sc->maxpwr2GHz = val & 0xff;
1485 	sc->maxpwr5GHz = val >> 8;
1486 	/* Check that EEPROM values are within valid range. */
1487 	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
1488 		sc->maxpwr5GHz = 38;
1489 	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
1490 		sc->maxpwr2GHz = 38;
1491 	DPRINTF(("maxpwr 2GHz=%d 5GHz=%d\n", sc->maxpwr2GHz, sc->maxpwr5GHz));
1492 
1493 	/* Read samples for each TX power group. */
1494 	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
1495 	    sizeof sc->bands);
1496 
1497 	/* Read voltage at which samples were taken. */
1498 	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
1499 	sc->eeprom_voltage = (int16_t)letoh16(val);
1500 	DPRINTF(("voltage=%d (in 0.3V)\n", sc->eeprom_voltage));
1501 
1502 #ifdef IWN_DEBUG
1503 	/* Print samples. */
1504 	if (iwn_debug > 0) {
1505 		for (i = 0; i < IWN_NBANDS; i++)
1506 			iwn4965_print_power_group(sc, i);
1507 	}
1508 #endif
1509 }
1510 
1511 #ifdef IWN_DEBUG
1512 void
1513 iwn4965_print_power_group(struct iwn_softc *sc, int i)
1514 {
1515 	struct iwn4965_eeprom_band *band = &sc->bands[i];
1516 	struct iwn4965_eeprom_chan_samples *chans = band->chans;
1517 	int j, c;
1518 
1519 	printf("===band %d===\n", i);
1520 	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
1521 	printf("chan1 num=%d\n", chans[0].num);
1522 	for (c = 0; c < 2; c++) {
1523 		for (j = 0; j < IWN_NSAMPLES; j++) {
1524 			printf("chain %d, sample %d: temp=%d gain=%d "
1525 			    "power=%d pa_det=%d\n", c, j,
1526 			    chans[0].samples[c][j].temp,
1527 			    chans[0].samples[c][j].gain,
1528 			    chans[0].samples[c][j].power,
1529 			    chans[0].samples[c][j].pa_det);
1530 		}
1531 	}
1532 	printf("chan2 num=%d\n", chans[1].num);
1533 	for (c = 0; c < 2; c++) {
1534 		for (j = 0; j < IWN_NSAMPLES; j++) {
1535 			printf("chain %d, sample %d: temp=%d gain=%d "
1536 			    "power=%d pa_det=%d\n", c, j,
1537 			    chans[1].samples[c][j].temp,
1538 			    chans[1].samples[c][j].gain,
1539 			    chans[1].samples[c][j].power,
1540 			    chans[1].samples[c][j].pa_det);
1541 		}
1542 	}
1543 }
1544 #endif
1545 
1546 void
1547 iwn5000_read_eeprom(struct iwn_softc *sc)
1548 {
1549 	struct iwn5000_eeprom_calib_hdr hdr;
1550 	int32_t volt;
1551 	uint32_t base, addr;
1552 	uint16_t val;
1553 	int i;
1554 
1555 	/* Read regulatory domain (4 ASCII characters). */
1556 	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1557 	base = letoh16(val);
1558 	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
1559 	    sc->eeprom_domain, 4);
1560 
1561 	/* Read the list of authorized channels (20MHz ones only). */
1562 	for (i = 0; i < 5; i++) {
1563 		addr = base + iwn5000_regulatory_bands[i];
1564 		iwn_read_eeprom_channels(sc, i, addr);
1565 	}
1566 
1567 	/* Read enhanced TX power information for 6000 Series. */
1568 	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1569 		iwn_read_eeprom_enhinfo(sc);
1570 
1571 	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
1572 	base = letoh16(val);
1573 	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
1574 	DPRINTF(("calib version=%u pa type=%u voltage=%u\n",
1575 	    hdr.version, hdr.pa_type, letoh16(hdr.volt)));
1576 	sc->calib_ver = hdr.version;
1577 
1578 	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
1579 	    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
1580 	    sc->hw_type == IWN_HW_REV_TYPE_135 ||
1581 	    sc->hw_type == IWN_HW_REV_TYPE_105) {
1582 		sc->eeprom_voltage = letoh16(hdr.volt);
1583 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1584 		sc->eeprom_temp = letoh16(val);
1585 		iwn_read_prom_data(sc, base + IWN2000_EEPROM_RAWTEMP, &val, 2);
1586 		sc->eeprom_rawtemp = letoh16(val);
1587 	}
1588 
1589 	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
1590 		/* Compute temperature offset. */
1591 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1592 		sc->eeprom_temp = letoh16(val);
1593 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
1594 		volt = letoh16(val);
1595 		sc->temp_off = sc->eeprom_temp - (volt / -5);
1596 		DPRINTF(("temp=%d volt=%d offset=%dK\n",
1597 		    sc->eeprom_temp, volt, sc->temp_off));
1598 	} else {
1599 		/* Read crystal calibration. */
1600 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
1601 		    &sc->eeprom_crystal, sizeof (uint32_t));
1602 		DPRINTF(("crystal calibration 0x%08x\n",
1603 		    letoh32(sc->eeprom_crystal)));
1604 	}
1605 }
1606 
1607 void
1608 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
1609 {
1610 	struct ieee80211com *ic = &sc->sc_ic;
1611 	const struct iwn_chan_band *band = &iwn_bands[n];
1612 	struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND];
1613 	uint8_t chan;
1614 	int i;
1615 
1616 	iwn_read_prom_data(sc, addr, channels,
1617 	    band->nchan * sizeof (struct iwn_eeprom_chan));
1618 
1619 	for (i = 0; i < band->nchan; i++) {
1620 		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID))
1621 			continue;
1622 
1623 		chan = band->chan[i];
1624 
1625 		if (n == 0) {	/* 2GHz band */
1626 			ic->ic_channels[chan].ic_freq =
1627 			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ);
1628 			ic->ic_channels[chan].ic_flags =
1629 			    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
1630 			    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
1631 
1632 		} else {	/* 5GHz band */
1633 			/*
1634 			 * Some adapters support channels 7, 8, 11 and 12
1635 			 * both in the 2GHz and 4.9GHz bands.
1636 			 * Because of limitations in our net80211 layer,
1637 			 * we don't support them in the 4.9GHz band.
1638 			 */
1639 			if (chan <= 14)
1640 				continue;
1641 
1642 			ic->ic_channels[chan].ic_freq =
1643 			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ);
1644 			ic->ic_channels[chan].ic_flags = IEEE80211_CHAN_A;
1645 			/* We have at least one valid 5GHz channel. */
1646 			sc->sc_flags |= IWN_FLAG_HAS_5GHZ;
1647 		}
1648 
1649 		/* Is active scan allowed on this channel? */
1650 		if (!(channels[i].flags & IWN_EEPROM_CHAN_ACTIVE)) {
1651 			ic->ic_channels[chan].ic_flags |=
1652 			    IEEE80211_CHAN_PASSIVE;
1653 		}
1654 
1655 		/* Save maximum allowed TX power for this channel. */
1656 		sc->maxpwr[chan] = channels[i].maxpwr;
1657 
1658 		if (sc->sc_flags & IWN_FLAG_HAS_11N)
1659 			ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_HT;
1660 
1661 		DPRINTF(("adding chan %d flags=0x%x maxpwr=%d\n",
1662 		    chan, channels[i].flags, sc->maxpwr[chan]));
1663 	}
1664 }
1665 
1666 void
1667 iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
1668 {
1669 	struct iwn_eeprom_enhinfo enhinfo[35];
1670 	uint16_t val, base;
1671 	int8_t maxpwr;
1672 	int i;
1673 
1674 	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1675 	base = letoh16(val);
1676 	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
1677 	    enhinfo, sizeof enhinfo);
1678 
1679 	memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
1680 	for (i = 0; i < nitems(enhinfo); i++) {
1681 		if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0)
1682 			continue;	/* Skip invalid entries. */
1683 
1684 		maxpwr = 0;
1685 		if (sc->txchainmask & IWN_ANT_A)
1686 			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
1687 		if (sc->txchainmask & IWN_ANT_B)
1688 			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
1689 		if (sc->txchainmask & IWN_ANT_C)
1690 			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
1691 		if (sc->ntxchains == 2)
1692 			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
1693 		else if (sc->ntxchains == 3)
1694 			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
1695 		maxpwr /= 2;	/* Convert half-dBm to dBm. */
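		/* e.g. an EEPROM value of 32 (half-dBm) yields 16 dBm. */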
1696 
1697 		DPRINTF(("enhinfo %d, maxpwr=%d\n", i, maxpwr));
1698 		sc->enh_maxpwr[i] = maxpwr;
1699 	}
1700 }
1701 
1702 struct ieee80211_node *
1703 iwn_node_alloc(struct ieee80211com *ic)
1704 {
1705 	return malloc(sizeof (struct iwn_node), M_DEVBUF, M_NOWAIT | M_ZERO);
1706 }
1707 
1708 void
1709 iwn_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
1710 {
1711 	struct iwn_softc *sc = ic->ic_if.if_softc;
1712 	struct iwn_node *wn = (void *)ni;
1713 	uint8_t rate;
1714 	int ridx, i;
1715 
1716 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0)
1717 		ieee80211_amrr_node_init(&sc->amrr, &wn->amn);
1718 
1719 	/* Start at lowest available bit-rate; AMRR/MiRA will raise it. */
1720 	ni->ni_txrate = 0;
1721 	ni->ni_txmcs = 0;
1722 
1723 	for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
1724 		rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;
1725 		/* Map 802.11 rate to HW rate index. */
1726 		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
1727 			if (iwn_rates[ridx].plcp != IWN_PLCP_INVALID &&
1728 			    iwn_rates[ridx].rate == rate)
1729 				break;
1730 		}
1731 		wn->ridx[i] = ridx;
1732 	}
1733 }
1734 
1735 int
1736 iwn_media_change(struct ifnet *ifp)
1737 {
1738 	struct iwn_softc *sc = ifp->if_softc;
1739 	struct ieee80211com *ic = &sc->sc_ic;
1740 	uint8_t rate, ridx;
1741 	int error;
1742 
1743 	error = ieee80211_media_change(ifp);
1744 	if (error != ENETRESET)
1745 		return error;
1746 
1747 	if (ic->ic_fixed_mcs != -1)
1748 		sc->fixed_ridx = iwn_mcs2ridx[ic->ic_fixed_mcs];
1749 	if (ic->ic_fixed_rate != -1) {
1750 		rate = ic->ic_sup_rates[ic->ic_curmode].
1751 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
1752 		/* Map 802.11 rate to HW rate index. */
1753 		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++)
1754 			if (iwn_rates[ridx].plcp != IWN_PLCP_INVALID &&
1755 			    iwn_rates[ridx].rate == rate)
1756 				break;
1757 		sc->fixed_ridx = ridx;
1758 	}
1759 
1760 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1761 	    (IFF_UP | IFF_RUNNING)) {
1762 		iwn_stop(ifp);
1763 		error = iwn_init(ifp);
1764 	}
1765 	return error;
1766 }
1767 
1768 int
1769 iwn_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
1770 {
1771 	struct ifnet *ifp = &ic->ic_if;
1772 	struct iwn_softc *sc = ifp->if_softc;
1773 	struct ieee80211_node *ni = ic->ic_bss;
1774 	struct iwn_node *wn = (void *)ni;
1775 	int error;
1776 
1777 	if (ic->ic_state == IEEE80211_S_RUN) {
1778 		if (nstate == IEEE80211_S_SCAN) {
1779 			/*
1780 			 * During RUN->SCAN we don't call sc_newstate() so
1781 			 * we must stop A-MPDU Tx ourselves in this case.
1782 			 */
1783 			ieee80211_stop_ampdu_tx(ic, ni, -1);
1784 			ieee80211_ba_del(ni);
1785 		}
1786 		ieee80211_mira_cancel_timeouts(&wn->mn);
1787 		timeout_del(&sc->calib_to);
1788 		sc->calib.state = IWN_CALIB_STATE_INIT;
1789 		if (sc->sc_flags & IWN_FLAG_BGSCAN)
1790 			iwn_scan_abort(sc);
1791 	}
1792 
1793 	if (ic->ic_state == IEEE80211_S_SCAN) {
1794 		if (nstate == IEEE80211_S_SCAN) {
1795 			if (sc->sc_flags & IWN_FLAG_SCANNING)
1796 				return 0;
1797 		} else
1798 			sc->sc_flags &= ~IWN_FLAG_SCANNING;
1799 		/* Turn LED off when leaving scan state. */
1800 		iwn_set_led(sc, IWN_LED_LINK, 1, 0);
1801 	}
1802 
1803 	if (ic->ic_state >= IEEE80211_S_ASSOC &&
1804 	    nstate <= IEEE80211_S_ASSOC) {
1805 		/* Reset state to handle re- and disassociations. */
1806 		sc->rxon.associd = 0;
1807 		sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
1808 		sc->calib.state = IWN_CALIB_STATE_INIT;
1809 		error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
1810 		if (error != 0)
1811 			printf("%s: RXON command failed\n",
1812 			    sc->sc_dev.dv_xname);
1813 	}
1814 
1815 	switch (nstate) {
1816 	case IEEE80211_S_SCAN:
1817 		/* Make the link LED blink while we're scanning. */
1818 		iwn_set_led(sc, IWN_LED_LINK, 10, 10);
1819 
1820 		if ((error = iwn_scan(sc, IEEE80211_CHAN_2GHZ, 0)) != 0) {
1821 			printf("%s: could not initiate scan\n",
1822 			    sc->sc_dev.dv_xname);
1823 			return error;
1824 		}
1825 		if (ifp->if_flags & IFF_DEBUG)
1826 			printf("%s: %s -> %s\n", ifp->if_xname,
1827 			    ieee80211_state_name[ic->ic_state],
1828 			    ieee80211_state_name[nstate]);
1829 		if ((sc->sc_flags & IWN_FLAG_BGSCAN) == 0) {
1830 			ieee80211_set_link_state(ic, LINK_STATE_DOWN);
1831 			ieee80211_node_cleanup(ic, ic->ic_bss);
1832 		}
1833 		ic->ic_state = nstate;
1834 		return 0;
1835 
1836 	case IEEE80211_S_ASSOC:
1837 		if (ic->ic_state != IEEE80211_S_RUN)
1838 			break;
1839 		/* FALLTHROUGH */
1840 	case IEEE80211_S_AUTH:
1841 		if ((error = iwn_auth(sc, arg)) != 0) {
1842 			printf("%s: could not move to auth state\n",
1843 			    sc->sc_dev.dv_xname);
1844 			return error;
1845 		}
1846 		break;
1847 
1848 	case IEEE80211_S_RUN:
1849 		if ((error = iwn_run(sc)) != 0) {
1850 			printf("%s: could not move to run state\n",
1851 			    sc->sc_dev.dv_xname);
1852 			return error;
1853 		}
1854 		break;
1855 
1856 	case IEEE80211_S_INIT:
1857 		sc->calib.state = IWN_CALIB_STATE_INIT;
1858 		break;
1859 	}
1860 
1861 	return sc->sc_newstate(ic, nstate, arg);
1862 }
1863 
1864 void
1865 iwn_iter_func(void *arg, struct ieee80211_node *ni)
1866 {
1867 	struct iwn_softc *sc = arg;
1868 	struct iwn_node *wn = (void *)ni;
1869 
1870 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
1871 		int old_txrate = ni->ni_txrate;
1872 		ieee80211_amrr_choose(&sc->amrr, ni, &wn->amn);
1873 		if (old_txrate != ni->ni_txrate)
1874 			iwn_set_link_quality(sc, ni);
1875 	}
1876 }
1877 
1878 void
1879 iwn_calib_timeout(void *arg)
1880 {
1881 	struct iwn_softc *sc = arg;
1882 	struct ieee80211com *ic = &sc->sc_ic;
1883 	int s;
1884 
1885 	s = splnet();
1886 	if (ic->ic_fixed_rate == -1) {
1887 		if (ic->ic_opmode == IEEE80211_M_STA)
1888 			iwn_iter_func(sc, ic->ic_bss);
1889 		else
1890 			ieee80211_iterate_nodes(ic, iwn_iter_func, sc);
1891 	}
1892 	/* Force automatic TX power calibration every 60 secs. */
1893 	if (++sc->calib_cnt >= 120) {
1894 		uint32_t flags = 0;
1895 
1896 		DPRINTFN(2, ("sending request for statistics\n"));
1897 		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
1898 		    sizeof flags, 1);
1899 		sc->calib_cnt = 0;
1900 	}
1901 	splx(s);
1902 
1903 	/* Automatic rate control triggered every 500ms. */
1904 	timeout_add_msec(&sc->calib_to, 500);
1905 }
1906 
1907 int
1908 iwn_ccmp_decap(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
1909 {
1910 	struct ieee80211com *ic = &sc->sc_ic;
1911 	struct ieee80211_key *k = &ni->ni_pairwise_key;
1912 	struct ieee80211_frame *wh;
1913 	uint64_t pn, *prsc;
1914 	uint8_t *ivp;
1915 	uint8_t tid;
1916 	int hdrlen, hasqos;
1917 
1918 	wh = mtod(m, struct ieee80211_frame *);
1919 	hdrlen = ieee80211_get_hdrlen(wh);
1920 	ivp = (uint8_t *)wh + hdrlen;
1921 
1922 	/* Check that ExtIV bit is set. */
1923 	if (!(ivp[3] & IEEE80211_WEP_EXTIV)) {
1924 		DPRINTF(("CCMP decap ExtIV not set\n"));
1925 		return 1;
1926 	}
1927 	hasqos = ieee80211_has_qos(wh);
1928 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
1929 	prsc = &k->k_rsc[tid];
1930 
1931 	/* Extract the 48-bit PN from the CCMP header. */
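	/* CCMP header layout: PN0 PN1 rsvd keyid/ExtIV PN2 PN3 PN4 PN5. */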
1932 	pn = (uint64_t)ivp[0]       |
1933 	     (uint64_t)ivp[1] <<  8 |
1934 	     (uint64_t)ivp[4] << 16 |
1935 	     (uint64_t)ivp[5] << 24 |
1936 	     (uint64_t)ivp[6] << 32 |
1937 	     (uint64_t)ivp[7] << 40;
1938 	if (pn <= *prsc) {
1939 		DPRINTF(("CCMP replayed\n"));
1940 		ic->ic_stats.is_ccmp_replays++;
1941 		return 1;
1942 	}
1943 	/* Last seen packet number is updated in ieee80211_inputm(). */
1944 
1945 	/* Strip MIC. IV will be stripped by ieee80211_inputm(). */
1946 	m_adj(m, -IEEE80211_CCMP_MICLEN);
1947 	return 0;
1948 }
1949 
1950 /*
1951  * Process an RX_PHY firmware notification.  This is usually immediately
1952  * followed by an MPDU_RX_DONE notification.
1953  */
1954 void
1955 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
1956     struct iwn_rx_data *data)
1957 {
1958 	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
1959 
1960 	DPRINTFN(2, ("received PHY stats\n"));
1961 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
1962 	    sizeof (*stat), BUS_DMASYNC_POSTREAD);
1963 
1964 	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
1965 	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
1966 	sc->last_rx_valid = IWN_LAST_RX_VALID;
1967 	/*
1968 	 * The firmware does not send separate RX_PHY
1969 	 * notifications for A-MPDU subframes.
1970 	 */
1971 	if (stat->flags & htole16(IWN_STAT_FLAG_AGG))
1972 		sc->last_rx_valid |= IWN_LAST_RX_AMPDU;
1973 }
1974 
1975 /*
1976  * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
1977  * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
1978  */
1979 void
1980 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
1981     struct iwn_rx_data *data, struct mbuf_list *ml)
1982 {
1983 	struct iwn_ops *ops = &sc->ops;
1984 	struct ieee80211com *ic = &sc->sc_ic;
1985 	struct ifnet *ifp = &ic->ic_if;
1986 	struct iwn_rx_ring *ring = &sc->rxq;
1987 	struct ieee80211_frame *wh;
1988 	struct ieee80211_rxinfo rxi;
1989 	struct ieee80211_node *ni;
1990 	struct ieee80211_channel *bss_chan = NULL;
1991 	uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
1992 	struct mbuf *m, *m1;
1993 	struct iwn_rx_stat *stat;
1994 	caddr_t head;
1995 	uint32_t flags;
1996 	int error, len, rssi;
1997 	uint16_t chan;
1998 
1999 	if (desc->type == IWN_MPDU_RX_DONE) {
2000 		/* Check for prior RX_PHY notification. */
2001 		if (!sc->last_rx_valid) {
2002 			DPRINTF(("missing RX_PHY\n"));
2003 			return;
2004 		}
2005 		sc->last_rx_valid &= ~IWN_LAST_RX_VALID;
2006 		stat = &sc->last_rx_stat;
2007 		if ((sc->last_rx_valid & IWN_LAST_RX_AMPDU) &&
2008 		    (stat->flags & htole16(IWN_STAT_FLAG_AGG)) == 0) {
2009 			DPRINTF(("missing RX_PHY (expecting A-MPDU)\n"));
2010 			return;
2011 		}
2012 		if ((sc->last_rx_valid & IWN_LAST_RX_AMPDU) == 0 &&
2013 		    (stat->flags & htole16(IWN_STAT_FLAG_AGG))) {
2014 			DPRINTF(("missing RX_PHY (unexpected A-MPDU)\n"));
2015 			return;
2016 		}
2017 	} else
2018 		stat = (struct iwn_rx_stat *)(desc + 1);
2019 
2020 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWN_RBUF_SIZE,
2021 	    BUS_DMASYNC_POSTREAD);
2022 
2023 	if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2024 		printf("%s: invalid RX statistic header\n",
2025 		    sc->sc_dev.dv_xname);
2026 		return;
2027 	}
2028 	if (desc->type == IWN_MPDU_RX_DONE) {
2029 		struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2030 		head = (caddr_t)(mpdu + 1);
2031 		len = letoh16(mpdu->len);
2032 	} else {
2033 		head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
2034 		len = letoh16(stat->len);
2035 	}
2036 
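	/* A 32-bit Rx status word follows the frame payload in the buffer. */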
2037 	flags = letoh32(*(uint32_t *)(head + len));
2038 
2039 	/* Discard frames with a bad FCS early. */
2040 	if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2041 		DPRINTFN(2, ("RX flags error %x\n", flags));
2042 		ifp->if_ierrors++;
2043 		return;
2044 	}
2045 	/* Discard frames that are too short. */
2046 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
2047 		/* Allow control frames in monitor mode. */
2048 		if (len < sizeof (struct ieee80211_frame_cts)) {
2049 			DPRINTF(("frame too short: %d\n", len));
2050 			ic->ic_stats.is_rx_tooshort++;
2051 			ifp->if_ierrors++;
2052 			return;
2053 		}
2054 	} else if (len < sizeof (*wh)) {
2055 		DPRINTF(("frame too short: %d\n", len));
2056 		ic->ic_stats.is_rx_tooshort++;
2057 		ifp->if_ierrors++;
2058 		return;
2059 	}
2060 
2061 	m1 = MCLGETL(NULL, M_DONTWAIT, IWN_RBUF_SIZE);
2062 	if (m1 == NULL) {
2063 		ic->ic_stats.is_rx_nombuf++;
2064 		ifp->if_ierrors++;
2065 		return;
2066 	}
2067 	bus_dmamap_unload(sc->sc_dmat, data->map);
2068 
2069 	error = bus_dmamap_load(sc->sc_dmat, data->map, mtod(m1, void *),
2070 	    IWN_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_READ);
2071 	if (error != 0) {
2072 		m_freem(m1);
2073 
2074 		/* Try to reload the old mbuf. */
2075 		error = bus_dmamap_load(sc->sc_dmat, data->map,
2076 		    mtod(data->m, void *), IWN_RBUF_SIZE, NULL,
2077 		    BUS_DMA_NOWAIT | BUS_DMA_READ);
2078 		if (error != 0) {
2079 			panic("%s: could not load old RX mbuf",
2080 			    sc->sc_dev.dv_xname);
2081 		}
2082 		/* Physical address may have changed. */
2083 		ring->desc[ring->cur] =
2084 		    htole32(data->map->dm_segs[0].ds_addr >> 8);
2085 		bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
2086 		    ring->cur * sizeof (uint32_t), sizeof (uint32_t),
2087 		    BUS_DMASYNC_PREWRITE);
2088 		ifp->if_ierrors++;
2089 		return;
2090 	}
2091 
2092 	m = data->m;
2093 	data->m = m1;
2094 	/* Update RX descriptor. */
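	/* The ring stores DMA addresses >> 8, i.e. in 256-byte units. */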
2095 	ring->desc[ring->cur] = htole32(data->map->dm_segs[0].ds_addr >> 8);
2096 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
2097 	    ring->cur * sizeof (uint32_t), sizeof (uint32_t),
2098 	    BUS_DMASYNC_PREWRITE);
2099 
2100 	/* Finalize mbuf. */
2101 	m->m_data = head;
2102 	m->m_pkthdr.len = m->m_len = len;
2103 
2104 	/*
2105 	 * Grab a reference to the source node. Note that control frames are
2106 	 * shorter than struct ieee80211_frame, but ieee80211_find_rxnode()
2107 	 * handles control frames safely.
2108 	 */
2109 	wh = mtod(m, struct ieee80211_frame *);
2110 	if (len < sizeof (*wh) &&
2111 	   (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
2112 		ic->ic_stats.is_rx_tooshort++;
2113 		ifp->if_ierrors++;
2114 		m_freem(m);
2115 		return;
2116 	}
2117 	ni = ieee80211_find_rxnode(ic, wh);
2118 
2119 	rxi.rxi_flags = 0;
2120 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
2121 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
2122 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
2123 	    (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
2124 	    ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
2125 		if ((flags & IWN_RX_CIPHER_MASK) != IWN_RX_CIPHER_CCMP) {
2126 			ic->ic_stats.is_ccmp_dec_errs++;
2127 			ifp->if_ierrors++;
2128 			m_freem(m);
2129 			ieee80211_release_node(ic, ni);
2130 			return;
2131 		}
2132 		/* Check whether decryption was successful or not. */
2133 		if ((desc->type == IWN_MPDU_RX_DONE &&
2134 		     (flags & (IWN_RX_MPDU_DEC | IWN_RX_MPDU_MIC_OK)) !=
2135 		      (IWN_RX_MPDU_DEC | IWN_RX_MPDU_MIC_OK)) ||
2136 		    (desc->type != IWN_MPDU_RX_DONE &&
2137 		     (flags & IWN_RX_DECRYPT_MASK) != IWN_RX_DECRYPT_OK)) {
2138 			DPRINTF(("CCMP decryption failed 0x%x\n", flags));
2139 			ic->ic_stats.is_ccmp_dec_errs++;
2140 			ifp->if_ierrors++;
2141 			m_freem(m);
2142 			ieee80211_release_node(ic, ni);
2143 			return;
2144 		}
2145 		if (iwn_ccmp_decap(sc, m, ni) != 0) {
2146 			ifp->if_ierrors++;
2147 			m_freem(m);
2148 			ieee80211_release_node(ic, ni);
2149 			return;
2150 		}
2151 		rxi.rxi_flags |= IEEE80211_RXI_HWDEC;
2152 	}
2153 
2154 	rssi = ops->get_rssi(stat);
2155 
2156 	chan = stat->chan;
2157 	if (chan > IEEE80211_CHAN_MAX)
2158 		chan = IEEE80211_CHAN_MAX;
2159 
2160 	/* Fix current channel. */
2161 	if (ni == ic->ic_bss) {
2162 		/*
2163 		 * We may switch ic_bss's channel during scans.
2164 		 * Record the current channel so we can restore it later.
2165 		 */
2166 		bss_chan = ni->ni_chan;
2167 		IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
2168 	}
2169 	ni->ni_chan = &ic->ic_channels[chan];
2170 
2171 #if NBPFILTER > 0
2172 	if (sc->sc_drvbpf != NULL) {
2173 		struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
2174 		uint16_t chan_flags;
2175 
2176 		tap->wr_flags = 0;
2177 		if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
2178 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2179 		tap->wr_chan_freq = htole16(ic->ic_channels[chan].ic_freq);
2180 		chan_flags = ic->ic_channels[chan].ic_flags;
2181 		if (ic->ic_curmode != IEEE80211_MODE_11N)
2182 			chan_flags &= ~IEEE80211_CHAN_HT;
2183 		tap->wr_chan_flags = htole16(chan_flags);
2184 		tap->wr_dbm_antsignal = (int8_t)rssi;
2185 		tap->wr_dbm_antnoise = (int8_t)sc->noise;
2186 		tap->wr_tsft = stat->tstamp;
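		/*
		 * wr_rate is in 500 kb/s units for legacy rates (e.g. 12 is
		 * 6 Mb/s OFDM); HT frames report the MCS index with bit 7 set.
		 */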
2187 		if (stat->rflags & IWN_RFLAG_MCS) {
2188 			tap->wr_rate = (0x80 | stat->rate); /* HT MCS index */
2189 		} else {
2190 			switch (stat->rate) {
2191 			/* CCK rates. */
2192 			case  10: tap->wr_rate =   2; break;
2193 			case  20: tap->wr_rate =   4; break;
2194 			case  55: tap->wr_rate =  11; break;
2195 			case 110: tap->wr_rate =  22; break;
2196 			/* OFDM rates. */
2197 			case 0xd: tap->wr_rate =  12; break;
2198 			case 0xf: tap->wr_rate =  18; break;
2199 			case 0x5: tap->wr_rate =  24; break;
2200 			case 0x7: tap->wr_rate =  36; break;
2201 			case 0x9: tap->wr_rate =  48; break;
2202 			case 0xb: tap->wr_rate =  72; break;
2203 			case 0x1: tap->wr_rate =  96; break;
2204 			case 0x3: tap->wr_rate = 108; break;
2205 			/* Unknown rate: should not happen. */
2206 			default:  tap->wr_rate =  0;
2207 			}
2208 		}
2209 
2210 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
2211 		    m, BPF_DIRECTION_IN);
2212 	}
2213 #endif
2214 
2215 	/* Send the frame to the 802.11 layer. */
2216 	rxi.rxi_rssi = rssi;
2217 	rxi.rxi_tstamp = 0;	/* unused */
2218 	ieee80211_inputm(ifp, m, ni, &rxi, ml);
2219 
2220 	/*
2221 	 * ieee80211_inputm() might have changed our BSS.
2222 	 * Restore ic_bss's channel if we are still in the same BSS.
2223 	 */
2224 	if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
2225 		ni->ni_chan = bss_chan;
2226 
2227 	/* Node is no longer needed. */
2228 	ieee80211_release_node(ic, ni);
2229 }
2230 
2231 void
2232 iwn_mira_choose(struct iwn_softc *sc, struct ieee80211_node *ni)
2233 {
2234 	struct ieee80211com *ic = &sc->sc_ic;
2235 	struct iwn_node *wn = (void *)ni;
2236 	int best_mcs = ieee80211_mira_get_best_mcs(&wn->mn);
2237 
2238 	ieee80211_mira_choose(&wn->mn, ic, ni);
2239 
2240 	/*
2241 	 * Update firmware's LQ retry table if MiRA has chosen a new MCS.
2242 	 *
2243 	 * We only need to do this if the best MCS has changed because
2244 	 * we ask firmware to use a fixed MCS while MiRA is probing a
2245 	 * candidate MCS.
2246 	 * While not probing we ask firmware to retry at lower rates in case
2247 	 * Tx at the newly chosen best MCS ends up failing, and then report
2248 	 * any resulting Tx retries to MiRA in order to trigger probing.
2249 	 */
2250 	if (best_mcs != ieee80211_mira_get_best_mcs(&wn->mn))
2251 		iwn_set_link_quality(sc, ni);
2252 }
2253 
2254 void
2255 iwn_ampdu_rate_control(struct iwn_softc *sc, struct ieee80211_node *ni,
2256     struct iwn_tx_ring *txq, int tid, uint16_t seq, uint16_t ssn)
2257 {
2258 	struct ieee80211com *ic = &sc->sc_ic;
2259 	struct iwn_node *wn = (void *)ni;
2260 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
2261 	int min_ampdu_id, max_ampdu_id, id;
2262 	int idx, end_idx;
2263 
2264 	/* Determine the min/max IDs we assigned to AMPDUs in this range. */
2265 	idx = IWN_AGG_SSN_TO_TXQ_IDX(seq);
2266 	end_idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn);
2267 	min_ampdu_id = txq->data[idx].ampdu_id;
2268 	max_ampdu_id = min_ampdu_id;
2269 	while (idx != end_idx) {
2270 		struct iwn_tx_data *txdata = &txq->data[idx];
2271 
2272 		if (txdata->m != NULL) {
2273 			if (min_ampdu_id > txdata->ampdu_id)
2274 				min_ampdu_id = txdata->ampdu_id;
2275 			if (max_ampdu_id < txdata->ampdu_id)
2276 				max_ampdu_id = txdata->ampdu_id;
2277 		}
2278 
2279 		idx = (idx + 1) % IWN_TX_RING_COUNT;
2280 	}
2281 
2282 	/*
2283 	 * Update Tx rate statistics for A-MPDUs before firmware's BA window.
2284 	 */
2285 	for (id = min_ampdu_id; id <= max_ampdu_id; id++) {
2286 		int have_ack = 0, bit = 0;
2287 		idx = IWN_AGG_SSN_TO_TXQ_IDX(seq);
2288 		end_idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn);
2289 		wn->mn.agglen = 0;
2290 		wn->mn.ampdu_size = 0;
2291 		while (idx != end_idx) {
2292 			struct iwn_tx_data *txdata = &txq->data[idx];
2293 			uint16_t s = (seq + bit) & 0xfff;
2294 			/*
2295 			 * We can assume that this subframe has been ACKed
2296 			 * because ACK failures come as single frames and
2297 			 * before failing an A-MPDU subframe the firmware
2298 			 * sends it as a single frame at least once.
2299 			 *
2300 			 * However, when this A-MPDU was transmitted we
2301 			 * learned how many subframes it contained.
2302 			 * So if firmware isn't reporting all subframes now
2303 			 * we can deduce an ACK failure for missing frames.
2304 			 */
2305 			if (txdata->m != NULL && txdata->ampdu_id == id &&
2306 			    txdata->ampdu_txmcs == ni->ni_txmcs &&
2307 			    txdata->ampdu_nframes > 0 &&
2308 			    (SEQ_LT(ba->ba_winend, s) ||
2309 			    (ba->ba_bitmap & (1 << bit)) == 0)) {
2310 				have_ack++;
2311 				wn->mn.frames = txdata->ampdu_nframes;
2312 				wn->mn.agglen = txdata->ampdu_nframes;
2313 				wn->mn.ampdu_size = txdata->ampdu_size;
2314 				if (txdata->retries > 1)
2315 					wn->mn.retries++;
2316 				if (!SEQ_LT(ba->ba_winend, s))
2317 					ieee80211_output_ba_record_ack(ic, ni,
2318 					    tid, s);
2319 			}
2320 
2321 			idx = (idx + 1) % IWN_TX_RING_COUNT;
2322 			bit++;
2323 		}
2324 
2325 		if (have_ack > 0) {
2326 			wn->mn.txfail = wn->mn.frames - have_ack;
2327 			iwn_mira_choose(sc, ni);
2328 		}
2329 	}
2330 }
2331 
2332 /*
2333  * Process an incoming Compressed BlockAck.
2334  * Note that these block ack notifications are generated by firmware and do
2335  * not necessarily correspond to contents of block ack frames seen on the air.
2336  */
2337 void
2338 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2339     struct iwn_rx_data *data)
2340 {
2341 	struct iwn_compressed_ba *cba = (struct iwn_compressed_ba *)(desc + 1);
2342 	struct ieee80211com *ic = &sc->sc_ic;
2343 	struct ieee80211_node *ni;
2344 	struct ieee80211_tx_ba *ba;
2345 	struct iwn_tx_ring *txq;
2346 	uint16_t seq, ssn;
2347 	int qid;
2348 
2349 	if (ic->ic_state != IEEE80211_S_RUN)
2350 		return;
2351 
2352 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), sizeof (*cba),
2353 	    BUS_DMASYNC_POSTREAD);
2354 
2355 	if (!IEEE80211_ADDR_EQ(ic->ic_bss->ni_macaddr, cba->macaddr))
2356 		return;
2357 
2358 	ni = ic->ic_bss;
2359 
2360 	qid = le16toh(cba->qid);
2361 	if (qid < sc->first_agg_txq || qid >= sc->ntxqs)
2362 		return;
2363 
2364 	txq = &sc->txq[qid];
2365 
2366 	/* Protect against a firmware bug where the queue/TID are off. */
2367 	if (qid != sc->first_agg_txq + cba->tid)
2368 		return;
2369 
2370 	ba = &ni->ni_tx_ba[cba->tid];
2371 	if (ba->ba_state != IEEE80211_BA_AGREED)
2372 		return;
2373 
2374 	/*
2375 	 * The first bit in cba->bitmap corresponds to the sequence number
2376 	 * stored in the sequence control field cba->seq.
2377 	 * Multiple BA notifications in a row may be using this number, with
2378 	 * additional bits being set in cba->bitmap. It is unclear how the
2379 	 * firmware decides to shift this window forward.
2380 	 * We rely on ba->ba_winstart instead.
2381 	 */
2382 	seq = le16toh(cba->seq) >> IEEE80211_SEQ_SEQ_SHIFT;
2383 
2384 	/*
2385 	 * The firmware's new BA window starting sequence number
2386 	 * corresponds to the first hole in cba->bitmap, implying
2387 	 * that all frames between 'seq' and 'ssn' (non-inclusive)
2388 	 * have been acked.
2389 	 */
2390 	ssn = le16toh(cba->ssn);
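	/*
	 * For example, with ba_winstart == 100 and ssn == 105, subframes
	 * 100-104 are considered acknowledged and the window is moved
	 * forward to 105 below.
	 */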
2391 
2392 	/* Skip rate control if our Tx rate is fixed. */
2393 	if (ic->ic_fixed_mcs == -1)
2394 		iwn_ampdu_rate_control(sc, ni, txq, cba->tid, ba->ba_winstart,
2395 		    ssn);
2396 
2397 	/*
2398 	 * SSN corresponds to the first (perhaps not yet transmitted) frame
2399 	 * in firmware's BA window. Firmware is not going to retransmit any
2400 	 * frames before its BA window so mark them all as done.
2401 	 */
2402 	if (SEQ_LT(ba->ba_winstart, ssn)) {
2403 		ieee80211_output_ba_move_window(ic, ni, cba->tid, ssn);
2404 		iwn_ampdu_txq_advance(sc, txq, qid,
2405 		    IWN_AGG_SSN_TO_TXQ_IDX(ssn));
2406 		iwn_clear_oactive(sc, txq);
2407 	}
2408 }
2409 
2410 /*
2411  * Process a CALIBRATION_RESULT notification sent by the initialization
2412  * firmware on response to a CMD_CALIB_CONFIG command (5000 only).
2413  * firmware in response to a CMD_CALIB_CONFIG command (5000 only).
2414 void
2415 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2416     struct iwn_rx_data *data)
2417 {
2418 	struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
2419 	int len, idx = -1;
2420 
2421 	/* Runtime firmware should not send such a notification. */
2422 	if (sc->sc_flags & IWN_FLAG_CALIB_DONE)
2423 		return;
2424 
2425 	len = (letoh32(desc->len) & IWN_RX_DESC_LEN_MASK) - 4;
2426 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), len,
2427 	    BUS_DMASYNC_POSTREAD);
2428 
2429 	switch (calib->code) {
2430 	case IWN5000_PHY_CALIB_DC:
2431 		if (sc->hw_type == IWN_HW_REV_TYPE_5150 ||
2432 		    sc->hw_type == IWN_HW_REV_TYPE_2030 ||
2433 		    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
2434 		    sc->hw_type == IWN_HW_REV_TYPE_135 ||
2435 		    sc->hw_type == IWN_HW_REV_TYPE_105)
2436 			idx = 0;
2437 		break;
2438 	case IWN5000_PHY_CALIB_LO:
2439 		idx = 1;
2440 		break;
2441 	case IWN5000_PHY_CALIB_TX_IQ:
2442 		idx = 2;
2443 		break;
2444 	case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
2445 		if (sc->hw_type < IWN_HW_REV_TYPE_6000 &&
2446 		    sc->hw_type != IWN_HW_REV_TYPE_5150)
2447 			idx = 3;
2448 		break;
2449 	case IWN5000_PHY_CALIB_BASE_BAND:
2450 		idx = 4;
2451 		break;
2452 	}
2453 	if (idx == -1)	/* Ignore other results. */
2454 		return;
2455 
2456 	/* Save calibration result. */
2457 	if (sc->calibcmd[idx].buf != NULL)
2458 		free(sc->calibcmd[idx].buf, M_DEVBUF, 0);
2459 	sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
2460 	if (sc->calibcmd[idx].buf == NULL) {
2461 		DPRINTF(("not enough memory for calibration result %d\n",
2462 		    calib->code));
2463 		return;
2464 	}
2465 	DPRINTF(("saving calibration result code=%d len=%d\n",
2466 	    calib->code, len));
2467 	sc->calibcmd[idx].len = len;
2468 	memcpy(sc->calibcmd[idx].buf, calib, len);
2469 }
2470 
2471 /*
2472  * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2473  * The latter is sent by the firmware after each received beacon.
2474  */
2475 void
2476 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2477     struct iwn_rx_data *data)
2478 {
2479 	struct iwn_ops *ops = &sc->ops;
2480 	struct ieee80211com *ic = &sc->sc_ic;
2481 	struct iwn_calib_state *calib = &sc->calib;
2482 	struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2483 	int temp;
2484 
2485 	/* Ignore statistics received during a scan. */
2486 	if (ic->ic_state != IEEE80211_S_RUN)
2487 		return;
2488 
2489 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2490 	    sizeof (*stats), BUS_DMASYNC_POSTREAD);
2491 
2492 	DPRINTFN(3, ("received statistics (cmd=%d)\n", desc->type));
2493 	sc->calib_cnt = 0;	/* Reset TX power calibration timeout. */
2494 
2495 	/* Test if temperature has changed. */
2496 	if (stats->general.temp != sc->rawtemp) {
2497 		/* Convert "raw" temperature to degC. */
2498 		sc->rawtemp = stats->general.temp;
2499 		temp = ops->get_temperature(sc);
2500 		DPRINTFN(2, ("temperature=%dC\n", temp));
2501 
2502 		/* Update TX power if need be (4965AGN only). */
2503 		if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2504 			iwn4965_power_calibration(sc, temp);
2505 	}
2506 
2507 	if (desc->type != IWN_BEACON_STATISTICS)
2508 		return;	/* Reply to a statistics request. */
2509 
2510 	sc->noise = iwn_get_noise(&stats->rx.general);
2511 
2512 	/* Test that RSSI and noise are present in stats report. */
2513 	if (sc->noise == -127)
2514 		return;
2515 
2516 	if (letoh32(stats->rx.general.flags) != 1) {
2517 		DPRINTF(("received statistics without RSSI\n"));
2518 		return;
2519 	}
2520 
2521 	/*
2522 	 * XXX Differential gain calibration makes the 6005 firmware
2523 	 * crap out, so skip it for now.  This effectively disables
2524 	 * sensitivity tuning as well.
2525 	 */
2526 	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
2527 		return;
2528 
2529 	if (calib->state == IWN_CALIB_STATE_ASSOC)
2530 		iwn_collect_noise(sc, &stats->rx.general);
2531 	else if (calib->state == IWN_CALIB_STATE_RUN)
2532 		iwn_tune_sensitivity(sc, &stats->rx);
2533 }
2534 
2535 void
2536 iwn_ampdu_txq_advance(struct iwn_softc *sc, struct iwn_tx_ring *txq, int qid,
2537     int idx)
2538 {
2539 	struct iwn_ops *ops = &sc->ops;
2540 
2541 	DPRINTFN(3, ("%s: txq->cur=%d txq->read=%d txq->queued=%d qid=%d "
2542 	    "idx=%d\n", __func__, txq->cur, txq->read, txq->queued, qid, idx));
2543 
2544 	while (txq->read != idx) {
2545 		struct iwn_tx_data *txdata = &txq->data[txq->read];
2546 		if (txdata->m != NULL) {
2547 			ops->reset_sched(sc, qid, txq->read);
2548 			iwn_tx_done_free_txdata(sc, txdata);
2549 			txq->queued--;
2550 		}
2551 		txq->read = (txq->read + 1) % IWN_TX_RING_COUNT;
2552 	}
2553 }
2554 
2555 /*
2556  * Handle A-MPDU Tx queue status report.
2557  * Tx failures come as single frames (perhaps out of order), and before failing
2558  * an A-MPDU subframe the firmware transmits it as a single frame at least once
2559  * and reports Tx success/failure here. Frames successfully transmitted in an
2560  * A-MPDU are completed when a compressed block ack notification is received.
2561  */
2562 void
2563 iwn_ampdu_tx_done(struct iwn_softc *sc, struct iwn_tx_ring *txq,
2564     struct iwn_rx_desc *desc, uint16_t status, uint8_t ackfailcnt,
2565     uint8_t rate, uint8_t rflags, int nframes, uint32_t ssn,
2566     struct iwn_txagg_status *agg_status)
2567 {
2568 	struct ieee80211com *ic = &sc->sc_ic;
2569 	int tid = desc->qid - sc->first_agg_txq;
2570 	struct iwn_tx_data *txdata = &txq->data[desc->idx];
2571 	struct ieee80211_node *ni = txdata->ni;
2572 	struct iwn_node *wn = (void *)ni;
2573 	int txfail = (status != IWN_TX_STATUS_SUCCESS &&
2574 	    status != IWN_TX_STATUS_DIRECT_DONE);
2575 	struct ieee80211_tx_ba *ba;
2576 	uint16_t seq;
2577 
2578 	sc->sc_tx_timer = 0;
2579 
2580 	if (ic->ic_state != IEEE80211_S_RUN)
2581 		return;
2582 
2583 	if (nframes > 1) {
2584 		int ampdu_id, have_ampdu_id = 0, ampdu_size = 0;
2585 		int i;
2586 
2587 		/* Compute the size of this A-MPDU. */
2588 		for (i = 0; i < nframes; i++) {
2589 			uint8_t qid = agg_status[i].qid;
2590 			uint8_t idx = agg_status[i].idx;
2591 
2592 			if (qid != desc->qid)
2593 				continue;
2594 
2595 			txdata = &txq->data[idx];
2596 			if (txdata->ni == NULL)
2597 				continue;
2598 
2599 			ampdu_size += txdata->totlen + IEEE80211_CRC_LEN;
2600 		}
2601 
2602 		/*
2603 		 * For each subframe collect Tx status, retries, and Tx rate.
2604 		 * (The Tx rate is the same for all subframes in this batch.)
2605 		 */
2606 		for (i = 0; i < nframes; i++) {
2607 			uint8_t qid = agg_status[i].qid;
2608 			uint8_t idx = agg_status[i].idx;
2609 			uint16_t txstatus = (le16toh(agg_status[i].status) &
2610 			    IWN_AGG_TX_STATUS_MASK);
2611 			uint16_t trycnt = (le16toh(agg_status[i].status) &
2612 			    IWN_AGG_TX_TRY) >> IWN_AGG_TX_TRY_SHIFT;
2613 
2614 			if (qid != desc->qid)
2615 				continue;
2616 
2617 			txdata = &txq->data[idx];
2618 			if (txdata->ni == NULL)
2619 				continue;
2620 
2621 			if (rflags & IWN_RFLAG_MCS)
2622 				txdata->ampdu_txmcs = rate;
2623 			if (txstatus != IWN_AGG_TX_STATE_TRANSMITTED)
2624 				txdata->txfail++;
2625 			if (trycnt > 1)
2626 				txdata->retries++;
2627 
2628 			/*
2629 			 * Assign a common ID to all subframes of this A-MPDU.
2630 			 * This ID will be used during Tx rate control to
2631 			 * infer the ACK status of individual subframes.
2632 			 */
2633 			if (!have_ampdu_id) {
2634 				wn = (void *)txdata->ni;
2635 				ampdu_id = wn->next_ampdu_id++;
2636 				have_ampdu_id = 1;
2637 			}
2638 			txdata->ampdu_id = ampdu_id;
2639 
2640 			/*
2641 			 * We will also need to know the total number of
2642 			 * subframes and the size of this A-MPDU. We store
2643 			 * this redundantly on each subframe because firmware
2644 			 * only reports acknowledged subframes via compressed
2645 			 * block-ack notification. This way we still know the
2646 			 * total number of subframes and the A-MPDU size even
2647 			 * if just one of these subframes gets acknowledged.
2648 			 */
2649 			txdata->ampdu_nframes = nframes;
2650 			txdata->ampdu_size = ampdu_size;
2651 		}
2652 		return;
2653 	}
2654 
2655 	if (ni == NULL)
2656 		return;
2657 
2658 	ba = &ni->ni_tx_ba[tid];
2659 	if (ba->ba_state != IEEE80211_BA_AGREED)
2660 		return;
2661 
2662 	/* This was a final single-frame Tx attempt for frame SSN-1. */
2663 	seq = (ssn - 1) & 0xfff;
2664 
2665 	/*
2666 	 * Skip rate control if our Tx rate is fixed.
2667 	 * Don't report frames to MiRA which were sent at a different
2668 	 * Tx rate than ni->ni_txmcs.
2669 	 */
2670 	if (ic->ic_fixed_mcs == -1 && txdata->txmcs == ni->ni_txmcs) {
2671 		wn->mn.frames++;
2672 		wn->mn.agglen = 1;
2673 		wn->mn.ampdu_size = txdata->totlen + IEEE80211_CRC_LEN;
2674 		if (ackfailcnt > 0)
2675 			wn->mn.retries++;
2676 		if (txfail)
2677 			wn->mn.txfail++;
2678 		iwn_mira_choose(sc, ni);
2679 	}
2680 
2681 	if (txfail)
2682 		ieee80211_tx_compressed_bar(ic, ni, tid, ssn);
2683 	else if (!SEQ_LT(seq, ba->ba_winstart)) {
2684 		/*
2685 		 * Move window forward if SEQ lies beyond end of window,
2686 		 * otherwise we can't record the ACK for this frame.
2687 		 * Non-acked frames which left holes in the bitmap near
2688 		 * the beginning of the window must be discarded.
2689 		 */
2690 		uint16_t s = seq;
2691 		while (SEQ_LT(ba->ba_winend, s)) {
2692 			ieee80211_output_ba_move_window(ic, ni, tid, s);
2693 			iwn_ampdu_txq_advance(sc, txq, desc->qid,
2694 			    IWN_AGG_SSN_TO_TXQ_IDX(s));
2695 			s = (s + 1) & 0xfff;	/* Wrap within 12-bit sequence space. */
2696 		}
2697 		/* SEQ should now be within window; set corresponding bit. */
2698 		ieee80211_output_ba_record_ack(ic, ni, tid, seq);
2699 	}
2700 
2701 	/* Move window forward up to the first hole in the bitmap. */
2702 	ieee80211_output_ba_move_window_to_first_unacked(ic, ni, tid, ssn);
2703 	iwn_ampdu_txq_advance(sc, txq, desc->qid,
2704 	    IWN_AGG_SSN_TO_TXQ_IDX(ba->ba_winstart));
2705 
2706 	iwn_clear_oactive(sc, txq);
2707 }
2708 
2709 /*
2710  * Process a TX_DONE firmware notification.  Unfortunately, the 4965AGN
2711  * and 5000 adapters use different, incompatible TX status formats.
2712  */
2713 void
2714 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2715     struct iwn_rx_data *data)
2716 {
2717 	struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2718 	struct iwn_tx_ring *ring;
2719 	size_t len = (letoh32(desc->len) & IWN_RX_DESC_LEN_MASK);
2720 	uint16_t status = letoh32(stat->stat.status) & 0xff;
2721 	uint32_t ssn;
2722 
2723 	if (desc->qid >= IWN4965_NTXQUEUES)
2724 		return;
2725 
2726 	ring = &sc->txq[desc->qid];
2727 
2728 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2729 	    len, BUS_DMASYNC_POSTREAD);
2730 
2731 	/* Sanity checks. */
2732 	if (sizeof(*stat) > len)
2733 		return;
2734 	if (stat->nframes < 1 || stat->nframes > IWN_AMPDU_MAX)
2735 		return;
2736 	if (desc->qid < sc->first_agg_txq && stat->nframes > 1)
2737 		return;
2738 	if (desc->qid >= sc->first_agg_txq && sizeof(*stat) + sizeof(ssn) +
2739 	    stat->nframes * sizeof(stat->stat) > len)
2740 		return;
2741 
2742 	if (desc->qid < sc->first_agg_txq) {
2743 		/* XXX 4965 does not report byte count */
2744 		struct iwn_tx_data *txdata = &ring->data[desc->idx];
2745 		uint16_t framelen = txdata->totlen + IEEE80211_CRC_LEN;
2746 		int txfail = (status != IWN_TX_STATUS_SUCCESS &&
2747 		    status != IWN_TX_STATUS_DIRECT_DONE);
2748 
2749 		iwn_tx_done(sc, desc, stat->ackfailcnt, stat->rate, txfail,
2750 		    desc->qid, framelen);
2751 	} else {
2752 		memcpy(&ssn, &stat->stat.status + stat->nframes, sizeof(ssn));
2753 		ssn = le32toh(ssn) & 0xfff;
2754 		iwn_ampdu_tx_done(sc, ring, desc, status, stat->ackfailcnt,
2755 		    stat->rate, stat->rflags, stat->nframes, ssn,
2756 		    stat->stat.agg_status);
2757 	}
2758 }
2759 
2760 void
2761 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2762     struct iwn_rx_data *data)
2763 {
2764 	struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2765 	struct iwn_tx_ring *ring;
2766 	size_t len = (letoh32(desc->len) & IWN_RX_DESC_LEN_MASK);
2767 	uint16_t status = letoh32(stat->stat.status) & 0xff;
2768 	uint32_t ssn;
2769 
2770 	if (desc->qid >= IWN5000_NTXQUEUES)
2771 		return;
2772 
2773 	ring = &sc->txq[desc->qid];
2774 
2775 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2776 	    sizeof (*stat), BUS_DMASYNC_POSTREAD);
2777 
2778 	/* Sanity checks. */
2779 	if (sizeof(*stat) > len)
2780 		return;
2781 	if (stat->nframes < 1 || stat->nframes > IWN_AMPDU_MAX)
2782 		return;
2783 	if (desc->qid < sc->first_agg_txq && stat->nframes > 1)
2784 		return;
2785 	if (desc->qid >= sc->first_agg_txq && sizeof(*stat) + sizeof(ssn) +
2786 	    stat->nframes * sizeof(stat->stat) > len)
2787 		return;
2788 
2789 	/* If this was not an aggregated frame, complete it now. */
2790 	if (desc->qid < sc->first_agg_txq) {
2791 		int txfail = (status != IWN_TX_STATUS_SUCCESS &&
2792 		    status != IWN_TX_STATUS_DIRECT_DONE);
2793 
2794 		/* Reset TX scheduler slot. */
2795 		iwn5000_reset_sched(sc, desc->qid, desc->idx);
2796 
2797 		iwn_tx_done(sc, desc, stat->ackfailcnt, stat->rate, txfail,
2798 		    desc->qid, letoh16(stat->len));
2799 	} else {
2800 		memcpy(&ssn, &stat->stat.status + stat->nframes, sizeof(ssn));
2801 		ssn = le32toh(ssn) & 0xfff;
2802 		iwn_ampdu_tx_done(sc, ring, desc, status, stat->ackfailcnt,
2803 		    stat->rate, stat->rflags, stat->nframes, ssn,
2804 		    stat->stat.agg_status);
2805 	}
2806 }
2807 
2808 void
2809 iwn_tx_done_free_txdata(struct iwn_softc *sc, struct iwn_tx_data *data)
2810 {
2811 	struct ieee80211com *ic = &sc->sc_ic;
2812 
2813 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
2814 	    BUS_DMASYNC_POSTWRITE);
2815 	bus_dmamap_unload(sc->sc_dmat, data->map);
2816 	m_freem(data->m);
2817 	data->m = NULL;
2818 	ieee80211_release_node(ic, data->ni);
2819 	data->ni = NULL;
2820 	data->totlen = 0;
2821 	data->retries = 0;
2822 	data->txfail = 0;
2823 	data->txmcs = 0;
2824 	data->ampdu_txmcs = 0;
2825 	data->txrate = 0;
2826 }
2827 
2828 void
2829 iwn_clear_oactive(struct iwn_softc *sc, struct iwn_tx_ring *ring)
2830 {
2831 	struct ieee80211com *ic = &sc->sc_ic;
2832 	struct ifnet *ifp = &ic->ic_if;
2833 
2834 	if (ring->queued < IWN_TX_RING_LOMARK) {
2835 		sc->qfullmsk &= ~(1 << ring->qid);
2836 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
2837 			ifq_clr_oactive(&ifp->if_snd);
2838 			(*ifp->if_start)(ifp);
2839 		}
2840 	}
2841 }
2842 
2843 /*
2844  * Adapter-independent backend for TX_DONE firmware notifications.
2845  * This handles Tx status for non-aggregation queues.
2846  */
2847 void
2848 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2849     uint8_t ackfailcnt, uint8_t rate, int txfail, int qid, uint16_t len)
2850 {
2851 	struct ieee80211com *ic = &sc->sc_ic;
2852 	struct ifnet *ifp = &ic->ic_if;
2853 	struct iwn_tx_ring *ring = &sc->txq[qid];
2854 	struct iwn_tx_data *data = &ring->data[desc->idx];
2855 	struct iwn_node *wn = (void *)data->ni;
2856 
2857 	if (data->ni == NULL)
2858 		return;
2859 
2860 	if (data->ni->ni_flags & IEEE80211_NODE_HT) {
2861 		if (ic->ic_state == IEEE80211_S_RUN &&
2862 		    ic->ic_fixed_mcs == -1 &&
2863 		    data->txmcs == data->ni->ni_txmcs) {
2864 			wn->mn.frames++;
2865 			wn->mn.ampdu_size = len;
2866 			wn->mn.agglen = 1;
2867 			if (ackfailcnt > 0)
2868 				wn->mn.retries++;
2869 			if (txfail)
2870 				wn->mn.txfail++;
2871 			iwn_mira_choose(sc, data->ni);
2872 		}
2873 	} else if (data->txrate == data->ni->ni_txrate) {
2874 		wn->amn.amn_txcnt++;
2875 		if (ackfailcnt > 0)
2876 			wn->amn.amn_retrycnt++;
2877 		if (txfail)
2878 			wn->amn.amn_retrycnt++;
2879 	}
2880 	if (txfail)
2881 		ifp->if_oerrors++;
2882 
2883 	iwn_tx_done_free_txdata(sc, data);
2884 
2885 	sc->sc_tx_timer = 0;
2886 	ring->queued--;
2887 	iwn_clear_oactive(sc, ring);
2888 }
2889 
2890 /*
2891  * Process a "command done" firmware notification.  This is where we wakeup
2892  * processes waiting for a synchronous command completion.
2893  */
2894 void
2895 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2896 {
2897 	struct iwn_tx_ring *ring = &sc->txq[4];
2898 	struct iwn_tx_data *data;
2899 
2900 	if ((desc->qid & 0xf) != 4)
2901 		return;	/* Not a command ack. */
2902 
2903 	data = &ring->data[desc->idx];
2904 
2905 	/* If the command was mapped in an mbuf, free it. */
2906 	if (data->m != NULL) {
2907 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2908 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2909 		bus_dmamap_unload(sc->sc_dmat, data->map);
2910 		m_freem(data->m);
2911 		data->m = NULL;
2912 	}
2913 	wakeup(&ring->desc[desc->idx]);
2914 }
2915 
2916 /*
2917  * Process an INT_FH_RX or INT_SW_RX interrupt.
2918  */
2919 void
2920 iwn_notif_intr(struct iwn_softc *sc)
2921 {
2922 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2923 	struct iwn_ops *ops = &sc->ops;
2924 	struct ieee80211com *ic = &sc->sc_ic;
2925 	struct ifnet *ifp = &ic->ic_if;
2926 	uint16_t hw;
2927 
2928 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
2929 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
2930 
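	/*
	 * closed_count reflects how far the firmware has advanced in the
	 * RX ring; process entries until our read index catches up.
	 */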
2931 	hw = letoh16(sc->rxq.stat->closed_count) & 0xfff;
2932 	while (sc->rxq.cur != hw) {
2933 		struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2934 		struct iwn_rx_desc *desc;
2935 
2936 		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof (*desc),
2937 		    BUS_DMASYNC_POSTREAD);
2938 		desc = mtod(data->m, struct iwn_rx_desc *);
2939 
2940 		DPRINTFN(4, ("notification qid=%d idx=%d flags=%x type=%d\n",
2941 		    desc->qid & 0xf, desc->idx, desc->flags, desc->type));
2942 
2943 		if (!(desc->qid & 0x80))	/* Reply to a command. */
2944 			iwn_cmd_done(sc, desc);
2945 
2946 		switch (desc->type) {
2947 		case IWN_RX_PHY:
2948 			iwn_rx_phy(sc, desc, data);
2949 			break;
2950 
2951 		case IWN_RX_DONE:		/* 4965AGN only. */
2952 		case IWN_MPDU_RX_DONE:
2953 			/* An 802.11 frame has been received. */
2954 			iwn_rx_done(sc, desc, data, &ml);
2955 			break;
2956 		case IWN_RX_COMPRESSED_BA:
2957 			/* A Compressed BlockAck has been received. */
2958 			iwn_rx_compressed_ba(sc, desc, data);
2959 			break;
2960 		case IWN_TX_DONE:
2961 			/* An 802.11 frame has been transmitted. */
2962 			ops->tx_done(sc, desc, data);
2963 			break;
2964 
2965 		case IWN_RX_STATISTICS:
2966 		case IWN_BEACON_STATISTICS:
2967 			iwn_rx_statistics(sc, desc, data);
2968 			break;
2969 
2970 		case IWN_BEACON_MISSED:
2971 		{
2972 			struct iwn_beacon_missed *miss =
2973 			    (struct iwn_beacon_missed *)(desc + 1);
2974 			uint32_t missed;
2975 
2976 			if ((ic->ic_opmode != IEEE80211_M_STA) ||
2977 			    (ic->ic_state != IEEE80211_S_RUN))
2978 				break;
2979 
2980 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2981 			    sizeof (*miss), BUS_DMASYNC_POSTREAD);
2982 			missed = letoh32(miss->consecutive);
2983 
2984 			/*
2985 			 * If more than 5 consecutive beacons are missed,
2986 			 * reinitialize the sensitivity state machine.
2987 			 */
2988 			if (missed > 5)
2989 				(void)iwn_init_sensitivity(sc);
2990 
2991 			/*
2992 			 * Rather than go directly to scan state, try to send a
2993 			 * directed probe request first. If that fails then the
2994 			 * state machine will drop us into scanning after timing
2995 			 * out waiting for a probe response.
2996 			 */
2997 			if (missed > ic->ic_bmissthres && !ic->ic_mgt_timer) {
2998 				if (ic->ic_if.if_flags & IFF_DEBUG)
2999 					printf("%s: receiving no beacons from "
3000 					    "%s; checking if this AP is still "
3001 					    "responding to probe requests\n",
3002 					    sc->sc_dev.dv_xname, ether_sprintf(
3003 					    ic->ic_bss->ni_macaddr));
3004 				IEEE80211_SEND_MGMT(ic, ic->ic_bss,
3005 				    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
3006 			}
3007 			break;
3008 		}
3009 		case IWN_UC_READY:
3010 		{
3011 			struct iwn_ucode_info *uc =
3012 			    (struct iwn_ucode_info *)(desc + 1);
3013 
3014 			/* The microcontroller is ready. */
3015 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
3016 			    sizeof (*uc), BUS_DMASYNC_POSTREAD);
3017 			DPRINTF(("microcode alive notification version=%d.%d "
3018 			    "subtype=%x alive=%x\n", uc->major, uc->minor,
3019 			    uc->subtype, letoh32(uc->valid)));
3020 
3021 			if (letoh32(uc->valid) != 1) {
3022 				printf("%s: microcontroller initialization "
3023 				    "failed\n", sc->sc_dev.dv_xname);
3024 				break;
3025 			}
3026 			if (uc->subtype == IWN_UCODE_INIT) {
3027 				/* Save microcontroller report. */
3028 				memcpy(&sc->ucode_info, uc, sizeof (*uc));
3029 			}
3030 			/* Save the address of the error log in SRAM. */
3031 			sc->errptr = letoh32(uc->errptr);
3032 			break;
3033 		}
3034 		case IWN_STATE_CHANGED:
3035 		{
3036 			uint32_t *status = (uint32_t *)(desc + 1);
3037 
3038 			/* Enabled/disabled notification. */
3039 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
3040 			    sizeof (*status), BUS_DMASYNC_POSTREAD);
3041 			DPRINTF(("state changed to %x\n", letoh32(*status)));
3042 
3043 			if (letoh32(*status) & 1) {
3044 				/* Radio transmitter is off, power down. */
3045 				iwn_stop(ifp);
3046 				return;	/* No further processing. */
3047 			}
3048 			break;
3049 		}
3050 		case IWN_START_SCAN:
3051 		{
3052 			struct iwn_start_scan *scan =
3053 			    (struct iwn_start_scan *)(desc + 1);
3054 
3055 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
3056 			    sizeof (*scan), BUS_DMASYNC_POSTREAD);
3057 			DPRINTFN(2, ("scan start: chan %d status %x\n",
3058 			    scan->chan, letoh32(scan->status)));
3059 
3060 			if (sc->sc_flags & IWN_FLAG_BGSCAN)
3061 				break;
3062 
3063 			/* Fix current channel. */
3064 			ic->ic_bss->ni_chan = &ic->ic_channels[scan->chan];
3065 			break;
3066 		}
3067 		case IWN_STOP_SCAN:
3068 		{
3069 			struct iwn_stop_scan *scan =
3070 			    (struct iwn_stop_scan *)(desc + 1);
3071 
3072 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
3073 			    sizeof (*scan), BUS_DMASYNC_POSTREAD);
3074 			DPRINTFN(2, ("scan stop: nchan=%d status=%d chan=%d\n",
3075 			    scan->nchan, scan->status, scan->chan));
3076 
3077 			if (scan->status == 1 && scan->chan <= 14 &&
3078 			    (sc->sc_flags & IWN_FLAG_HAS_5GHZ)) {
3079 				int error;
3080 				/*
3081 				 * We just finished scanning 2GHz channels,
3082 				 * start scanning 5GHz ones.
3083 				 */
3084 				error = iwn_scan(sc, IEEE80211_CHAN_5GHZ,
3085 				    (sc->sc_flags & IWN_FLAG_BGSCAN) ? 1 : 0);
3086 				if (error == 0)
3087 					break;
3088 			}
3089 			sc->sc_flags &= ~IWN_FLAG_SCANNING;
3090 			sc->sc_flags &= ~IWN_FLAG_BGSCAN;
3091 			ieee80211_end_scan(ifp);
3092 			break;
3093 		}
3094 		case IWN5000_CALIBRATION_RESULT:
3095 			iwn5000_rx_calib_results(sc, desc, data);
3096 			break;
3097 
3098 		case IWN5000_CALIBRATION_DONE:
3099 			sc->sc_flags |= IWN_FLAG_CALIB_DONE;
3100 			wakeup(sc);
3101 			break;
3102 		}
3103 
3104 		sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
3105 	}
3106 	if_input(&sc->sc_ic.ic_if, &ml);
3107 
3108 	/* Tell the firmware what we have processed. */
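	/* The RX write pointer must apparently stay 8-aligned ("& ~7"). */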
3109 	hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
3110 	IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
3111 }
3112 
3113 /*
3114  * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
3115  * from power-down sleep mode.
3116  */
3117 void
3118 iwn_wakeup_intr(struct iwn_softc *sc)
3119 {
3120 	int qid;
3121 
3122 	DPRINTF(("ucode wakeup from power-down sleep\n"));
3123 
3124 	/* Wakeup RX and TX rings. */
3125 	IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
3126 	for (qid = 0; qid < sc->ntxqs; qid++) {
3127 		struct iwn_tx_ring *ring = &sc->txq[qid];
3128 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
3129 	}
3130 }
3131 
3132 #ifdef IWN_DEBUG
3133 /*
3134  * Dump the error log of the firmware when a firmware panic occurs.  Although
3135  * we can't debug the firmware because it is neither open source nor free, it
3136  * can help us to identify certain classes of problems.
3137  */
3138 void
3139 iwn_fatal_intr(struct iwn_softc *sc)
3140 {
3141 	struct iwn_fw_dump dump;
3142 	int i;
3143 
3144 	/* Check that the error log address is valid. */
3145 	if (sc->errptr < IWN_FW_DATA_BASE ||
3146 	    sc->errptr + sizeof (dump) >
3147 	    IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
3148 		printf("%s: bad firmware error log address 0x%08x\n",
3149 		    sc->sc_dev.dv_xname, sc->errptr);
3150 		return;
3151 	}
3152 	if (iwn_nic_lock(sc) != 0) {
3153 		printf("%s: could not read firmware error log\n",
3154 		    sc->sc_dev.dv_xname);
3155 		return;
3156 	}
3157 	/* Read firmware error log from SRAM. */
3158 	iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
3159 	    sizeof (dump) / sizeof (uint32_t));
3160 	iwn_nic_unlock(sc);
3161 
3162 	if (dump.valid == 0) {
3163 		printf("%s: firmware error log is empty\n",
3164 		    sc->sc_dev.dv_xname);
3165 		return;
3166 	}
3167 	printf("firmware error log:\n");
3168 	printf("  error type      = \"%s\" (0x%08X)\n",
3169 	    (dump.id < nitems(iwn_fw_errmsg)) ?
3170 		iwn_fw_errmsg[dump.id] : "UNKNOWN",
3171 	    dump.id);
3172 	printf("  program counter = 0x%08X\n", dump.pc);
3173 	printf("  source line     = 0x%08X\n", dump.src_line);
3174 	printf("  error data      = 0x%08X%08X\n",
3175 	    dump.error_data[0], dump.error_data[1]);
3176 	printf("  branch link     = 0x%08X%08X\n",
3177 	    dump.branch_link[0], dump.branch_link[1]);
3178 	printf("  interrupt link  = 0x%08X%08X\n",
3179 	    dump.interrupt_link[0], dump.interrupt_link[1]);
3180 	printf("  time            = %u\n", dump.time[0]);
3181 
3182 	/* Dump driver status (TX and RX rings) while we're here. */
3183 	printf("driver status:\n");
3184 	for (i = 0; i < sc->ntxqs; i++) {
3185 		struct iwn_tx_ring *ring = &sc->txq[i];
3186 		printf("  tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
3187 		    i, ring->qid, ring->cur, ring->queued);
3188 	}
3189 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
3190 	printf("  802.11 state %d\n", sc->sc_ic.ic_state);
3191 }
3192 #endif
3193 
3194 int
3195 iwn_intr(void *arg)
3196 {
3197 	struct iwn_softc *sc = arg;
3198 	struct ifnet *ifp = &sc->sc_ic.ic_if;
3199 	uint32_t r1, r2, tmp;
3200 
3201 	/* Disable interrupts. */
3202 	IWN_WRITE(sc, IWN_INT_MASK, 0);
3203 
3204 	/* Read interrupts from ICT (fast) or from registers (slow). */
3205 	if (sc->sc_flags & IWN_FLAG_USE_ICT) {
3206 		tmp = 0;
3207 		while (sc->ict[sc->ict_cur] != 0) {
3208 			tmp |= sc->ict[sc->ict_cur];
3209 			sc->ict[sc->ict_cur] = 0;	/* Acknowledge. */
3210 			sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
3211 		}
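		/*
		 * Each ICT entry packs IWN_INT bits 0-7 in its low byte and
		 * bits 24-31 in its second byte; the shifting below rebuilds
		 * the IWN_INT register layout.
		 */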
3212 		tmp = letoh32(tmp);
3213 		if (tmp == 0xffffffff)	/* Shouldn't happen. */
3214 			tmp = 0;
3215 		else if (tmp & 0xc0000)	/* Workaround a HW bug. */
3216 			tmp |= 0x8000;
3217 		r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
3218 		r2 = 0;	/* Unused. */
3219 	} else {
3220 		r1 = IWN_READ(sc, IWN_INT);
3221 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
3222 			return 0;	/* Hardware gone! */
3223 		r2 = IWN_READ(sc, IWN_FH_INT);
3224 	}
3225 	if (r1 == 0 && r2 == 0) {
3226 		if (ifp->if_flags & IFF_UP)
3227 			IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
3228 		return 0;	/* Interrupt not for us. */
3229 	}
3230 
3231 	/* Acknowledge interrupts. */
3232 	IWN_WRITE(sc, IWN_INT, r1);
3233 	if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
3234 		IWN_WRITE(sc, IWN_FH_INT, r2);
3235 
3236 	if (r1 & IWN_INT_RF_TOGGLED) {
3237 		tmp = IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL;
3238 		printf("%s: RF switch: radio %s\n", sc->sc_dev.dv_xname,
3239 		    tmp ? "enabled" : "disabled");
3240 		if (tmp)
3241 			task_add(systq, &sc->init_task);
3242 	}
3243 	if (r1 & IWN_INT_CT_REACHED) {
3244 		printf("%s: critical temperature reached!\n",
3245 		    sc->sc_dev.dv_xname);
3246 	}
3247 	if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
3248 		printf("%s: fatal firmware error\n", sc->sc_dev.dv_xname);
3249 
3250 		/* Force a complete recalibration on next init. */
3251 		sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
3252 
3253 		/* Dump firmware error log and stop. */
3254 #ifdef IWN_DEBUG
3255 		iwn_fatal_intr(sc);
3256 #endif
3257 		iwn_stop(ifp);
3258 		task_add(systq, &sc->init_task);
3259 		return 1;
3260 	}
3261 	if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
3262 	    (r2 & IWN_FH_INT_RX)) {
3263 		if (sc->sc_flags & IWN_FLAG_USE_ICT) {
3264 			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
3265 				IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
3266 			IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3267 			    IWN_INT_PERIODIC_DIS);
3268 			iwn_notif_intr(sc);
3269 			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
3270 				IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3271 				    IWN_INT_PERIODIC_ENA);
3272 			}
3273 		} else
3274 			iwn_notif_intr(sc);
3275 	}
3276 
3277 	if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
3278 		if (sc->sc_flags & IWN_FLAG_USE_ICT)
3279 			IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
3280 		wakeup(sc);	/* FH DMA transfer completed. */
3281 	}
3282 
3283 	if (r1 & IWN_INT_ALIVE)
3284 		wakeup(sc);	/* Firmware is alive. */
3285 
3286 	if (r1 & IWN_INT_WAKEUP)
3287 		iwn_wakeup_intr(sc);
3288 
3289 	/* Re-enable interrupts. */
3290 	if (ifp->if_flags & IFF_UP)
3291 		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
3292 
3293 	return 1;
3294 }
3295 
3296 /*
3297  * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
3298  * 5000 adapters use slightly different formats).
3299  */
3300 void
3301 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3302     uint16_t len)
3303 {
3304 	uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
3305 
3306 	*w = htole16(len + 8);
3307 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
3308 	    (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t),
3309 	    BUS_DMASYNC_PREWRITE);
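	/*
	 * The first IWN_SCHED_WINSZ entries appear to be mirrored past the
	 * end of the ring so the scheduler can read a whole window without
	 * wrapping; keep the shadow copy in sync.
	 */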
3310 	if (idx < IWN_SCHED_WINSZ) {
3311 		*(w + IWN_TX_RING_COUNT) = *w;
3312 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
3313 		    (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr,
3314 		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
3315 	}
3316 }
3317 
3318 void
3319 iwn4965_reset_sched(struct iwn_softc *sc, int qid, int idx)
3320 {
3321 	/* TBD */
3322 }
3323 
3324 void
3325 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3326     uint16_t len)
3327 {
3328 	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
3329 
3330 	*w = htole16(id << 12 | (len + 8));
3331 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
3332 	    (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t),
3333 	    BUS_DMASYNC_PREWRITE);
3334 	if (idx < IWN_SCHED_WINSZ) {
3335 		*(w + IWN_TX_RING_COUNT) = *w;
3336 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
3337 		    (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr,
3338 		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
3339 	}
3340 }
3341 
3342 void
3343 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
3344 {
3345 	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
3346 
3347 	*w = (*w & htole16(0xf000)) | htole16(1);
3348 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
3349 	    (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t),
3350 	    BUS_DMASYNC_PREWRITE);
3351 	if (idx < IWN_SCHED_WINSZ) {
3352 		*(w + IWN_TX_RING_COUNT) = *w;
3353 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
3354 		    (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr,
3355 		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
3356 	}
3357 }
3358 
3359 int
3360 iwn_rval2ridx(int rval)
3361 {
3362 	int ridx;
3363 
3364 	for (ridx = 0; ridx < nitems(iwn_rates); ridx++) {
3365 		if (rval == iwn_rates[ridx].rate)
3366 			break;
3367 	}
3368 
3369 	return ridx;
3370 }
3371 
3372 int
3373 iwn_tx(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
3374 {
3375 	struct iwn_ops *ops = &sc->ops;
3376 	struct ieee80211com *ic = &sc->sc_ic;
3377 	struct iwn_node *wn = (void *)ni;
3378 	struct iwn_tx_ring *ring;
3379 	struct iwn_tx_desc *desc;
3380 	struct iwn_tx_data *data;
3381 	struct iwn_tx_cmd *cmd;
3382 	struct iwn_cmd_data *tx;
3383 	const struct iwn_rate *rinfo;
3384 	struct ieee80211_frame *wh;
3385 	struct ieee80211_key *k = NULL;
3386 	enum ieee80211_edca_ac ac;
3387 	int qid;
3388 	uint32_t flags;
3389 	uint16_t qos;
3390 	u_int hdrlen;
3391 	bus_dma_segment_t *seg;
3392 	uint8_t *ivp, tid, ridx, txant, type, subtype;
3393 	int i, totlen, hasqos, error, pad;
3394 
3395 	wh = mtod(m, struct ieee80211_frame *);
3396 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3397 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3398 	if (type == IEEE80211_FC0_TYPE_CTL)
3399 		hdrlen = sizeof(struct ieee80211_frame_min);
3400 	else
3401 		hdrlen = ieee80211_get_hdrlen(wh);
3402 
3403 	if ((hasqos = ieee80211_has_qos(wh))) {
3404 		/* Select EDCA Access Category and TX ring for this frame. */
3405 		struct ieee80211_tx_ba *ba;
3406 		qos = ieee80211_get_qos(wh);
3407 		tid = qos & IEEE80211_QOS_TID;
3408 		ac = ieee80211_up_to_ac(ic, tid);
3409 		qid = ac;
3410 
3411 		/* If possible, put this frame on an aggregation queue. */
3412 		if (sc->sc_tx_ba[tid].wn == wn) {
3413 			ba = &ni->ni_tx_ba[tid];
3414 			if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3415 			    ba->ba_state == IEEE80211_BA_AGREED) {
3416 				qid = sc->first_agg_txq + tid;
3417 				if (sc->qfullmsk & (1 << qid)) {
3418 					m_freem(m);
3419 					return ENOBUFS;
3420 				}
3421 			}
3422 		}
3423 	} else {
3424 		qos = 0;
3425 		tid = IWN_NONQOS_TID;
3426 		ac = EDCA_AC_BE;
3427 		qid = ac;
3428 	}
3429 
3430 	ring = &sc->txq[qid];
3431 	desc = &ring->desc[ring->cur];
3432 	data = &ring->data[ring->cur];
3433 
3434 	/* Choose a TX rate index. */
3435 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3436 	    type != IEEE80211_FC0_TYPE_DATA)
3437 		ridx = iwn_rval2ridx(ieee80211_min_basic_rate(ic));
3438 	else if (ic->ic_fixed_mcs != -1)
3439 		ridx = sc->fixed_ridx;
3440 	else if (ic->ic_fixed_rate != -1)
3441 		ridx = sc->fixed_ridx;
3442 	else {
3443 		if (ni->ni_flags & IEEE80211_NODE_HT)
3444 			ridx = iwn_mcs2ridx[ni->ni_txmcs];
3445 		else
3446 			ridx = wn->ridx[ni->ni_txrate];
3447 	}
3448 	rinfo = &iwn_rates[ridx];
3449 #if NBPFILTER > 0
3450 	if (sc->sc_drvbpf != NULL) {
3451 		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3452 		uint16_t chan_flags;
3453 
3454 		tap->wt_flags = 0;
3455 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3456 		chan_flags = ni->ni_chan->ic_flags;
3457 		if (ic->ic_curmode != IEEE80211_MODE_11N)
3458 			chan_flags &= ~IEEE80211_CHAN_HT;
3459 		tap->wt_chan_flags = htole16(chan_flags);
3460 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3461 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3462 		    type == IEEE80211_FC0_TYPE_DATA) {
3463 			tap->wt_rate = (0x80 | ni->ni_txmcs);
3464 		} else
3465 			tap->wt_rate = rinfo->rate;
3466 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
3467 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
3468 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3469 
3470 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
3471 		    m, BPF_DIRECTION_OUT);
3472 	}
3473 #endif
3474 
3475 	totlen = m->m_pkthdr.len;
3476 
3477 	/* Encrypt the frame if need be. */
3478 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3479 		/* Retrieve key for TX. */
3480 		k = ieee80211_get_txkey(ic, wh, ni);
3481 		if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
3482 			/* Do software encryption. */
3483 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
3484 				return ENOBUFS;
3485 			/* 802.11 header may have moved. */
3486 			wh = mtod(m, struct ieee80211_frame *);
3487 			totlen = m->m_pkthdr.len;
3488 
3489 		} else	/* HW appends CCMP MIC. */
3490 			totlen += IEEE80211_CCMP_HDRLEN;
3491 	}
3492 
3493 	data->totlen = totlen;
3494 
3495 	/* Prepare TX firmware command. */
3496 	cmd = &ring->cmd[ring->cur];
3497 	cmd->code = IWN_CMD_TX_DATA;
3498 	cmd->flags = 0;
3499 	cmd->qid = ring->qid;
3500 	cmd->idx = ring->cur;
3501 
3502 	tx = (struct iwn_cmd_data *)cmd->data;
3503 	/* NB: No need to clear tx, all fields are reinitialized here. */
3504 	tx->scratch = 0;	/* clear "scratch" area */
3505 
3506 	flags = 0;
3507 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3508 		/* Unicast frame, check if an ACK is expected. */
3509 		if (!hasqos || (qos & IEEE80211_QOS_ACK_POLICY_MASK) !=
3510 		    IEEE80211_QOS_ACK_POLICY_NOACK)
3511 			flags |= IWN_TX_NEED_ACK;
3512 	}
3513 	if (type == IEEE80211_FC0_TYPE_CTL &&
3514 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
3515 		struct ieee80211_frame_min *mwh;
3516 		uint8_t *barfrm;
3517 		uint16_t ctl;
3518 		mwh = mtod(m, struct ieee80211_frame_min *);
3519 		barfrm = (uint8_t *)&mwh[1];
3520 		ctl = LE_READ_2(barfrm);
3521 		tid = (ctl & IEEE80211_BA_TID_INFO_MASK) >>
3522 		    IEEE80211_BA_TID_INFO_SHIFT;
3523 		flags |= (IWN_TX_NEED_ACK | IWN_TX_IMM_BA);
3524 	}
3525 
3526 	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
3527 		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */
3528 
3529 	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
3530 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3531 		int rtsthres = ic->ic_rtsthreshold;
3532 		if (ni->ni_flags & IEEE80211_NODE_HT)
3533 			rtsthres = ieee80211_mira_get_rts_threshold(&wn->mn,
3534 			    ic, ni, totlen + IEEE80211_CRC_LEN);
3535 
3536 		/* NB: Group frames are sent using CCK in 802.11b/g/n (2GHz). */
3537 		if (totlen + IEEE80211_CRC_LEN > rtsthres) {
3538 			flags |= IWN_TX_NEED_RTS;
3539 		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
3540 		    ridx >= IWN_RIDX_OFDM6) {
3541 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
3542 				flags |= IWN_TX_NEED_CTS;
3543 			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
3544 				flags |= IWN_TX_NEED_RTS;
3545 		}
3546 
3547 		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
3548 			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3549 				/* 5000 autoselects RTS/CTS or CTS-to-self. */
3550 				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
3551 				flags |= IWN_TX_NEED_PROTECTION;
3552 			} else
3553 				flags |= IWN_TX_FULL_TXOP;
3554 		}
3555 	}
3556 
3557 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3558 	    type != IEEE80211_FC0_TYPE_DATA)
3559 		tx->id = sc->broadcast_id;
3560 	else
3561 		tx->id = wn->id;
3562 
3563 	if (type == IEEE80211_FC0_TYPE_MGT) {
3564 #ifndef IEEE80211_STA_ONLY
3565 		/* Tell HW to set timestamp in probe responses. */
3566 		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3567 			flags |= IWN_TX_INSERT_TSTAMP;
3568 #endif
3569 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3570 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3571 			tx->timeout = htole16(3);
3572 		else
3573 			tx->timeout = htole16(2);
3574 	} else
3575 		tx->timeout = htole16(0);
3576 
3577 	if (hdrlen & 3) {
3578 		/* First segment length must be a multiple of 4. */
3579 		flags |= IWN_TX_NEED_PADDING;
3580 		pad = 4 - (hdrlen & 3);
3581 	} else
3582 		pad = 0;
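	/*
	 * e.g. a 26-byte QoS data header (24 + 2) leaves hdrlen & 3 == 2,
	 * so pad = 2 and the first DMA segment (TX command plus 802.11
	 * header) is extended to a multiple of 4 bytes.
	 */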
3583 
3584 	tx->len = htole16(totlen);
3585 	tx->tid = tid;
3586 	tx->rts_ntries = 60;
3587 	tx->data_ntries = 15;
3588 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3589 
3590 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3591 	    tx->id != sc->broadcast_id)
3592 		tx->plcp = rinfo->ht_plcp;
3593 	else
3594 		tx->plcp = rinfo->plcp;
3595 
3596 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3597 	    tx->id != sc->broadcast_id) {
3598 		tx->rflags = rinfo->ht_flags;
3599 		if (ni->ni_htcaps & IEEE80211_HTCAP_SGI20)
3600 			tx->rflags |= IWN_RFLAG_SGI;
3601 	}
3602 	else
3603 		tx->rflags = rinfo->flags;
3604 	/*
3605 	 * Keep the Tx rate constant while mira is probing, or if this is
3606 	 * an aggregation queue in which case a fixed Tx rate works around
3607 	 * FIFO_UNDERRUN Tx errors.
3608 	 */
3609 	if (tx->id == sc->broadcast_id || ieee80211_mira_is_probing(&wn->mn) ||
3610 	    qid >= sc->first_agg_txq ||
3611 	    ic->ic_fixed_mcs != -1 || ic->ic_fixed_rate != -1) {
3612 		/* Group or management frame, probing, aggregation queue, or fixed Tx rate. */
3613 		tx->linkq = 0;
3614 		/* XXX Alternate between antenna A and B? */
3615 		txant = IWN_LSB(sc->txchainmask);
3616 		tx->rflags |= IWN_RFLAG_ANT(txant);
3617 	} else {
3618 		tx->linkq = 0; /* initial index into firmware LQ retry table */
3619 		flags |= IWN_TX_LINKQ;	/* enable multi-rate retry */
3620 	}
3621 	/* Set physical address of "scratch area". */
3622 	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3623 	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3624 
3625 	/* Copy 802.11 header in TX command. */
3626 	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3627 
3628 	if (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) {
3629 		/* Trim 802.11 header and prepend CCMP IV. */
3630 		m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN);
3631 		ivp = mtod(m, uint8_t *);
3632 		k->k_tsc++;
3633 		ivp[0] = k->k_tsc;
3634 		ivp[1] = k->k_tsc >> 8;
3635 		ivp[2] = 0;
3636 		ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV;
3637 		ivp[4] = k->k_tsc >> 16;
3638 		ivp[5] = k->k_tsc >> 24;
3639 		ivp[6] = k->k_tsc >> 32;
3640 		ivp[7] = k->k_tsc >> 40;
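		/*
		 * ivp[0..7] now holds the 8-byte CCMP header: PN0, PN1,
		 * a reserved byte, the key ID with the ExtIV bit set,
		 * and PN2..PN5.
		 */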
3641 
3642 		tx->security = IWN_CIPHER_CCMP;
3643 		if (qid >= sc->first_agg_txq)
3644 			flags |= IWN_TX_AMPDU_CCMP;
3645 		memcpy(tx->key, k->k_key, k->k_len);
3646 
3647 		/* The 5000 Series TX scheduler byte count includes the CCMP MIC length. */
3648 		if (sc->hw_type != IWN_HW_REV_TYPE_4965)
3649 			totlen += IEEE80211_CCMP_MICLEN;
3650 	} else {
3651 		/* Trim 802.11 header. */
3652 		m_adj(m, hdrlen);
3653 		tx->security = 0;
3654 	}
3655 	tx->flags = htole32(flags);
3656 
3657 	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3658 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3659 	if (error != 0 && error != EFBIG) {
3660 		printf("%s: can't map mbuf (error %d)\n",
3661 		    sc->sc_dev.dv_xname, error);
3662 		m_freem(m);
3663 		return error;
3664 	}
3665 	if (error != 0) {
3666 		/* Too many DMA segments, linearize mbuf. */
3667 		if (m_defrag(m, M_DONTWAIT)) {
3668 			m_freem(m);
3669 			return ENOBUFS;
3670 		}
3671 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3672 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3673 		if (error != 0) {
3674 			printf("%s: can't map mbuf (error %d)\n",
3675 			    sc->sc_dev.dv_xname, error);
3676 			m_freem(m);
3677 			return error;
3678 		}
3679 	}
3680 
3681 	data->m = m;
3682 	data->ni = ni;
3683 	data->txmcs = ni->ni_txmcs;
3684 	data->txrate = ni->ni_txrate;
3685 	data->ampdu_txmcs = ni->ni_txmcs; /* updated upon Tx interrupt */
3686 
3687 	DPRINTFN(4, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
3688 	    ring->qid, ring->cur, m->m_pkthdr.len, data->map->dm_nsegs));
3689 
3690 	/* Fill TX descriptor. */
3691 	desc->nsegs = 1 + data->map->dm_nsegs;
3692 	/* First DMA segment is used by the TX command. */
3693 	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3694 	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
3695 	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
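	/*
	 * Each 16-bit segment length field packs the length into its
	 * upper 12 bits; the low bits carry IWN_HIADDR(), the upper
	 * bits of the DMA address.
	 */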
3696 	/* Other DMA segments are for data payload. */
3697 	seg = data->map->dm_segs;
3698 	for (i = 1; i <= data->map->dm_nsegs; i++) {
3699 		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
3700 		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
3701 		    seg->ds_len << 4);
3702 		seg++;
3703 	}
3704 
3705 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
3706 	    BUS_DMASYNC_PREWRITE);
3707 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
3708 	    (caddr_t)cmd - ring->cmd_dma.vaddr, sizeof (*cmd),
3709 	    BUS_DMASYNC_PREWRITE);
3710 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3711 	    (caddr_t)desc - ring->desc_dma.vaddr, sizeof (*desc),
3712 	    BUS_DMASYNC_PREWRITE);
3713 
3714 	/* Update TX scheduler. */
3715 	ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3716 
3717 	/* Kick TX ring. */
3718 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3719 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3720 
3721 	/* Mark TX ring as full if we reach a certain threshold. */
3722 	if (++ring->queued > IWN_TX_RING_HIMARK)
3723 		sc->qfullmsk |= 1 << ring->qid;
3724 
3725 	return 0;
3726 }
3727 
3728 void
3729 iwn_start(struct ifnet *ifp)
3730 {
3731 	struct iwn_softc *sc = ifp->if_softc;
3732 	struct ieee80211com *ic = &sc->sc_ic;
3733 	struct ieee80211_node *ni;
3734 	struct mbuf *m;
3735 
3736 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
3737 		return;
3738 
3739 	for (;;) {
3740 		if (sc->qfullmsk != 0) {
3741 			ifq_set_oactive(&ifp->if_snd);
3742 			break;
3743 		}
3744 
3745 		/* Send pending management frames first. */
3746 		m = mq_dequeue(&ic->ic_mgtq);
3747 		if (m != NULL) {
3748 			ni = m->m_pkthdr.ph_cookie;
3749 			goto sendit;
3750 		}
3751 		if (ic->ic_state != IEEE80211_S_RUN ||
3752 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
3753 			break;
3754 
3755 		/* Encapsulate and send data frames. */
3756 		m = ifq_dequeue(&ifp->if_snd);
3757 		if (m == NULL)
3758 			break;
3759 #if NBPFILTER > 0
3760 		if (ifp->if_bpf != NULL)
3761 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
3762 #endif
3763 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL)
3764 			continue;
3765 sendit:
3766 #if NBPFILTER > 0
3767 		if (ic->ic_rawbpf != NULL)
3768 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
3769 #endif
3770 		if (iwn_tx(sc, m, ni) != 0) {
3771 			ieee80211_release_node(ic, ni);
3772 			ifp->if_oerrors++;
3773 			continue;
3774 		}
3775 
3776 		sc->sc_tx_timer = 5;
3777 		ifp->if_timer = 1;
3778 	}
3779 }
3780 
3781 void
3782 iwn_watchdog(struct ifnet *ifp)
3783 {
3784 	struct iwn_softc *sc = ifp->if_softc;
3785 
3786 	ifp->if_timer = 0;
3787 
3788 	if (sc->sc_tx_timer > 0) {
3789 		if (--sc->sc_tx_timer == 0) {
3790 			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
3791 			iwn_stop(ifp);
3792 			ifp->if_oerrors++;
3793 			return;
3794 		}
3795 		ifp->if_timer = 1;
3796 	}
3797 
3798 	ieee80211_watchdog(ifp);
3799 }
3800 
3801 int
3802 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
3803 {
3804 	struct iwn_softc *sc = ifp->if_softc;
3805 	struct ieee80211com *ic = &sc->sc_ic;
3806 	int s, error = 0;
3807 
3808 	error = rw_enter(&sc->sc_rwlock, RW_WRITE | RW_INTR);
3809 	if (error)
3810 		return error;
3811 	s = splnet();
3812 
3813 	switch (cmd) {
3814 	case SIOCSIFADDR:
3815 		ifp->if_flags |= IFF_UP;
3816 		/* FALLTHROUGH */
3817 	case SIOCSIFFLAGS:
3818 		if (ifp->if_flags & IFF_UP) {
3819 			if (!(ifp->if_flags & IFF_RUNNING))
3820 				error = iwn_init(ifp);
3821 		} else {
3822 			if (ifp->if_flags & IFF_RUNNING)
3823 				iwn_stop(ifp);
3824 		}
3825 		break;
3826 
3827 	case SIOCS80211POWER:
3828 		error = ieee80211_ioctl(ifp, cmd, data);
3829 		if (error != ENETRESET)
3830 			break;
3831 		if (ic->ic_state == IEEE80211_S_RUN &&
3832 		    sc->calib.state == IWN_CALIB_STATE_RUN) {
3833 			if (ic->ic_flags & IEEE80211_F_PMGTON)
3834 				error = iwn_set_pslevel(sc, 0, 3, 0);
3835 			else	/* back to CAM */
3836 				error = iwn_set_pslevel(sc, 0, 0, 0);
3837 		} else {
3838 			/* Defer until transition to IWN_CALIB_STATE_RUN. */
3839 			error = 0;
3840 		}
3841 		break;
3842 
3843 	default:
3844 		error = ieee80211_ioctl(ifp, cmd, data);
3845 	}
3846 
3847 	if (error == ENETRESET) {
3848 		error = 0;
3849 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
3850 		    (IFF_UP | IFF_RUNNING)) {
3851 			iwn_stop(ifp);
3852 			error = iwn_init(ifp);
3853 		}
3854 	}
3855 
3856 	splx(s);
3857 	rw_exit_write(&sc->sc_rwlock);
3858 	return error;
3859 }
3860 
3861 /*
3862  * Send a command to the firmware.
3863  */
3864 int
3865 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
3866 {
3867 	struct iwn_ops *ops = &sc->ops;
3868 	struct iwn_tx_ring *ring = &sc->txq[4];
3869 	struct iwn_tx_desc *desc;
3870 	struct iwn_tx_data *data;
3871 	struct iwn_tx_cmd *cmd;
3872 	struct mbuf *m;
3873 	bus_addr_t paddr;
3874 	int totlen, error;
3875 
3876 	desc = &ring->desc[ring->cur];
3877 	data = &ring->data[ring->cur];
3878 	totlen = 4 + size;
3879 
3880 	if (size > sizeof cmd->data) {
3881 		/* Command is too large to fit in a descriptor. */
3882 		if (totlen > MCLBYTES)
3883 			return EINVAL;
3884 		MGETHDR(m, M_DONTWAIT, MT_DATA);
3885 		if (m == NULL)
3886 			return ENOMEM;
3887 		if (totlen > MHLEN) {
3888 			MCLGET(m, M_DONTWAIT);
3889 			if (!(m->m_flags & M_EXT)) {
3890 				m_freem(m);
3891 				return ENOMEM;
3892 			}
3893 		}
3894 		cmd = mtod(m, struct iwn_tx_cmd *);
3895 		error = bus_dmamap_load(sc->sc_dmat, data->map, cmd, totlen,
3896 		    NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3897 		if (error != 0) {
3898 			m_freem(m);
3899 			return error;
3900 		}
3901 		data->m = m;
3902 		paddr = data->map->dm_segs[0].ds_addr;
3903 	} else {
3904 		cmd = &ring->cmd[ring->cur];
3905 		paddr = data->cmd_paddr;
3906 	}
3907 
3908 	cmd->code = code;
3909 	cmd->flags = 0;
3910 	cmd->qid = ring->qid;
3911 	cmd->idx = ring->cur;
3912 	memcpy(cmd->data, buf, size);
3913 
3914 	desc->nsegs = 1;
3915 	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
3916 	desc->segs[0].len  = htole16(IWN_HIADDR(paddr) | totlen << 4);
3917 
3918 	if (size > sizeof cmd->data) {
3919 		bus_dmamap_sync(sc->sc_dmat, data->map, 0, totlen,
3920 		    BUS_DMASYNC_PREWRITE);
3921 	} else {
3922 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
3923 		    (caddr_t)cmd - ring->cmd_dma.vaddr, totlen,
3924 		    BUS_DMASYNC_PREWRITE);
3925 	}
3926 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3927 	    (caddr_t)desc - ring->desc_dma.vaddr, sizeof (*desc),
3928 	    BUS_DMASYNC_PREWRITE);
3929 
3930 	/* Update TX scheduler. */
3931 	ops->update_sched(sc, ring->qid, ring->cur, 0, 0);
3932 
3933 	/* Kick command ring. */
3934 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3935 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3936 
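	/*
	 * In synchronous mode, sleep on the descriptor until the command
	 * completion wakes us up, or give up after one second.
	 */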
3937 	return async ? 0 : tsleep_nsec(desc, PCATCH, "iwncmd", SEC_TO_NSEC(1));
3938 }
3939 
3940 int
3941 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3942 {
3943 	struct iwn4965_node_info hnode;
3944 	caddr_t src, dst;
3945 
3946 	/*
3947 	 * We use the node structure for 5000 Series internally (it is
3948 	 * a superset of the one for 4965AGN). We thus copy the common
3949 	 * fields before sending the command.
3950 	 */
3951 	src = (caddr_t)node;
3952 	dst = (caddr_t)&hnode;
3953 	memcpy(dst, src, 48);
3954 	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
3955 	memcpy(dst + 48, src + 72, 20);
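	/*
	 * Offsets: bytes 0-47 are common to both layouts, bytes 48-71 of
	 * the 5000 structure (TSC and RX/TX MIC keys) are skipped, and the
	 * remaining 20 bytes land at offset 48 in the 4965 structure.
	 */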
3956 	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
3957 }
3958 
3959 int
3960 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3961 {
3962 	/* Direct mapping. */
3963 	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
3964 }
3965 
3966 int
3967 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
3968 {
3969 	struct ieee80211com *ic = &sc->sc_ic;
3970 	struct iwn_node *wn = (void *)ni;
3971 	struct iwn_cmd_link_quality linkq;
3972 	const struct iwn_rate *rinfo;
3973 	uint8_t txant;
3974 	int i;
3975 
3976 	/* Use the first valid TX antenna. */
3977 	txant = IWN_LSB(sc->txchainmask);
3978 
3979 	memset(&linkq, 0, sizeof linkq);
3980 	linkq.id = wn->id;
3981 	linkq.antmsk_1stream = txant;
3982 	linkq.antmsk_2stream = IWN_ANT_AB;
3983 	linkq.ampdu_max = IWN_AMPDU_MAX;
3984 	linkq.ampdu_threshold = 3;
3985 	linkq.ampdu_limit = htole16(4000);	/* 4ms */
3986 
3987 	i = 0;
3988 	if (ni->ni_flags & IEEE80211_NODE_HT) {
3989 		int txmcs;
3990 		for (txmcs = ni->ni_txmcs; txmcs >= 0; txmcs--) {
3991 			rinfo = &iwn_rates[iwn_mcs2ridx[txmcs]];
3992 			linkq.retry[i].plcp = rinfo->ht_plcp;
3993 			linkq.retry[i].rflags = rinfo->ht_flags;
3994 
3995 			if (ni->ni_htcaps & IEEE80211_HTCAP_SGI20)
3996 				linkq.retry[i].rflags |= IWN_RFLAG_SGI;
3997 
3998 			/* XXX set correct ant mask for MIMO rates here */
3999 			linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
4000 
4001 			if (++i >= IWN_MAX_TX_RETRIES)
4002 				break;
4003 		}
4004 	} else {
4005 		int txrate;
4006 		for (txrate = ni->ni_txrate; txrate >= 0; txrate--) {
4007 			rinfo = &iwn_rates[wn->ridx[txrate]];
4008 			linkq.retry[i].plcp = rinfo->plcp;
4009 			linkq.retry[i].rflags = rinfo->flags;
4010 			linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
4011 			if (++i >= IWN_MAX_TX_RETRIES)
4012 				break;
4013 		}
4014 	}
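	/*
	 * The retry table is filled with progressively lower rates below
	 * the current TX rate; the firmware walks it starting at the
	 * index given in the TX command's linkq field.
	 */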
4015 
4016 	/* Fill the rest with the lowest basic rate. */
4017 	rinfo = &iwn_rates[iwn_rval2ridx(ieee80211_min_basic_rate(ic))];
4018 	while (i < IWN_MAX_TX_RETRIES) {
4019 		linkq.retry[i].plcp = rinfo->plcp;
4020 		linkq.retry[i].rflags = rinfo->flags;
4021 		linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
4022 		i++;
4023 	}
4024 
4025 	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
4026 }
4027 
4028 /*
4029  * Broadcast node is used to send group-addressed and management frames.
4030  */
4031 int
4032 iwn_add_broadcast_node(struct iwn_softc *sc, int async, int ridx)
4033 {
4034 	struct iwn_ops *ops = &sc->ops;
4035 	struct iwn_node_info node;
4036 	struct iwn_cmd_link_quality linkq;
4037 	const struct iwn_rate *rinfo;
4038 	uint8_t txant;
4039 	int i, error;
4040 
4041 	memset(&node, 0, sizeof node);
4042 	IEEE80211_ADDR_COPY(node.macaddr, etherbroadcastaddr);
4043 	node.id = sc->broadcast_id;
4044 	DPRINTF(("adding broadcast node\n"));
4045 	if ((error = ops->add_node(sc, &node, async)) != 0)
4046 		return error;
4047 
4048 	/* Use the first valid TX antenna. */
4049 	txant = IWN_LSB(sc->txchainmask);
4050 
4051 	memset(&linkq, 0, sizeof linkq);
4052 	linkq.id = sc->broadcast_id;
4053 	linkq.antmsk_1stream = txant;
4054 	linkq.antmsk_2stream = IWN_ANT_AB;
4055 	linkq.ampdu_max = IWN_AMPDU_MAX_NO_AGG;
4056 	linkq.ampdu_threshold = 3;
4057 	linkq.ampdu_limit = htole16(4000);	/* 4ms */
4058 
4059 	/* Use lowest mandatory bit-rate. */
4060 	rinfo = &iwn_rates[ridx];
4061 	linkq.retry[0].plcp = rinfo->plcp;
4062 	linkq.retry[0].rflags = rinfo->flags;
4063 	linkq.retry[0].rflags |= IWN_RFLAG_ANT(txant);
4064 	/* Use same bit-rate for all TX retries. */
4065 	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
4066 		linkq.retry[i].plcp = linkq.retry[0].plcp;
4067 		linkq.retry[i].rflags = linkq.retry[0].rflags;
4068 	}
4069 	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
4070 }
4071 
4072 void
4073 iwn_updateedca(struct ieee80211com *ic)
4074 {
4075 #define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
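/* e.g. an ECWmin of 4 yields a CWmin of (1 << 4) - 1 = 15 slots. */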
4076 	struct iwn_softc *sc = ic->ic_softc;
4077 	struct iwn_edca_params cmd;
4078 	int aci;
4079 
4080 	memset(&cmd, 0, sizeof cmd);
4081 	cmd.flags = htole32(IWN_EDCA_UPDATE);
4082 	for (aci = 0; aci < EDCA_NUM_AC; aci++) {
4083 		const struct ieee80211_edca_ac_params *ac =
4084 		    &ic->ic_edca_ac[aci];
4085 		cmd.ac[aci].aifsn = ac->ac_aifsn;
4086 		cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->ac_ecwmin));
4087 		cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->ac_ecwmax));
4088 		cmd.ac[aci].txoplimit =
4089 		    htole16(IEEE80211_TXOP_TO_US(ac->ac_txoplimit));
4090 	}
4091 	(void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
4092 #undef IWN_EXP2
4093 }
4094 
4095 void
4096 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
4097 {
4098 	struct iwn_cmd_led led;
4099 
4100 	/* Clear microcode LED ownership. */
4101 	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
4102 
4103 	led.which = which;
4104 	led.unit = htole32(10000);	/* on/off in unit of 100ms */
4105 	led.off = off;
4106 	led.on = on;
4107 	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
4108 }
4109 
4110 /*
4111  * Set the critical temperature at which the firmware will stop the radio
4112  * and notify us.
4113  */
4114 int
4115 iwn_set_critical_temp(struct iwn_softc *sc)
4116 {
4117 	struct iwn_critical_temp crit;
4118 	int32_t temp;
4119 
4120 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
4121 
4122 	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
4123 		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
4124 	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
4125 		temp = IWN_CTOK(110);
4126 	else
4127 		temp = 110;
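	/*
	 * Units: raw adapter units for 5150 (the inverse of the conversion
	 * in iwn5000_get_temperature()), Kelvin for 4965, degrees Celsius
	 * otherwise.
	 */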
4128 	memset(&crit, 0, sizeof crit);
4129 	crit.tempR = htole32(temp);
4130 	DPRINTF(("setting critical temperature to %d\n", temp));
4131 	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
4132 }
4133 
4134 int
4135 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
4136 {
4137 	struct iwn_cmd_timing cmd;
4138 	uint64_t val, mod;
4139 
4140 	memset(&cmd, 0, sizeof cmd);
4141 	memcpy(&cmd.tstamp, ni->ni_tstamp, sizeof (uint64_t));
4142 	cmd.bintval = htole16(ni->ni_intval);
4143 	cmd.lintval = htole16(10);
4144 
4145 	/* Compute remaining time until next beacon. */
4146 	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
4147 	mod = letoh64(cmd.tstamp) % val;
4148 	cmd.binitval = htole32((uint32_t)(val - mod));
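	/*
	 * e.g. with ni_intval = 100, val is 102400 usec; a TSF whose
	 * remainder modulo val is 30000 yields binitval = 72400 usec
	 * until the next TBTT.
	 */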
4149 
4150 	DPRINTF(("timing bintval=%u, tstamp=%llu, init=%u\n",
4151 	    ni->ni_intval, letoh64(cmd.tstamp), (uint32_t)(val - mod)));
4152 
4153 	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
4154 }
4155 
4156 void
4157 iwn4965_power_calibration(struct iwn_softc *sc, int temp)
4158 {
4159 	/* Adjust TX power if need be (delta >= 3 degC). */
4160 	DPRINTF(("temperature %d->%d\n", sc->temp, temp));
4161 	if (abs(temp - sc->temp) >= 3) {
4162 		/* Record temperature of last calibration. */
4163 		sc->temp = temp;
4164 		(void)iwn4965_set_txpower(sc, 1);
4165 	}
4166 }
4167 
4168 /*
4169  * Set TX power for current channel (each rate has its own power settings).
4170  * This function takes into account the regulatory information from EEPROM,
4171  * the current temperature and the current voltage.
4172  */
4173 int
4174 iwn4965_set_txpower(struct iwn_softc *sc, int async)
4175 {
4176 /* Fixed-point arithmetic division using an n-bit fractional part. */
4177 #define fdivround(a, b, n)	\
4178 	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
4179 /* Linear interpolation. */
4180 #define interpolate(x, x1, y1, x2, y2, n)	\
4181 	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
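/* e.g. fdivround(7, 2, 1) = ((2 * 7) / 2 + 1) / 2 = 4, i.e. 7/2 rounded. */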
4182 
4183 	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
4184 	struct ieee80211com *ic = &sc->sc_ic;
4185 	struct iwn_ucode_info *uc = &sc->ucode_info;
4186 	struct ieee80211_channel *ch;
4187 	struct iwn4965_cmd_txpower cmd;
4188 	struct iwn4965_eeprom_chan_samples *chans;
4189 	const uint8_t *rf_gain, *dsp_gain;
4190 	int32_t vdiff, tdiff;
4191 	int i, c, grp, maxpwr;
4192 	uint8_t chan;
4193 
4194 	/* Retrieve current channel from last RXON. */
4195 	chan = sc->rxon.chan;
4196 	DPRINTF(("setting TX power for channel %d\n", chan));
4197 	ch = &ic->ic_channels[chan];
4198 
4199 	memset(&cmd, 0, sizeof cmd);
4200 	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
4201 	cmd.chan = chan;
4202 
4203 	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
4204 		maxpwr   = sc->maxpwr5GHz;
4205 		rf_gain  = iwn4965_rf_gain_5ghz;
4206 		dsp_gain = iwn4965_dsp_gain_5ghz;
4207 	} else {
4208 		maxpwr   = sc->maxpwr2GHz;
4209 		rf_gain  = iwn4965_rf_gain_2ghz;
4210 		dsp_gain = iwn4965_dsp_gain_2ghz;
4211 	}
4212 
4213 	/* Compute voltage compensation. */
4214 	vdiff = ((int32_t)letoh32(uc->volt) - sc->eeprom_voltage) / 7;
4215 	if (vdiff > 0)
4216 		vdiff *= 2;
4217 	if (abs(vdiff) > 2)
4218 		vdiff = 0;
4219 	DPRINTF(("voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
4220 	    vdiff, letoh32(uc->volt), sc->eeprom_voltage));
4221 
4222 	/* Get channel attenuation group. */
4223 	if (chan <= 20)		/* 1-20 */
4224 		grp = 4;
4225 	else if (chan <= 43)	/* 34-43 */
4226 		grp = 0;
4227 	else if (chan <= 70)	/* 44-70 */
4228 		grp = 1;
4229 	else if (chan <= 124)	/* 71-124 */
4230 		grp = 2;
4231 	else			/* 125-200 */
4232 		grp = 3;
4233 	DPRINTF(("chan %d, attenuation group=%d\n", chan, grp));
4234 
4235 	/* Get channel sub-band. */
4236 	for (i = 0; i < IWN_NBANDS; i++)
4237 		if (sc->bands[i].lo != 0 &&
4238 		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
4239 			break;
4240 	if (i == IWN_NBANDS)	/* Can't happen in real life. */
4241 		return EINVAL;
4242 	chans = sc->bands[i].chans;
4243 	DPRINTF(("chan %d sub-band=%d\n", chan, i));
4244 
4245 	for (c = 0; c < 2; c++) {
4246 		uint8_t power, gain, temp;
4247 		int maxchpwr, pwr, ridx, idx;
4248 
4249 		power = interpolate(chan,
4250 		    chans[0].num, chans[0].samples[c][1].power,
4251 		    chans[1].num, chans[1].samples[c][1].power, 1);
4252 		gain  = interpolate(chan,
4253 		    chans[0].num, chans[0].samples[c][1].gain,
4254 		    chans[1].num, chans[1].samples[c][1].gain, 1);
4255 		temp  = interpolate(chan,
4256 		    chans[0].num, chans[0].samples[c][1].temp,
4257 		    chans[1].num, chans[1].samples[c][1].temp, 1);
4258 		DPRINTF(("TX chain %d: power=%d gain=%d temp=%d\n",
4259 		    c, power, gain, temp));
4260 
4261 		/* Compute temperature compensation. */
4262 		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
4263 		DPRINTF(("temperature compensation=%d (current=%d, "
4264 		    "EEPROM=%d)\n", tdiff, sc->temp, temp));
4265 
4266 		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
4267 			/* Convert dBm to half-dBm. */
4268 			maxchpwr = sc->maxpwr[chan] * 2;
4269 #ifdef notyet
4270 			if (ridx > iwn_mcs2ridx[7] && ridx < iwn_mcs2ridx[16])
4271 				maxchpwr -= 6;	/* MIMO 2T: -3dB */
4272 #endif
4273 
4274 			pwr = maxpwr;
4275 
4276 			/* Adjust TX power based on rate. */
4277 			if ((ridx % 8) == 5)
4278 				pwr -= 15;	/* OFDM48: -7.5dB */
4279 			else if ((ridx % 8) == 6)
4280 				pwr -= 17;	/* OFDM54: -8.5dB */
4281 			else if ((ridx % 8) == 7)
4282 				pwr -= 20;	/* OFDM60: -10dB */
4283 			else
4284 				pwr -= 10;	/* Others: -5dB */
4285 
4286 			/* Do not exceed channel max TX power. */
4287 			if (pwr > maxchpwr)
4288 				pwr = maxchpwr;
4289 
4290 			idx = gain - (pwr - power) - tdiff - vdiff;
4291 			if (ridx > iwn_mcs2ridx[7]) /* MIMO */
4292 				idx += (int32_t)letoh32(uc->atten[grp][c]);
4293 
4294 			if (cmd.band == 0)
4295 				idx += 9;	/* 5GHz */
4296 			if (ridx == IWN_RIDX_MAX)
4297 				idx += 5;	/* CCK */
4298 
4299 			/* Make sure idx stays in a valid range. */
4300 			if (idx < 0)
4301 				idx = 0;
4302 			else if (idx > IWN4965_MAX_PWR_INDEX)
4303 				idx = IWN4965_MAX_PWR_INDEX;
4304 
4305 			DPRINTF(("TX chain %d, rate idx %d: power=%d\n",
4306 			    c, ridx, idx));
4307 			cmd.power[ridx].rf_gain[c] = rf_gain[idx];
4308 			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
4309 		}
4310 	}
4311 
4312 	DPRINTF(("setting TX power for chan %d\n", chan));
4313 	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
4314 
4315 #undef interpolate
4316 #undef fdivround
4317 }
4318 
4319 int
4320 iwn5000_set_txpower(struct iwn_softc *sc, int async)
4321 {
4322 	struct iwn5000_cmd_txpower cmd;
4323 
4324 	/*
4325 	 * TX power calibration is handled automatically by the firmware
4326 	 * for 5000 Series.
4327 	 */
4328 	memset(&cmd, 0, sizeof cmd);
4329 	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
4330 	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
4331 	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
4332 	DPRINTF(("setting TX power\n"));
4333 	return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
4334 }
4335 
4336 /*
4337  * Retrieve the maximum RSSI (in dBm) among receivers.
4338  */
4339 int
4340 iwn4965_get_rssi(const struct iwn_rx_stat *stat)
4341 {
4342 	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
4343 	uint8_t mask, agc;
4344 	int rssi;
4345 
4346 	mask = (letoh16(phy->antenna) >> 4) & IWN_ANT_ABC;
4347 	agc  = (letoh16(phy->agc) >> 7) & 0x7f;
4348 
4349 	rssi = 0;
4350 	if (mask & IWN_ANT_A)
4351 		rssi = MAX(rssi, phy->rssi[0]);
4352 	if (mask & IWN_ANT_B)
4353 		rssi = MAX(rssi, phy->rssi[2]);
4354 	if (mask & IWN_ANT_C)
4355 		rssi = MAX(rssi, phy->rssi[4]);
4356 
4357 	return rssi - agc - IWN_RSSI_TO_DBM;
4358 }
4359 
4360 int
4361 iwn5000_get_rssi(const struct iwn_rx_stat *stat)
4362 {
4363 	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
4364 	uint8_t agc;
4365 	int rssi;
4366 
4367 	agc = (letoh32(phy->agc) >> 9) & 0x7f;
4368 
4369 	rssi = MAX(letoh16(phy->rssi[0]) & 0xff,
4370 		   letoh16(phy->rssi[1]) & 0xff);
4371 	rssi = MAX(letoh16(phy->rssi[2]) & 0xff, rssi);
4372 
4373 	return rssi - agc - IWN_RSSI_TO_DBM;
4374 }
4375 
4376 /*
4377  * Retrieve the average noise (in dBm) among receivers.
4378  */
4379 int
4380 iwn_get_noise(const struct iwn_rx_general_stats *stats)
4381 {
4382 	int i, total, nbant, noise;
4383 
4384 	total = nbant = 0;
4385 	for (i = 0; i < 3; i++) {
4386 		if ((noise = letoh32(stats->noise[i]) & 0xff) == 0)
4387 			continue;
4388 		total += noise;
4389 		nbant++;
4390 	}
4391 	/* There should be at least one antenna but check anyway. */
4392 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4393 }
4394 
4395 /*
4396  * Compute temperature (in degC) from last received statistics.
4397  */
4398 int
4399 iwn4965_get_temperature(struct iwn_softc *sc)
4400 {
4401 	struct iwn_ucode_info *uc = &sc->ucode_info;
4402 	int32_t r1, r2, r3, r4, temp;
4403 
4404 	r1 = letoh32(uc->temp[0].chan20MHz);
4405 	r2 = letoh32(uc->temp[1].chan20MHz);
4406 	r3 = letoh32(uc->temp[2].chan20MHz);
4407 	r4 = letoh32(sc->rawtemp);
4408 
4409 	if (r1 == r3)	/* Prevents division by 0 (should not happen). */
4410 		return 0;
4411 
4412 	/* Sign-extend 23-bit R4 value to 32-bit. */
4413 	r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
4414 	/* Compute temperature in Kelvin. */
4415 	temp = (259 * (r4 - r2)) / (r3 - r1);
4416 	temp = (temp * 97) / 100 + 8;
4417 
4418 	DPRINTF(("temperature %dK/%dC\n", temp, IWN_KTOC(temp)));
4419 	return IWN_KTOC(temp);
4420 }
4421 
4422 int
4423 iwn5000_get_temperature(struct iwn_softc *sc)
4424 {
4425 	int32_t temp;
4426 
4427 	/*
4428 	 * Temperature is not used by the driver for 5000 Series because
4429 	 * TX power calibration is handled by firmware.
4430 	 */
4431 	temp = letoh32(sc->rawtemp);
4432 	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
4433 		temp = (temp / -5) + sc->temp_off;
4434 		temp = IWN_KTOC(temp);
4435 	}
4436 	return temp;
4437 }
4438 
4439 /*
4440  * Initialize sensitivity calibration state machine.
4441  */
4442 int
4443 iwn_init_sensitivity(struct iwn_softc *sc)
4444 {
4445 	struct iwn_ops *ops = &sc->ops;
4446 	struct iwn_calib_state *calib = &sc->calib;
4447 	uint32_t flags;
4448 	int error;
4449 
4450 	/* Reset calibration state machine. */
4451 	memset(calib, 0, sizeof (*calib));
4452 	calib->state = IWN_CALIB_STATE_INIT;
4453 	calib->cck_state = IWN_CCK_STATE_HIFA;
4454 	/* Set initial correlation values. */
4455 	calib->ofdm_x1     = sc->limits->min_ofdm_x1;
4456 	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
4457 	calib->ofdm_x4     = sc->limits->min_ofdm_x4;
4458 	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
4459 	calib->cck_x4      = 125;
4460 	calib->cck_mrc_x4  = sc->limits->min_cck_mrc_x4;
4461 	calib->energy_cck  = sc->limits->energy_cck;
4462 
4463 	/* Write initial sensitivity. */
4464 	if ((error = iwn_send_sensitivity(sc)) != 0)
4465 		return error;
4466 
4467 	/* Write initial gains. */
4468 	if ((error = ops->init_gains(sc)) != 0)
4469 		return error;
4470 
4471 	/* Request statistics at each beacon interval. */
4472 	flags = 0;
4473 	DPRINTFN(2, ("sending request for statistics\n"));
4474 	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
4475 }
4476 
4477 /*
4478  * Collect noise and RSSI statistics for the first 20 beacons received
4479  * after association and use them to determine connected antennas and
4480  * to set differential gains.
4481  */
4482 void
4483 iwn_collect_noise(struct iwn_softc *sc,
4484     const struct iwn_rx_general_stats *stats)
4485 {
4486 	struct iwn_ops *ops = &sc->ops;
4487 	struct iwn_calib_state *calib = &sc->calib;
4488 	uint32_t val;
4489 	int i;
4490 
4491 	/* Accumulate RSSI and noise for all 3 antennas. */
4492 	for (i = 0; i < 3; i++) {
4493 		calib->rssi[i] += letoh32(stats->rssi[i]) & 0xff;
4494 		calib->noise[i] += letoh32(stats->noise[i]) & 0xff;
4495 	}
4496 	/* NB: We update differential gains only once after 20 beacons. */
4497 	if (++calib->nbeacons < 20)
4498 		return;
4499 
4500 	/* Determine highest average RSSI. */
4501 	val = MAX(calib->rssi[0], calib->rssi[1]);
4502 	val = MAX(calib->rssi[2], val);
4503 
4504 	/* Determine which antennas are connected. */
4505 	sc->chainmask = sc->rxchainmask;
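	/*
	 * calib->rssi[] was summed over 20 beacons above, so the 15 * 20
	 * threshold flags any antenna whose average RSSI is more than 15
	 * units below the strongest one.
	 */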
4506 	for (i = 0; i < 3; i++)
4507 		if (val - calib->rssi[i] > 15 * 20)
4508 			sc->chainmask &= ~(1 << i);
4509 	DPRINTF(("RX chains mask: theoretical=0x%x, actual=0x%x\n",
4510 	    sc->rxchainmask, sc->chainmask));
4511 
4512 	/* If none of the TX antennas are connected, keep at least one. */
4513 	if ((sc->chainmask & sc->txchainmask) == 0)
4514 		sc->chainmask |= IWN_LSB(sc->txchainmask);
4515 
4516 	(void)ops->set_gains(sc);
4517 	calib->state = IWN_CALIB_STATE_RUN;
4518 
4519 #ifdef notyet
4520 	/* XXX Disable RX chains with no antennas connected. */
4521 	sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
4522 	(void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
4523 #endif
4524 
4525 	/* Enable power-saving mode if requested by user. */
4526 	if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON)
4527 		(void)iwn_set_pslevel(sc, 0, 3, 1);
4528 }
4529 
4530 int
4531 iwn4965_init_gains(struct iwn_softc *sc)
4532 {
4533 	struct iwn_phy_calib_gain cmd;
4534 
4535 	memset(&cmd, 0, sizeof cmd);
4536 	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4537 	/* Differential gains initially set to 0 for all 3 antennas. */
4538 	DPRINTF(("setting initial differential gains\n"));
4539 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4540 }
4541 
4542 int
4543 iwn5000_init_gains(struct iwn_softc *sc)
4544 {
4545 	struct iwn_phy_calib cmd;
4546 
4547 	memset(&cmd, 0, sizeof cmd);
4548 	cmd.code = sc->reset_noise_gain;
4549 	cmd.ngroups = 1;
4550 	cmd.isvalid = 1;
4551 	DPRINTF(("setting initial differential gains\n"));
4552 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4553 }
4554 
4555 int
4556 iwn4965_set_gains(struct iwn_softc *sc)
4557 {
4558 	struct iwn_calib_state *calib = &sc->calib;
4559 	struct iwn_phy_calib_gain cmd;
4560 	int i, delta, noise;
4561 
4562 	/* Get minimal noise among connected antennas. */
4563 	noise = INT_MAX;	/* NB: There's at least one antenna. */
4564 	for (i = 0; i < 3; i++)
4565 		if (sc->chainmask & (1 << i))
4566 			noise = MIN(calib->noise[i], noise);
4567 
4568 	memset(&cmd, 0, sizeof cmd);
4569 	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4570 	/* Set differential gains for connected antennas. */
4571 	for (i = 0; i < 3; i++) {
4572 		if (sc->chainmask & (1 << i)) {
4573 			/* Compute attenuation (in unit of 1.5dB). */
4574 			delta = (noise - (int32_t)calib->noise[i]) / 30;
4575 			/* NB: delta <= 0 */
4576 			/* Limit to [-4.5dB,0]. */
4577 			cmd.gain[i] = MIN(abs(delta), 3);
4578 			if (delta < 0)
4579 				cmd.gain[i] |= 1 << 2;	/* sign bit */
4580 		}
4581 	}
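	/*
	 * e.g. an antenna whose summed noise is 60 above the quietest one
	 * gets delta = -2, encoded as 0x6 (magnitude 2 with the sign bit
	 * set), i.e. -3dB of differential gain.
	 */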
4582 	DPRINTF(("setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
4583 	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask));
4584 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4585 }
4586 
4587 int
4588 iwn5000_set_gains(struct iwn_softc *sc)
4589 {
4590 	struct iwn_calib_state *calib = &sc->calib;
4591 	struct iwn_phy_calib_gain cmd;
4592 	int i, ant, div, delta;
4593 
4594 	/* Noise was summed over 20 beacons; non-6050 chips apply an extra 1.5 factor. */
4595 	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
4596 
4597 	memset(&cmd, 0, sizeof cmd);
4598 	cmd.code = sc->noise_gain;
4599 	cmd.ngroups = 1;
4600 	cmd.isvalid = 1;
4601 	/*
4602 	 * Get the first available RX antenna as the reference.
4603 	 * IWN_LSB() return values start with 1, but antenna gain array
4604 	 * cmd.gain[] and noise array calib->noise[] start with 0.
4605 	 */
4606 	ant = IWN_LSB(sc->rxchainmask) - 1;
4607 
4608 	/* Set differential gains for other antennas. */
4609 	for (i = ant + 1; i < 3; i++) {
4610 		if (sc->chainmask & (1 << i)) {
4611 			/* The delta is relative to antenna "ant". */
4612 			delta = ((int32_t)calib->noise[ant] -
4613 			    (int32_t)calib->noise[i]) / div;
4614 			DPRINTF(("Ant[%d] vs. Ant[%d]: delta %d\n", ant, i, delta));
4615 			/* Limit to [-4.5dB,+4.5dB]. */
4616 			cmd.gain[i] = MIN(abs(delta), 3);
4617 			if (delta < 0)
4618 				cmd.gain[i] |= 1 << 2;	/* sign bit */
4619 			DPRINTF(("Setting differential gains for antenna %d: %x\n",
4620 				i, cmd.gain[i]));
4621 		}
4622 	}
4623 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4624 }
4625 
4626 /*
4627  * Tune RF RX sensitivity based on the number of false alarms detected
4628  * during the last beacon period.
4629  */
4630 void
4631 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
4632 {
4633 #define inc(val, inc, max)			\
4634 	if ((val) < (max)) {			\
4635 		if ((val) < (max) - (inc))	\
4636 			(val) += (inc);		\
4637 		else				\
4638 			(val) = (max);		\
4639 		needs_update = 1;		\
4640 	}
4641 #define dec(val, dec, min)			\
4642 	if ((val) > (min)) {			\
4643 		if ((val) > (min) + (dec))	\
4644 			(val) -= (dec);		\
4645 		else				\
4646 			(val) = (min);		\
4647 		needs_update = 1;		\
4648 	}
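/*
 * Both helpers move a threshold one step toward its bound, clamp it at
 * the bound, and flag needs_update whenever the value actually moved.
 */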
4649 
4650 	const struct iwn_sensitivity_limits *limits = sc->limits;
4651 	struct iwn_calib_state *calib = &sc->calib;
4652 	uint32_t val, rxena, fa;
4653 	uint32_t energy[3], energy_min;
4654 	uint8_t noise[3], noise_ref;
4655 	int i, needs_update = 0;
4656 
4657 	/* Check that we've been enabled long enough. */
4658 	if ((rxena = letoh32(stats->general.load)) == 0)
4659 		return;
4660 
4661 	/* Compute number of false alarms since last call for OFDM. */
4662 	fa  = letoh32(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
4663 	fa += letoh32(stats->ofdm.fa) - calib->fa_ofdm;
4664 	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */
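	/*
	 * Scaling by 200 TU makes the comparisons below read as "more than
	 * 50 (or fewer than 5) false alarms per 200 TU of receiver-enabled
	 * time" as reported in stats->general.load.
	 */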
4665 
4666 	/* Save counter values for the next call. */
4667 	calib->bad_plcp_ofdm = letoh32(stats->ofdm.bad_plcp);
4668 	calib->fa_ofdm = letoh32(stats->ofdm.fa);
4669 
4670 	if (fa > 50 * rxena) {
4671 		/* High false alarm count, decrease sensitivity. */
4672 		DPRINTFN(2, ("OFDM high false alarm count: %u\n", fa));
4673 		inc(calib->ofdm_x1,     1, limits->max_ofdm_x1);
4674 		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
4675 		inc(calib->ofdm_x4,     1, limits->max_ofdm_x4);
4676 		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
4677 
4678 	} else if (fa < 5 * rxena) {
4679 		/* Low false alarm count, increase sensitivity. */
4680 		DPRINTFN(2, ("OFDM low false alarm count: %u\n", fa));
4681 		dec(calib->ofdm_x1,     1, limits->min_ofdm_x1);
4682 		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
4683 		dec(calib->ofdm_x4,     1, limits->min_ofdm_x4);
4684 		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
4685 	}
4686 
4687 	/* Compute maximum noise among 3 receivers. */
4688 	for (i = 0; i < 3; i++)
4689 		noise[i] = (letoh32(stats->general.noise[i]) >> 8) & 0xff;
4690 	val = MAX(noise[0], noise[1]);
4691 	val = MAX(noise[2], val);
4692 	/* Insert it into our samples table. */
4693 	calib->noise_samples[calib->cur_noise_sample] = val;
4694 	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
4695 
4696 	/* Compute maximum noise among last 20 samples. */
4697 	noise_ref = calib->noise_samples[0];
4698 	for (i = 1; i < 20; i++)
4699 		noise_ref = MAX(noise_ref, calib->noise_samples[i]);
4700 
4701 	/* Compute maximum energy among 3 receivers. */
4702 	for (i = 0; i < 3; i++)
4703 		energy[i] = letoh32(stats->general.energy[i]);
4704 	val = MIN(energy[0], energy[1]);
4705 	val = MIN(energy[2], val);
4706 	/* Insert it into our samples table. */
4707 	calib->energy_samples[calib->cur_energy_sample] = val;
4708 	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
4709 
4710 	/* Compute minimum energy among last 10 samples. */
4711 	energy_min = calib->energy_samples[0];
4712 	for (i = 1; i < 10; i++)
4713 		energy_min = MAX(energy_min, calib->energy_samples[i]);
4714 	energy_min += 6;
4715 
4716 	/* Compute number of false alarms since last call for CCK. */
4717 	fa  = letoh32(stats->cck.bad_plcp) - calib->bad_plcp_cck;
4718 	fa += letoh32(stats->cck.fa) - calib->fa_cck;
4719 	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */
4720 
4721 	/* Save counter values for the next call. */
4722 	calib->bad_plcp_cck = letoh32(stats->cck.bad_plcp);
4723 	calib->fa_cck = letoh32(stats->cck.fa);
4724 
4725 	if (fa > 50 * rxena) {
4726 		/* High false alarm count, decrease sensitivity. */
4727 		DPRINTFN(2, ("CCK high false alarm count: %u\n", fa));
4728 		calib->cck_state = IWN_CCK_STATE_HIFA;
4729 		calib->low_fa = 0;
4730 
4731 		if (calib->cck_x4 > 160) {
4732 			calib->noise_ref = noise_ref;
4733 			if (calib->energy_cck > 2)
4734 				dec(calib->energy_cck, 2, energy_min);
4735 		}
4736 		if (calib->cck_x4 < 160) {
4737 			calib->cck_x4 = 161;
4738 			needs_update = 1;
4739 		} else
4740 			inc(calib->cck_x4, 3, limits->max_cck_x4);
4741 
4742 		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
4743 
4744 	} else if (fa < 5 * rxena) {
4745 		/* Low false alarm count, increase sensitivity. */
4746 		DPRINTFN(2, ("CCK low false alarm count: %u\n", fa));
4747 		calib->cck_state = IWN_CCK_STATE_LOFA;
4748 		calib->low_fa++;
4749 
4750 		if (calib->cck_state != IWN_CCK_STATE_INIT &&
4751 		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
4752 		     calib->low_fa > 100)) {
4753 			inc(calib->energy_cck, 2, limits->min_energy_cck);
4754 			dec(calib->cck_x4,     3, limits->min_cck_x4);
4755 			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
4756 		}
4757 	} else {
4758 		/* Not worth increasing or decreasing sensitivity. */
4759 		DPRINTFN(2, ("CCK normal false alarm count: %u\n", fa));
4760 		calib->low_fa = 0;
4761 		calib->noise_ref = noise_ref;
4762 
4763 		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
4764 			/* Previous interval had many false alarms. */
4765 			dec(calib->energy_cck, 8, energy_min);
4766 		}
4767 		calib->cck_state = IWN_CCK_STATE_INIT;
4768 	}
4769 
4770 	if (needs_update)
4771 		(void)iwn_send_sensitivity(sc);
4772 #undef dec
4773 #undef inc
4774 }
4775 
4776 int
4777 iwn_send_sensitivity(struct iwn_softc *sc)
4778 {
4779 	struct iwn_calib_state *calib = &sc->calib;
4780 	struct iwn_enhanced_sensitivity_cmd cmd;
4781 	int len;
4782 
4783 	memset(&cmd, 0, sizeof cmd);
4784 	len = sizeof (struct iwn_sensitivity_cmd);
4785 	cmd.which = IWN_SENSITIVITY_WORKTBL;
4786 	/* OFDM modulation. */
4787 	cmd.corr_ofdm_x1       = htole16(calib->ofdm_x1);
4788 	cmd.corr_ofdm_mrc_x1   = htole16(calib->ofdm_mrc_x1);
4789 	cmd.corr_ofdm_x4       = htole16(calib->ofdm_x4);
4790 	cmd.corr_ofdm_mrc_x4   = htole16(calib->ofdm_mrc_x4);
4791 	cmd.energy_ofdm        = htole16(sc->limits->energy_ofdm);
4792 	cmd.energy_ofdm_th     = htole16(62);
4793 	/* CCK modulation. */
4794 	cmd.corr_cck_x4        = htole16(calib->cck_x4);
4795 	cmd.corr_cck_mrc_x4    = htole16(calib->cck_mrc_x4);
4796 	cmd.energy_cck         = htole16(calib->energy_cck);
4797 	/* Barker modulation: use default values. */
4798 	cmd.corr_barker        = htole16(190);
4799 	cmd.corr_barker_mrc    = htole16(390);
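	/*
	 * Adapters without enhanced sensitivity support get only the basic
	 * structure (len set above); the others get the larger enhanced
	 * layout filled in below.
	 */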
4800 	if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
4801 		goto send;
4802 	/* Enhanced sensitivity settings. */
4803 	len = sizeof (struct iwn_enhanced_sensitivity_cmd);
4804 	cmd.ofdm_det_slope_mrc = htole16(668);
4805 	cmd.ofdm_det_icept_mrc = htole16(4);
4806 	cmd.ofdm_det_slope     = htole16(486);
4807 	cmd.ofdm_det_icept     = htole16(37);
4808 	cmd.cck_det_slope_mrc  = htole16(853);
4809 	cmd.cck_det_icept_mrc  = htole16(4);
4810 	cmd.cck_det_slope      = htole16(476);
4811 	cmd.cck_det_icept      = htole16(99);
4812 send:
4813 	return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
4814 }
4815 
4816 /*
4817  * Set STA mode power saving level (between 0 and 5).
4818  * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
4819  */
4820 int
4821 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
4822 {
4823 	struct iwn_pmgt_cmd cmd;
4824 	const struct iwn_pmgt *pmgt;
4825 	uint32_t max, skip_dtim;
4826 	pcireg_t reg;
4827 	int i;
4828 
4829 	/* Select which PS parameters to use. */
4830 	if (dtim <= 2)
4831 		pmgt = &iwn_pmgt[0][level];
4832 	else if (dtim <= 10)
4833 		pmgt = &iwn_pmgt[1][level];
4834 	else
4835 		pmgt = &iwn_pmgt[2][level];
4836 
4837 	memset(&cmd, 0, sizeof cmd);
4838 	if (level != 0)	/* not CAM */
4839 		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
4840 	if (level == 5)
4841 		cmd.flags |= htole16(IWN_PS_FAST_PD);
4842 	/* Retrieve PCIe Active State Power Management (ASPM). */
4843 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
4844 	    sc->sc_cap_off + PCI_PCIE_LCSR);
4845 	if (!(reg & PCI_PCIE_LCSR_ASPM_L0S))	/* L0s Entry disabled. */
4846 		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
4847 	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
4848 	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
4849 
4850 	if (dtim == 0) {
4851 		dtim = 1;
4852 		skip_dtim = 0;
4853 	} else
4854 		skip_dtim = pmgt->skip_dtim;
4855 	if (skip_dtim != 0) {
4856 		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
4857 		max = pmgt->intval[4];
4858 		if (max == (uint32_t)-1)
4859 			max = dtim * (skip_dtim + 1);
4860 		else if (max > dtim)
4861 			max = (max / dtim) * dtim;
4862 	} else
4863 		max = dtim;
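	/*
	 * 'max' caps every sleep interval programmed below: with DTIM
	 * skipping it is rounded down to a whole number of DTIM periods,
	 * otherwise it is the DTIM period itself.
	 */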
4864 	for (i = 0; i < 5; i++)
4865 		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
4866 
4867 	DPRINTF(("setting power saving level to %d\n", level));
4868 	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
4869 }
4870 
4871 int
4872 iwn_send_btcoex(struct iwn_softc *sc)
4873 {
4874 	struct iwn_bluetooth cmd;
4875 
4876 	memset(&cmd, 0, sizeof cmd);
4877 	cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
4878 	cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
4879 	cmd.max_kill = IWN_BT_MAX_KILL_DEF;
4880 	DPRINTF(("configuring bluetooth coexistence\n"));
4881 	return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
4882 }
4883 
4884 int
4885 iwn_send_advanced_btcoex(struct iwn_softc *sc)
4886 {
4887 	static const uint32_t btcoex_3wire[12] = {
4888 		0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
4889 		0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
4890 		0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
4891 	};
4892 	struct iwn_btcoex_priotable btprio;
4893 	struct iwn_btcoex_prot btprot;
4894 	int error, i;
4895 
4896 	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
4897 	    sc->hw_type == IWN_HW_REV_TYPE_135) {
4898 		struct iwn2000_btcoex_config btconfig;
4899 
4900 		memset(&btconfig, 0, sizeof btconfig);
4901 		btconfig.flags = IWN_BT_COEX6000_CHAN_INHIBITION |
4902 		    (IWN_BT_COEX6000_MODE_3W << IWN_BT_COEX6000_MODE_SHIFT) |
4903 		    IWN_BT_SYNC_2_BT_DISABLE;
4904 		btconfig.max_kill = 5;
4905 		btconfig.bt3_t7_timer = 1;
4906 		btconfig.kill_ack = htole32(0xffff0000);
4907 		btconfig.kill_cts = htole32(0xffff0000);
4908 		btconfig.sample_time = 2;
4909 		btconfig.bt3_t2_timer = 0xc;
4910 		for (i = 0; i < 12; i++)
4911 			btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
4912 		btconfig.valid = htole16(0xff);
4913 		btconfig.prio_boost = htole32(0xf0);
4914 		DPRINTF(("configuring advanced bluetooth coexistence\n"));
4915 		error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig,
4916 		    sizeof(btconfig), 1);
4917 		if (error != 0)
4918 			return (error);
4919 	} else {
4920 		struct iwn6000_btcoex_config btconfig;
4921 
4922 		memset(&btconfig, 0, sizeof btconfig);
4923 		btconfig.flags = IWN_BT_COEX6000_CHAN_INHIBITION |
4924 		    (IWN_BT_COEX6000_MODE_3W << IWN_BT_COEX6000_MODE_SHIFT) |
4925 		    IWN_BT_SYNC_2_BT_DISABLE;
4926 		btconfig.max_kill = 5;
4927 		btconfig.bt3_t7_timer = 1;
4928 		btconfig.kill_ack = htole32(0xffff0000);
4929 		btconfig.kill_cts = htole32(0xffff0000);
4930 		btconfig.sample_time = 2;
4931 		btconfig.bt3_t2_timer = 0xc;
4932 		for (i = 0; i < 12; i++)
4933 			btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
4934 		btconfig.valid = htole16(0xff);
4935 		btconfig.prio_boost = 0xf0;
4936 		DPRINTF(("configuring advanced bluetooth coexistence\n"));
4937 		error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig,
4938 		    sizeof(btconfig), 1);
4939 		if (error != 0)
4940 			return (error);
4941 	}
4942 
4943 	memset(&btprio, 0, sizeof btprio);
4944 	btprio.calib_init1 = 0x6;
4945 	btprio.calib_init2 = 0x7;
4946 	btprio.calib_periodic_low1 = 0x2;
4947 	btprio.calib_periodic_low2 = 0x3;
4948 	btprio.calib_periodic_high1 = 0x4;
4949 	btprio.calib_periodic_high2 = 0x5;
4950 	btprio.dtim = 0x6;
4951 	btprio.scan52 = 0x8;
4952 	btprio.scan24 = 0xa;
4953 	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
4954 	    1);
4955 	if (error != 0)
4956 		return (error);
4957 
4958 	/* Force BT state machine change */
4959 	memset(&btprot, 0, sizeof btprot);
4960 	btprot.open = 1;
4961 	btprot.type = 1;
4962 	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
4963 	if (error != 0)
4964 		return (error);
4965 
4966 	btprot.open = 0;
4967 	return (iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1));
4968 }
4969 
4970 int
4971 iwn5000_runtime_calib(struct iwn_softc *sc)
4972 {
4973 	struct iwn5000_calib_config cmd;
4974 
4975 	memset(&cmd, 0, sizeof cmd);
4976 	cmd.ucode.once.enable = 0xffffffff;
4977 	cmd.ucode.once.start = IWN5000_CALIB_DC;
4978 	DPRINTF(("configuring runtime calibration\n"));
4979 	return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
4980 }
4981 
4982 int
4983 iwn_config(struct iwn_softc *sc)
4984 {
4985 	struct iwn_ops *ops = &sc->ops;
4986 	struct ieee80211com *ic = &sc->sc_ic;
4987 	struct ifnet *ifp = &ic->ic_if;
4988 	uint32_t txmask;
4989 	uint16_t rxchain;
4990 	int error, ridx;
4991 
4992 	/* Set radio temperature sensor offset. */
4993 	if (sc->hw_type == IWN_HW_REV_TYPE_6005) {
4994 		error = iwn6000_temp_offset_calib(sc);
4995 		if (error != 0) {
4996 			printf("%s: could not set temperature offset\n",
4997 			    sc->sc_dev.dv_xname);
4998 			return error;
4999 		}
5000 	}
5001 
5002 	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
5003 	    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
5004 	    sc->hw_type == IWN_HW_REV_TYPE_135 ||
5005 	    sc->hw_type == IWN_HW_REV_TYPE_105) {
5006 		error = iwn2000_temp_offset_calib(sc);
5007 		if (error != 0) {
5008 			printf("%s: could not set temperature offset\n",
5009 			    sc->sc_dev.dv_xname);
5010 			return error;
5011 		}
5012 	}
5013 
5014 	if (sc->hw_type == IWN_HW_REV_TYPE_6050 ||
5015 	    sc->hw_type == IWN_HW_REV_TYPE_6005) {
5016 		/* Configure runtime DC calibration. */
5017 		error = iwn5000_runtime_calib(sc);
5018 		if (error != 0) {
5019 			printf("%s: could not configure runtime calibration\n",
5020 			    sc->sc_dev.dv_xname);
5021 			return error;
5022 		}
5023 	}
5024 
5025 	/* Configure valid TX chains for >=5000 Series. */
5026 	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
5027 		txmask = htole32(sc->txchainmask);
5028 		DPRINTF(("configuring valid TX chains 0x%x\n", txmask));
5029 		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
5030 		    sizeof txmask, 0);
5031 		if (error != 0) {
5032 			printf("%s: could not configure valid TX chains\n",
5033 			    sc->sc_dev.dv_xname);
5034 			return error;
5035 		}
5036 	}
5037 
5038 	/* Configure bluetooth coexistence. */
5039 	if (sc->sc_flags & IWN_FLAG_ADV_BT_COEX)
5040 		error = iwn_send_advanced_btcoex(sc);
5041 	else
5042 		error = iwn_send_btcoex(sc);
5043 	if (error != 0) {
5044 		printf("%s: could not configure bluetooth coexistence\n",
5045 		    sc->sc_dev.dv_xname);
5046 		return error;
5047 	}
5048 
5049 	/* Set mode, channel, RX filter and enable RX. */
5050 	memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
5051 	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
5052 	IEEE80211_ADDR_COPY(sc->rxon.myaddr, ic->ic_myaddr);
5053 	IEEE80211_ADDR_COPY(sc->rxon.wlap, ic->ic_myaddr);
5054 	sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
5055 	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5056 	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_ibss_chan)) {
5057 		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5058 		if (ic->ic_flags & IEEE80211_F_USEPROT)
5059 			sc->rxon.flags |= htole32(IWN_RXON_TGG_PROT);
5060 		DPRINTF(("%s: 2ghz prot 0x%x\n", __func__,
5061 		    le32toh(sc->rxon.flags)));
5062 	}
5063 	switch (ic->ic_opmode) {
5064 	case IEEE80211_M_STA:
5065 		sc->rxon.mode = IWN_MODE_STA;
5066 		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
5067 		break;
5068 	case IEEE80211_M_MONITOR:
5069 		sc->rxon.mode = IWN_MODE_MONITOR;
5070 		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
5071 		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
5072 		break;
5073 	default:
5074 		/* Should not get here. */
5075 		break;
5076 	}
5077 	sc->rxon.cck_mask  = 0x0f;	/* not yet negotiated */
5078 	sc->rxon.ofdm_mask = 0xff;	/* not yet negotiated */
5079 	sc->rxon.ht_single_mask = 0xff;
5080 	sc->rxon.ht_dual_mask = 0xff;
5081 	sc->rxon.ht_triple_mask = 0xff;
5082 	rxchain =
5083 	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
5084 	    IWN_RXCHAIN_MIMO_COUNT(sc->nrxchains) |
5085 	    IWN_RXCHAIN_IDLE_COUNT(sc->nrxchains);
5086 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5087 		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
5088 		rxchain |= IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask);
5089 		rxchain |= (IWN_RXCHAIN_DRIVER_FORCE | IWN_RXCHAIN_MIMO_FORCE);
5090 	}
5091 	sc->rxon.rxchain = htole16(rxchain);
5092 	DPRINTF(("setting configuration\n"));
5093 	DPRINTF(("%s: rxon chan %d flags %x cck %x ofdm %x rxchain %x\n",
5094 	    __func__, sc->rxon.chan, le32toh(sc->rxon.flags), sc->rxon.cck_mask,
5095 	    sc->rxon.ofdm_mask, sc->rxon.rxchain));
5096 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0);
5097 	if (error != 0) {
5098 		printf("%s: RXON command failed\n", sc->sc_dev.dv_xname);
5099 		return error;
5100 	}
5101 
5102 	ridx = (sc->sc_ic.ic_curmode == IEEE80211_MODE_11A) ?
5103 	    IWN_RIDX_OFDM6 : IWN_RIDX_CCK1;
5104 	if ((error = iwn_add_broadcast_node(sc, 0, ridx)) != 0) {
5105 		printf("%s: could not add broadcast node\n",
5106 		    sc->sc_dev.dv_xname);
5107 		return error;
5108 	}
5109 
5110 	/* Configuration has changed, set TX power accordingly. */
5111 	if ((error = ops->set_txpower(sc, 0)) != 0) {
5112 		printf("%s: could not set TX power\n", sc->sc_dev.dv_xname);
5113 		return error;
5114 	}
5115 
5116 	if ((error = iwn_set_critical_temp(sc)) != 0) {
5117 		printf("%s: could not set critical temperature\n",
5118 		    sc->sc_dev.dv_xname);
5119 		return error;
5120 	}
5121 
5122 	/* Set power saving level to CAM during initialization. */
5123 	if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
5124 		printf("%s: could not set power saving level\n",
5125 		    sc->sc_dev.dv_xname);
5126 		return error;
5127 	}
5128 	return 0;
5129 }
5130 
5131 uint16_t
5132 iwn_get_active_dwell_time(struct iwn_softc *sc,
5133     uint16_t flags, uint8_t n_probes)
5134 {
5135 	/* 2GHz dwell time */
5136 	if (flags & IEEE80211_CHAN_2GHZ) {
5137 		return (IWN_ACTIVE_DWELL_TIME_2GHZ +
5138 		    IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
5139 	}
5140 
5141 	/* 5GHz dwell time */
5142 	return (IWN_ACTIVE_DWELL_TIME_5GHZ +
5143 	    IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1));
5144 }
5145 
5146 /*
5147  * Limit the total dwell time to 85% of the beacon interval.
5148  *
5149  * Returns the dwell time in milliseconds.
5150  */
5151 uint16_t
5152 iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time)
5153 {
5154 	struct ieee80211com *ic = &sc->sc_ic;
5155 	struct ieee80211_node *ni = ic->ic_bss;
5156 	int bintval = 0;
5157 
5158 	/* bintval is in TU (1.024 ms). */
5159 	if (ni != NULL)
5160 		bintval = ni->ni_intval;
5161 
5162 	/*
5163 	 * If it's non-zero, we should calculate the minimum of
5164 	 * it and the DWELL_BASE.
5165 	 *
5166 	 * XXX The math should take into account that bintval is
5167 	 * 1.024 ms, not 1 ms.
5168 	 */
5169 	if (ic->ic_state == IEEE80211_S_RUN && bintval > 0)
5170 		return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100)));
5171 
5172 	/* No association context? Use the default. */
5173 	return dwell_time;
5174 }
5175 
5176 uint16_t
5177 iwn_get_passive_dwell_time(struct iwn_softc *sc, uint16_t flags)
5178 {
5179 	uint16_t passive;
5180 	if (flags & IEEE80211_CHAN_2GHZ) {
5181 		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ;
5182 	} else {
5183 		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ;
5184 	}
5185 
5186 	/* Clamp to the beacon interval if we're associated */
5187 	return (iwn_limit_dwell(sc, passive));
5188 }
5189 
5190 int
5191 iwn_scan(struct iwn_softc *sc, uint16_t flags, int bgscan)
5192 {
5193 	struct ieee80211com *ic = &sc->sc_ic;
5194 	struct iwn_scan_hdr *hdr;
5195 	struct iwn_cmd_data *tx;
5196 	struct iwn_scan_essid *essid;
5197 	struct iwn_scan_chan *chan;
5198 	struct ieee80211_frame *wh;
5199 	struct ieee80211_rateset *rs;
5200 	struct ieee80211_channel *c;
5201 	uint8_t *buf, *frm;
5202 	uint16_t rxchain, dwell_active, dwell_passive;
5203 	uint8_t txant;
5204 	int buflen, error, is_active;
5205 
5206 	buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
5207 	if (buf == NULL) {
5208 		printf("%s: could not allocate buffer for scan command\n",
5209 		    sc->sc_dev.dv_xname);
5210 		return ENOMEM;
5211 	}
5212 	hdr = (struct iwn_scan_hdr *)buf;
5213 	/*
5214 	 * Move to the next channel if no frames are received within 10ms
5215 	 * after sending the probe request.
5216 	 */
5217 	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
5218 	hdr->quiet_threshold = htole16(1);	/* min # of packets */
5219 
5220 	if (bgscan) {
5221 		int bintval;
5222 
5223 		/* Set maximum off-channel time. */
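		/* Presumably in usec: 200 TU (1 TU = 1024 usec). */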
5224 		hdr->max_out = htole32(200 * 1024);
5225 
5226 		/* Configure scan pauses which service on-channel traffic. */
5227 		bintval = ic->ic_bss->ni_intval ? ic->ic_bss->ni_intval : 100;
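		/*
		 * The pause is apparently encoded as full beacon intervals
		 * in the upper bits and the remainder, converted from TU
		 * to usec, in the lower bits.
		 */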
5228 		hdr->pause_scan = htole32(((100 / bintval) << 22) |
5229 		    ((100 % bintval) * 1024));
5230 	}
5231 
5232 	/* Select antennas for scanning. */
5233 	rxchain =
5234 	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
5235 	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
5236 	    IWN_RXCHAIN_DRIVER_FORCE;
5237 	if ((flags & IEEE80211_CHAN_5GHZ) &&
5238 	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
5239 		/*
5240 		 * On 4965, antennas A and C must be avoided in 5GHz because of
5241 		 * a HW bug which causes very weak RSSI values to be reported.
5242 		 */
5243 		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
5244 	} else	/* Use all available RX antennas. */
5245 		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
5246 	hdr->rxchain = htole16(rxchain);
5247 	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
5248 
5249 	tx = (struct iwn_cmd_data *)(hdr + 1);
5250 	tx->flags = htole32(IWN_TX_AUTO_SEQ);
5251 	tx->id = sc->broadcast_id;
5252 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
5253 
5254 	if (flags & IEEE80211_CHAN_5GHZ) {
5255 		/* Send probe requests at 6Mbps. */
5256 		tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp;
5257 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5258 	} else {
5259 		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
5260 		if (bgscan && sc->hw_type == IWN_HW_REV_TYPE_4965 &&
5261 		    sc->rxon.chan > 14) {
5262 			/*
5263 			 * 4965 firmware can crash when sending probe requests
5264 			 * with CCK rates while associated with a 5GHz AP.
5265 			 * Send probe requests at 6Mbps OFDM as a workaround.
5266 			 */
5267 			tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp;
5268 		} else {
5269 			/* Send probe requests at 1Mbps. */
5270 			tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp;
5271 			tx->rflags = IWN_RFLAG_CCK;
5272 		}
5273 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5274 	}
5275 	/* Use the first valid TX antenna. */
5276 	txant = IWN_LSB(sc->txchainmask);
5277 	tx->rflags |= IWN_RFLAG_ANT(txant);
5278 
5279 	/*
5280 	 * Only do active scanning if we're announcing a probe request
5281 	 * for a given SSID (or more, if we ever add that to the driver).
5282 	 */
5283 	is_active = 0;
5284 
5285 	/*
5286 	 * If we're scanning for a specific SSID, add it to the command.
5287 	 */
5288 	essid = (struct iwn_scan_essid *)(tx + 1);
5289 	if (ic->ic_des_esslen != 0) {
5290 		essid[0].id = IEEE80211_ELEMID_SSID;
5291 		essid[0].len = ic->ic_des_esslen;
5292 		memcpy(essid[0].data, ic->ic_des_essid, ic->ic_des_esslen);
5293 
5294 		is_active = 1;
5295 	}
5296 	/*
5297 	 * Build a probe request frame.  Most of the following code is a
5298 	 * copy & paste of what is done in net80211.
5299 	 */
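	/* The scan command apparently reserves room for 20 ESSID entries. */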
5300 	wh = (struct ieee80211_frame *)(essid + 20);
5301 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5302 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5303 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5304 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
5305 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
5306 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
5307 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
5308 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
5309 
5310 	frm = (uint8_t *)(wh + 1);
5311 	frm = ieee80211_add_ssid(frm, NULL, 0);
5312 	frm = ieee80211_add_rates(frm, rs);
5313 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5314 		frm = ieee80211_add_xrates(frm, rs);
5315 	if (ic->ic_flags & IEEE80211_F_HTON)
5316 		frm = ieee80211_add_htcaps(frm, ic);
5317 
5318 	/* Set length of probe request. */
5319 	tx->len = htole16(frm - (uint8_t *)wh);
5320 
5321 	/*
5322 	 * If active scanning is requested but a certain channel is
5323 	 * marked passive, we can do active scanning if we detect
5324 	 * transmissions.
5325 	 *
5326 	 * Some firmware versions have an issue where a "good CRC
5327 	 * threshold" of zero (== disabled) triggers a sysassert on a
5328 	 * radar channel, even though a disabled threshold means that
5329 	 * we should NOT send probes.
5330 	 *
5331 	 * The "good CRC threshold" is the number of frames that we
5332 	 * need to receive during our dwell time on a channel before
5333 	 * sending out probes -- setting this to a huge value will
5334 	 * mean we never reach it, but at the same time work around
5335 	 * the aforementioned issue. Thus use IWN_GOOD_CRC_TH_NEVER
5336 	 * here instead of IWN_GOOD_CRC_TH_DISABLED.
5337 	 *
5338 	 * This was fixed in later versions along with some other
5339 	 * scan changes, and the threshold behaves as a flag in those
5340 	 * versions.
5341 	 */
5342 
5343 	/*
5344 	 * If we're doing active scanning, set the crc_threshold
5345 	 * to a suitable value.  This differs for active versus
5346 	 * passive scanning depending upon the channel flags; the
5347 	 * firmware will obey that particular check for us.
5348 	 */
5349 	if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN)
5350 		hdr->crc_threshold = is_active ?
5351 		    IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED;
5352 	else
5353 		hdr->crc_threshold = is_active ?
5354 		    IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER;
5355 
5356 	chan = (struct iwn_scan_chan *)frm;
5357 	for (c = &ic->ic_channels[1];
5358 	     c <= &ic->ic_channels[IEEE80211_CHAN_MAX]; c++) {
5359 		if ((c->ic_flags & flags) != flags)
5360 			continue;
5361 
5362 		chan->chan = htole16(ieee80211_chan2ieee(ic, c));
5363 		DPRINTFN(2, ("adding channel %d\n", chan->chan));
5364 		chan->flags = 0;
5365 		if (ic->ic_des_esslen != 0)
5366 			chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
5367 
5368 		if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
5369 			chan->flags |= htole32(IWN_CHAN_PASSIVE);
5370 		else
5371 			chan->flags |= htole32(IWN_CHAN_ACTIVE);
5372 
5373 		/*
5374 		 * Calculate the active/passive dwell times.
5375 		 */
5376 
5377 		dwell_active = iwn_get_active_dwell_time(sc, flags, is_active);
5378 		dwell_passive = iwn_get_passive_dwell_time(sc, flags);
5379 
5380 		/* Make sure they're valid */
5381 		if (dwell_passive <= dwell_active)
5382 			dwell_passive = dwell_active + 1;
5383 
5384 		chan->active = htole16(dwell_active);
5385 		chan->passive = htole16(dwell_passive);
5386 
5387 		chan->dsp_gain = 0x6e;
5388 		if (IEEE80211_IS_CHAN_5GHZ(c)) {
5389 			chan->rf_gain = 0x3b;
5390 		} else {
5391 			chan->rf_gain = 0x28;
5392 		}
5393 		hdr->nchan++;
5394 		chan++;
5395 	}
5396 
5397 	buflen = (uint8_t *)chan - buf;
5398 	hdr->len = htole16(buflen);
5399 
5400 	error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
5401 	if (error == 0) {
5402 		/*
5403 		 * The current mode might have been fixed during association.
5404 		 * Ensure all channels get scanned.
5405 		 */
5406 		if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
5407 			ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
5408 
5409 		sc->sc_flags |= IWN_FLAG_SCANNING;
5410 		if (bgscan)
5411 			sc->sc_flags |= IWN_FLAG_BGSCAN;
5412 	}
5413 	free(buf, M_DEVBUF, IWN_SCAN_MAXSZ);
5414 	return error;
5415 }
5416 
5417 void
5418 iwn_scan_abort(struct iwn_softc *sc)
5419 {
5420 	iwn_cmd(sc, IWN_CMD_SCAN_ABORT, NULL, 0, 1);
5421 
5422 	/* XXX Cannot wait for status response in interrupt context. */
5423 	DELAY(100);
5424 
5425 	sc->sc_flags &= ~IWN_FLAG_SCANNING;
5426 	sc->sc_flags &= ~IWN_FLAG_BGSCAN;
5427 }
5428 
5429 int
5430 iwn_bgscan(struct ieee80211com *ic)
5431 {
5432 	struct iwn_softc *sc = ic->ic_softc;
5433 	int error;
5434 
5435 	if (sc->sc_flags & IWN_FLAG_SCANNING)
5436 		return 0;
5437 
5438 	error = iwn_scan(sc, IEEE80211_CHAN_2GHZ, 1);
5439 	if (error)
5440 		printf("%s: could not initiate background scan\n",
5441 		    sc->sc_dev.dv_xname);
5442 	return error;
5443 }
5444 
5445 int
5446 iwn_auth(struct iwn_softc *sc, int arg)
5447 {
5448 	struct iwn_ops *ops = &sc->ops;
5449 	struct ieee80211com *ic = &sc->sc_ic;
5450 	struct ieee80211_node *ni = ic->ic_bss;
5451 	int error, ridx;
5452 	int bss_switch =
5453 	    (!IEEE80211_ADDR_EQ(sc->bss_node_addr, etheranyaddr) &&
5454 	    !IEEE80211_ADDR_EQ(sc->bss_node_addr, ni->ni_macaddr));
5455 
5456 	/* Update adapter configuration. */
5457 	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
5458 	sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
5459 	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5460 	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
5461 		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5462 		if (ic->ic_flags & IEEE80211_F_USEPROT)
5463 			sc->rxon.flags |= htole32(IWN_RXON_TGG_PROT);
5464 		DPRINTF(("%s: 2ghz prot 0x%x\n", __func__,
5465 		    le32toh(sc->rxon.flags)));
5466 	}
5467 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
5468 		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
5469 	else
5470 		sc->rxon.flags &= ~htole32(IWN_RXON_SHSLOT);
5471 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5472 		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
5473 	else
5474 		sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE);
5475 	switch (ic->ic_curmode) {
5476 	case IEEE80211_MODE_11A:
5477 		sc->rxon.cck_mask  = 0;
5478 		sc->rxon.ofdm_mask = 0x15;
5479 		break;
5480 	case IEEE80211_MODE_11B:
5481 		sc->rxon.cck_mask  = 0x03;
5482 		sc->rxon.ofdm_mask = 0;
5483 		break;
5484 	default:	/* Assume 802.11b/g/n. */
5485 		sc->rxon.cck_mask  = 0x0f;
5486 		sc->rxon.ofdm_mask = 0x15;
5487 	}
5488 	DPRINTF(("%s: rxon chan %d flags %x cck %x ofdm %x\n", __func__,
5489 	    sc->rxon.chan, le32toh(sc->rxon.flags), sc->rxon.cck_mask,
5490 	    sc->rxon.ofdm_mask));
5491 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5492 	if (error != 0) {
5493 		printf("%s: RXON command failed\n", sc->sc_dev.dv_xname);
5494 		return error;
5495 	}
5496 
5497 	/* Configuration has changed, set TX power accordingly. */
5498 	if ((error = ops->set_txpower(sc, 1)) != 0) {
5499 		printf("%s: could not set TX power\n", sc->sc_dev.dv_xname);
5500 		return error;
5501 	}
5502 	/*
5503 	 * Reconfiguring RXON clears the firmware nodes table so we must
5504 	 * add the broadcast node again.
5505 	 */
5506 	ridx = IEEE80211_IS_CHAN_5GHZ(ni->ni_chan) ?
5507 	    IWN_RIDX_OFDM6 : IWN_RIDX_CCK1;
5508 	if ((error = iwn_add_broadcast_node(sc, 1, ridx)) != 0) {
5509 		printf("%s: could not add broadcast node\n",
5510 		    sc->sc_dev.dv_xname);
5511 		return error;
5512 	}
5513 
5514 	/*
5515 	 * Make sure the firmware gets to see a beacon before we send
5516 	 * the auth request. Otherwise the Tx attempt can fail due to
5517 	 * the firmware's built-in regulatory domain enforcement.
5518 	 * Delaying here for every incoming deauth frame can result in a DoS.
5519 	 * Don't delay if we're here because of an incoming frame (arg != -1)
5520 	 * or if we're already waiting for a response (ic_mgt_timer != 0).
5521 	 * If we are switching APs after a background scan then net80211 has
5522 	 * just faked the reception of a deauth frame from our old AP, so it
5523 	 * is safe to delay in that case.
5524 	 */
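	/* ni_intval is in TU and DELAY() takes usec: ~3 beacon intervals. */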
5525 	if ((arg == -1 || bss_switch) && ic->ic_mgt_timer == 0)
5526 		DELAY(ni->ni_intval * 3 * IEEE80211_DUR_TU);
5527 
5528 	/* We can now clear the cached address of our previous AP. */
5529 	memset(sc->bss_node_addr, 0, sizeof(sc->bss_node_addr));
5530 
5531 	return 0;
5532 }
5533 
5534 int
5535 iwn_run(struct iwn_softc *sc)
5536 {
5537 	struct iwn_ops *ops = &sc->ops;
5538 	struct ieee80211com *ic = &sc->sc_ic;
5539 	struct ieee80211_node *ni = ic->ic_bss;
5540 	struct iwn_node *wn = (void *)ni;
5541 	struct iwn_node_info node;
5542 	int error;
5543 
5544 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5545 		/* Link LED blinks while monitoring. */
5546 		iwn_set_led(sc, IWN_LED_LINK, 50, 50);
5547 		return 0;
5548 	}
5549 	if ((error = iwn_set_timing(sc, ni)) != 0) {
5550 		printf("%s: could not set timing\n", sc->sc_dev.dv_xname);
5551 		return error;
5552 	}
5553 
5554 	/* Update adapter configuration. */
5555 	sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd));
5556 	/* Short preamble and slot time are negotiated when associating. */
5557 	sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT);
5558 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
5559 		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
5560 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5561 		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
5562 	sc->rxon.filter |= htole32(IWN_FILTER_BSS);
5563 
5564 	/* HT is negotiated when associating. */
5565 	if (ni->ni_flags & IEEE80211_NODE_HT) {
5566 		enum ieee80211_htprot htprot =
5567 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
5568 		DPRINTF(("%s: htprot = %d\n", __func__, htprot));
5569 		sc->rxon.flags |= htole32(IWN_RXON_HT_PROTMODE(htprot));
5570 	} else
5571 		sc->rxon.flags &= ~htole32(IWN_RXON_HT_PROTMODE(3));
5572 
5573 	if (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) {
5574 		/* 11a or 11n 5GHz */
5575 		sc->rxon.cck_mask  = 0;
5576 		sc->rxon.ofdm_mask = 0x15;
5577 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
5578 		/* 11n 2GHz */
5579 		sc->rxon.cck_mask  = 0x0f;
5580 		sc->rxon.ofdm_mask = 0x15;
5581 	} else {
5582 		if (ni->ni_rates.rs_nrates == 4) {
5583 			/* 11b */
5584 			sc->rxon.cck_mask  = 0x03;
5585 			sc->rxon.ofdm_mask = 0;
5586 		} else {
5587 			/* assume 11g */
5588 			sc->rxon.cck_mask  = 0x0f;
5589 			sc->rxon.ofdm_mask = 0x15;
5590 		}
5591 	}
5592 	DPRINTF(("%s: rxon chan %d flags %x cck %x ofdm %x\n", __func__,
5593 	    sc->rxon.chan, le32toh(sc->rxon.flags), sc->rxon.cck_mask,
5594 	    sc->rxon.ofdm_mask));
5595 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5596 	if (error != 0) {
5597 		printf("%s: could not update configuration\n",
5598 		    sc->sc_dev.dv_xname);
5599 		return error;
5600 	}
5601 
5602 	/* Configuration has changed, set TX power accordingly. */
5603 	if ((error = ops->set_txpower(sc, 1)) != 0) {
5604 		printf("%s: could not set TX power\n", sc->sc_dev.dv_xname);
5605 		return error;
5606 	}
5607 
5608 	/* Fake a join to initialize the TX rate. */
5609 	((struct iwn_node *)ni)->id = IWN_ID_BSS;
5610 	iwn_newassoc(ic, ni, 1);
5611 
5612 	/* Add BSS node. */
5613 	memset(&node, 0, sizeof node);
5614 	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
5615 	node.id = IWN_ID_BSS;
5616 	if (ni->ni_flags & IEEE80211_NODE_HT) {
5617 		node.htmask = (IWN_AMDPU_SIZE_FACTOR_MASK |
5618 		    IWN_AMDPU_DENSITY_MASK);
5619 		node.htflags = htole32(
5620 		    IWN_AMDPU_SIZE_FACTOR(
5621 			(ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_LE)) |
5622 		    IWN_AMDPU_DENSITY(
5623 			(ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) >> 2));
5624 	}
5625 	DPRINTF(("adding BSS node\n"));
5626 	error = ops->add_node(sc, &node, 1);
5627 	if (error != 0) {
5628 		printf("%s: could not add BSS node\n", sc->sc_dev.dv_xname);
5629 		return error;
5630 	}
5631 
5632 	/* Cache address of AP in case it changes after a background scan. */
5633 	IEEE80211_ADDR_COPY(sc->bss_node_addr, ni->ni_macaddr);
5634 
5635 	DPRINTF(("setting link quality for node %d\n", node.id));
5636 	if ((error = iwn_set_link_quality(sc, ni)) != 0) {
5637 		printf("%s: could not setup link quality for node %d\n",
5638 		    sc->sc_dev.dv_xname, node.id);
5639 		return error;
5640 	}
5641 
5642 	if ((error = iwn_init_sensitivity(sc)) != 0) {
5643 		printf("%s: could not set sensitivity\n",
5644 		    sc->sc_dev.dv_xname);
5645 		return error;
5646 	}
5647 	/* Start periodic calibration timer. */
5648 	sc->calib.state = IWN_CALIB_STATE_ASSOC;
5649 	sc->calib_cnt = 0;
5650 	timeout_add_msec(&sc->calib_to, 500);
5651 
5652 	ieee80211_mira_node_init(&wn->mn);
5653 
5654 	/* Link LED always on while associated. */
5655 	iwn_set_led(sc, IWN_LED_LINK, 0, 1);
5656 	return 0;
5657 }
5658 
5659 /*
5660  * We support CCMP hardware encryption/decryption of unicast frames only.
5661  * HW support for TKIP really sucks.  We should let TKIP die anyway.
5662  */
5663 int
5664 iwn_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
5665     struct ieee80211_key *k)
5666 {
5667 	struct iwn_softc *sc = ic->ic_softc;
5668 	struct iwn_ops *ops = &sc->ops;
5669 	struct iwn_node *wn = (void *)ni;
5670 	struct iwn_node_info node;
5671 	uint16_t kflags;
5672 
5673 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
5674 	    k->k_cipher != IEEE80211_CIPHER_CCMP)
5675 		return ieee80211_set_key(ic, ni, k);
5676 
5677 	kflags = IWN_KFLAG_CCMP | IWN_KFLAG_MAP | IWN_KFLAG_KID(k->k_id);
5678 	if (k->k_flags & IEEE80211_KEY_GROUP)
5679 		kflags |= IWN_KFLAG_GROUP;
5680 
5681 	memset(&node, 0, sizeof node);
5682 	node.id = (k->k_flags & IEEE80211_KEY_GROUP) ?
5683 	    sc->broadcast_id : wn->id;
5684 	node.control = IWN_NODE_UPDATE;
5685 	node.flags = IWN_FLAG_SET_KEY;
5686 	node.kflags = htole16(kflags);
5687 	node.kid = k->k_id;
5688 	memcpy(node.key, k->k_key, k->k_len);
5689 	DPRINTF(("set key id=%d for node %d\n", k->k_id, node.id));
5690 	return ops->add_node(sc, &node, 1);
5691 }
5692 
5693 void
5694 iwn_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
5695     struct ieee80211_key *k)
5696 {
5697 	struct iwn_softc *sc = ic->ic_softc;
5698 	struct iwn_ops *ops = &sc->ops;
5699 	struct iwn_node *wn = (void *)ni;
5700 	struct iwn_node_info node;
5701 
5702 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
5703 	    k->k_cipher != IEEE80211_CIPHER_CCMP) {
5704 		/* See comment about other ciphers above. */
5705 		ieee80211_delete_key(ic, ni, k);
5706 		return;
5707 	}
5708 	if (ic->ic_state != IEEE80211_S_RUN)
5709 		return;	/* Nothing to do. */
5710 	memset(&node, 0, sizeof node);
5711 	node.id = (k->k_flags & IEEE80211_KEY_GROUP) ?
5712 	    sc->broadcast_id : wn->id;
5713 	node.control = IWN_NODE_UPDATE;
5714 	node.flags = IWN_FLAG_SET_KEY;
5715 	node.kflags = htole16(IWN_KFLAG_INVALID);
5716 	node.kid = 0xff;
5717 	DPRINTF(("delete keys for node %d\n", node.id));
5718 	(void)ops->add_node(sc, &node, 1);
5719 }
5720 
5721 /*
5722  * This function is called by upper layer when HT protection settings in
5723  * beacons have changed.
5724  */
5725 void
5726 iwn_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
5727 {
5728 	struct iwn_softc *sc = ic->ic_softc;
5729 	struct iwn_ops *ops = &sc->ops;
5730 	enum ieee80211_htprot htprot;
5731 	struct iwn_rxon_assoc rxon_assoc;
5732 	int s, error;
5733 
5734 	/* Update HT protection mode setting. */
5735 	htprot = (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK) >>
5736 	    IEEE80211_HTOP1_PROT_SHIFT;
5737 	sc->rxon.flags &= ~htole32(IWN_RXON_HT_PROTMODE(3));
5738 	sc->rxon.flags |= htole32(IWN_RXON_HT_PROTMODE(htprot));
5739 
5740 	/* Update RXON config. */
5741 	memset(&rxon_assoc, 0, sizeof(rxon_assoc));
5742 	rxon_assoc.flags = sc->rxon.flags;
5743 	rxon_assoc.filter = sc->rxon.filter;
5744 	rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask;
5745 	rxon_assoc.cck_mask = sc->rxon.cck_mask;
5746 	rxon_assoc.ht_single_mask = sc->rxon.ht_single_mask;
5747 	rxon_assoc.ht_dual_mask = sc->rxon.ht_dual_mask;
5748 	rxon_assoc.ht_triple_mask = sc->rxon.ht_triple_mask;
5749 	rxon_assoc.rxchain = sc->rxon.rxchain;
5750 	rxon_assoc.acquisition = sc->rxon.acquisition;
5751 
5752 	s = splnet();
5753 
5754 	error = iwn_cmd(sc, IWN_CMD_RXON_ASSOC, &rxon_assoc,
5755 	    sizeof(rxon_assoc), 1);
5756 	if (error != 0)
5757 		printf("%s: RXON_ASSOC command failed\n", sc->sc_dev.dv_xname);
5758 
5759 	DELAY(100);
5760 
5761 	/* All RXONs wipe the firmware's txpower table. Restore it. */
5762 	error = ops->set_txpower(sc, 1);
5763 	if (error != 0)
5764 		printf("%s: could not set TX power\n", sc->sc_dev.dv_xname);
5765 
5766 	DELAY(100);
5767 
5768 	/* Restore power saving level */
5769 	if (ic->ic_flags & IEEE80211_F_PMGTON)
5770 		error = iwn_set_pslevel(sc, 0, 3, 1);
5771 	else
5772 		error = iwn_set_pslevel(sc, 0, 0, 1);
5773 	if (error != 0)
5774 		printf("%s: could not set PS level\n", sc->sc_dev.dv_xname);
5775 
5776 	splx(s);
5777 }
5778 
5779 /*
5780  * This function is called by upper layer when an ADDBA request is received
5781  * from another STA and before the ADDBA response is sent.
5782  */
5783 int
5784 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
5785     uint8_t tid)
5786 {
5787 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
5788 	struct iwn_softc *sc = ic->ic_softc;
5789 	struct iwn_ops *ops = &sc->ops;
5790 	struct iwn_node *wn = (void *)ni;
5791 	struct iwn_node_info node;
5792 
5793 	memset(&node, 0, sizeof node);
5794 	node.id = wn->id;
5795 	node.control = IWN_NODE_UPDATE;
5796 	node.flags = IWN_FLAG_SET_ADDBA;
5797 	node.addba_tid = tid;
5798 	node.addba_ssn = htole16(ba->ba_winstart);
5799 	DPRINTF(("ADDBA RA=%d TID=%d SSN=%d\n", wn->id, tid,
5800 	    ba->ba_winstart));
5801 	/* XXX async command, so firmware may still fail to add BA agreement */
5802 	return ops->add_node(sc, &node, 1);
5803 }
5804 
5805 /*
5806  * This function is called by upper layer on teardown of an HT-immediate
5807  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
5808  */
5809 void
5810 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
5811     uint8_t tid)
5812 {
5813 	struct iwn_softc *sc = ic->ic_softc;
5814 	struct iwn_ops *ops = &sc->ops;
5815 	struct iwn_node *wn = (void *)ni;
5816 	struct iwn_node_info node;
5817 
5818 	memset(&node, 0, sizeof node);
5819 	node.id = wn->id;
5820 	node.control = IWN_NODE_UPDATE;
5821 	node.flags = IWN_FLAG_SET_DELBA;
5822 	node.delba_tid = tid;
5823 	DPRINTF(("DELBA RA=%d TID=%d\n", wn->id, tid));
5824 	(void)ops->add_node(sc, &node, 1);
5825 }
5826 
5827 /*
5828  * This function is called by upper layer when an ADDBA response is received
5829  * from another STA.
5830  */
5831 int
5832 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
5833     uint8_t tid)
5834 {
5835 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
5836 	struct iwn_softc *sc = ic->ic_softc;
5837 	struct iwn_ops *ops = &sc->ops;
5838 	struct iwn_node *wn = (void *)ni;
5839 	struct iwn_node_info node;
5840 	int qid = sc->first_agg_txq + tid;
5841 	int error;
5842 
5843 	/* Ensure we can map this TID to an aggregation queue. */
5844 	if (tid >= IWN_NUM_AMPDU_TID || ba->ba_winsize > IWN_SCHED_WINSZ ||
5845 	    qid > sc->ntxqs || (sc->agg_queue_mask & (1 << qid)))
5846 		return ENOSPC;
5847 
5848 	/* Enable TX for the specified RA/TID. */
5849 	wn->disable_tid &= ~(1 << tid);
5850 	memset(&node, 0, sizeof node);
5851 	node.id = wn->id;
5852 	node.control = IWN_NODE_UPDATE;
5853 	node.flags = IWN_FLAG_SET_DISABLE_TID;
5854 	node.disable_tid = htole16(wn->disable_tid);
5855 	error = ops->add_node(sc, &node, 1);
5856 	if (error != 0)
5857 		return error;
5858 
5859 	if ((error = iwn_nic_lock(sc)) != 0)
5860 		return error;
5861 	ops->ampdu_tx_start(sc, ni, tid, ba->ba_winstart);
5862 	iwn_nic_unlock(sc);
5863 
5864 	sc->agg_queue_mask |= (1 << qid);
5865 	sc->sc_tx_ba[tid].wn = wn;
5866 	ba->ba_bitmap = 0;
5867 
5868 	return 0;
5869 }
5870 
5871 void
5872 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
5873     uint8_t tid)
5874 {
5875 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
5876 	struct iwn_softc *sc = ic->ic_softc;
5877 	struct iwn_ops *ops = &sc->ops;
5878 	int qid = sc->first_agg_txq + tid;
5879 	struct iwn_node *wn = (void *)ni;
5880 	struct iwn_node_info node;
5881 
5882 	/* Discard all frames in the current window. */
5883 	iwn_ampdu_txq_advance(sc, &sc->txq[qid], qid,
5884 	    IWN_AGG_SSN_TO_TXQ_IDX(ba->ba_winend));
5885 
5886 	if (iwn_nic_lock(sc) != 0)
5887 		return;
5888 	ops->ampdu_tx_stop(sc, tid, ba->ba_winstart);
5889 	iwn_nic_unlock(sc);
5890 
5891 	sc->agg_queue_mask &= ~(1 << qid);
5892 	sc->sc_tx_ba[tid].wn = NULL;
5893 	ba->ba_bitmap = 0;
5894 
5895 	/* Disable TX for the specified RA/TID. */
5896 	wn->disable_tid |= (1 << tid);
5897 	memset(&node, 0, sizeof node);
5898 	node.id = wn->id;
5899 	node.control = IWN_NODE_UPDATE;
5900 	node.flags = IWN_FLAG_SET_DISABLE_TID;
5901 	node.disable_tid = htole16(wn->disable_tid);
5902 	ops->add_node(sc, &node, 1);
5903 }
5904 
5905 void
5906 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5907     uint8_t tid, uint16_t ssn)
5908 {
5909 	struct iwn_node *wn = (void *)ni;
5910 	int qid = IWN4965_FIRST_AGG_TXQUEUE + tid;
5911 	uint16_t idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn);
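	/* IWN_AGG_SSN_TO_TXQ_IDX presumably maps the SSN to a ring index. */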
5912 
5913 	/* Stop TX scheduler while we're changing its configuration. */
5914 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5915 	    IWN4965_TXQ_STATUS_CHGACT);
5916 
5917 	/* Assign RA/TID translation to the queue. */
5918 	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
5919 	    wn->id << 4 | tid);
5920 
5921 	/* Enable chain-building mode for the queue. */
5922 	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
5923 
5924 	/* Set starting sequence number from the ADDBA request. */
5925 	sc->txq[qid].cur = sc->txq[qid].read = idx;
5926 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | idx);
5927 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5928 
5929 	/* Set scheduler window size. */
5930 	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
5931 	    IWN_SCHED_WINSZ);
5932 	/* Set scheduler frame limit. */
5933 	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5934 	    IWN_SCHED_LIMIT << 16);
5935 
5936 	/* Enable interrupts for the queue. */
5937 	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5938 
5939 	/* Mark the queue as active. */
5940 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5941 	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
5942 	    iwn_tid2fifo[tid] << 1);
5943 }
5944 
5945 void
5946 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
5947 {
5948 	int qid = IWN4965_FIRST_AGG_TXQUEUE + tid;
5949 	uint16_t idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn);
5950 
5951 	/* Stop TX scheduler while we're changing its configuration. */
5952 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5953 	    IWN4965_TXQ_STATUS_CHGACT);
5954 
5955 	/* Set starting sequence number from the ADDBA request. */
5956 	sc->txq[qid].cur = sc->txq[qid].read = idx;
5957 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | idx);
5958 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5959 
5960 	/* Disable interrupts for the queue. */
5961 	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5962 
5963 	/* Mark the queue as inactive. */
5964 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5965 	    IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
5966 }
5967 
5968 void
5969 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5970     uint8_t tid, uint16_t ssn)
5971 {
5972 	int qid = IWN5000_FIRST_AGG_TXQUEUE + tid;
5973 	int idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn);
5974 	struct iwn_node *wn = (void *)ni;
5975 
5976 	/* Stop TX scheduler while we're changing its configuration. */
5977 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5978 	    IWN5000_TXQ_STATUS_CHGACT);
5979 
5980 	/* Assign RA/TID translation to the queue. */
5981 	iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
5982 	    wn->id << 4 | tid);
5983 
5984 	/* Enable chain-building mode for the queue. */
5985 	iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
5986 
5987 	/* Enable aggregation for the queue. */
5988 	iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5989 
5990 	/* Set starting sequence number from the ADDBA request. */
5991 	sc->txq[qid].cur = sc->txq[qid].read = idx;
5992 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | idx);
5993 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5994 
5995 	/* Set scheduler window size and frame limit. */
5996 	iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5997 	    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5998 
5999 	/* Enable interrupts for the queue. */
6000 	iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
6001 
6002 	/* Mark the queue as active. */
6003 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6004 	    IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
6005 }
6006 
6007 void
6008 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
6009 {
6010 	int qid = IWN5000_FIRST_AGG_TXQUEUE + tid;
6011 	int idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn);
6012 
6013 	/* Stop TX scheduler while we're changing its configuration. */
6014 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6015 	    IWN5000_TXQ_STATUS_CHGACT);
6016 
6017 	/* Disable aggregation for the queue. */
6018 	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
6019 
6020 	/* Set starting sequence number from the ADDBA request. */
6021 	sc->txq[qid].cur = sc->txq[qid].read = idx;
6022 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | idx);
6023 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
6024 
6025 	/* Disable interrupts for the queue. */
6026 	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
6027 
6028 	/* Mark the queue as inactive. */
6029 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6030 	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
6031 }
6032 
6033 /*
6034  * Query calibration tables from the initialization firmware.  We do this
6035  * only once at first boot.  Called from a process context.
6036  */
6037 int
6038 iwn5000_query_calibration(struct iwn_softc *sc)
6039 {
6040 	struct iwn5000_calib_config cmd;
6041 	int error;
6042 
6043 	memset(&cmd, 0, sizeof cmd);
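	/* All-ones bitmasks apparently select every calibration type. */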
6044 	cmd.ucode.once.enable = 0xffffffff;
6045 	cmd.ucode.once.start  = 0xffffffff;
6046 	cmd.ucode.once.send   = 0xffffffff;
6047 	cmd.ucode.flags       = 0xffffffff;
6048 	DPRINTF(("sending calibration query\n"));
6049 	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
6050 	if (error != 0)
6051 		return error;
6052 
6053 	/* Wait at most two seconds for calibration to complete. */
6054 	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
6055 		error = tsleep_nsec(sc, PCATCH, "iwncal", SEC_TO_NSEC(2));
6056 	return error;
6057 }
6058 
6059 /*
6060  * Send calibration results to the runtime firmware.  These results were
6061  * obtained on first boot from the initialization firmware.
6062  */
6063 int
6064 iwn5000_send_calibration(struct iwn_softc *sc)
6065 {
6066 	int idx, error;
6067 
6068 	for (idx = 0; idx < 5; idx++) {
6069 		if (sc->calibcmd[idx].buf == NULL)
6070 			continue;	/* No results available. */
6071 		DPRINTF(("send calibration result idx=%d len=%d\n",
6072 		    idx, sc->calibcmd[idx].len));
6073 		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
6074 		    sc->calibcmd[idx].len, 0);
6075 		if (error != 0) {
6076 			printf("%s: could not send calibration result\n",
6077 			    sc->sc_dev.dv_xname);
6078 			return error;
6079 		}
6080 	}
6081 	return 0;
6082 }
6083 
6084 int
6085 iwn5000_send_wimax_coex(struct iwn_softc *sc)
6086 {
6087 	struct iwn5000_wimax_coex wimax;
6088 
6089 #ifdef notyet
6090 	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
6091 		/* Enable WiMAX coexistence for combo adapters. */
6092 		wimax.flags =
6093 		    IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
6094 		    IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
6095 		    IWN_WIMAX_COEX_STA_TABLE_VALID |
6096 		    IWN_WIMAX_COEX_ENABLE;
6097 		memcpy(wimax.events, iwn6050_wimax_events,
6098 		    sizeof iwn6050_wimax_events);
6099 	} else
6100 #endif
6101 	{
6102 		/* Disable WiMAX coexistence. */
6103 		wimax.flags = 0;
6104 		memset(wimax.events, 0, sizeof wimax.events);
6105 	}
6106 	DPRINTF(("Configuring WiMAX coexistence\n"));
6107 	return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
6108 }
6109 
6110 int
6111 iwn5000_crystal_calib(struct iwn_softc *sc)
6112 {
6113 	struct iwn5000_phy_calib_crystal cmd;
6114 
6115 	memset(&cmd, 0, sizeof cmd);
6116 	cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
6117 	cmd.ngroups = 1;
6118 	cmd.isvalid = 1;
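	/* cap_pin values are bytes 0 and 2 of the EEPROM crystal word. */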
6119 	cmd.cap_pin[0] = letoh32(sc->eeprom_crystal) & 0xff;
6120 	cmd.cap_pin[1] = (letoh32(sc->eeprom_crystal) >> 16) & 0xff;
6121 	DPRINTF(("sending crystal calibration %d, %d\n",
6122 	    cmd.cap_pin[0], cmd.cap_pin[1]));
6123 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6124 }
6125 
6126 int
6127 iwn6000_temp_offset_calib(struct iwn_softc *sc)
6128 {
6129 	struct iwn6000_phy_calib_temp_offset cmd;
6130 
6131 	memset(&cmd, 0, sizeof cmd);
6132 	cmd.code = IWN6000_PHY_CALIB_TEMP_OFFSET;
6133 	cmd.ngroups = 1;
6134 	cmd.isvalid = 1;
6135 	if (sc->eeprom_temp != 0)
6136 		cmd.offset = htole16(sc->eeprom_temp);
6137 	else
6138 		cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
6139 	DPRINTF(("setting radio sensor offset to %d\n", letoh16(cmd.offset)));
6140 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6141 }
6142 
6143 int
6144 iwn2000_temp_offset_calib(struct iwn_softc *sc)
6145 {
6146 	struct iwn2000_phy_calib_temp_offset cmd;
6147 
6148 	memset(&cmd, 0, sizeof cmd);
6149 	cmd.code = IWN2000_PHY_CALIB_TEMP_OFFSET;
6150 	cmd.ngroups = 1;
6151 	cmd.isvalid = 1;
6152 	if (sc->eeprom_rawtemp != 0) {
6153 		cmd.offset_low = htole16(sc->eeprom_rawtemp);
6154 		cmd.offset_high = htole16(sc->eeprom_temp);
6155 	} else {
6156 		cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET);
6157 		cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET);
6158 	}
6159 	cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage);
6160 	DPRINTF(("setting radio sensor offset to %d:%d, voltage to %d\n",
6161 	    letoh16(cmd.offset_low), letoh16(cmd.offset_high),
6162 	    letoh16(cmd.burnt_voltage_ref)));
6163 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6164 }
6165 
6166 /*
6167  * This function is called after the runtime firmware notifies us of its
6168  * readiness (called in a process context).
6169  */
6170 int
6171 iwn4965_post_alive(struct iwn_softc *sc)
6172 {
6173 	int error, qid;
6174 
6175 	if ((error = iwn_nic_lock(sc)) != 0)
6176 		return error;
6177 
6178 	/* Clear TX scheduler state in SRAM. */
6179 	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
6180 	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
6181 	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
6182 
6183 	/* Set physical address of TX scheduler rings (1KB aligned). */
6184 	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
6185 
6186 	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
6187 
6188 	/* Disable chain mode for all our 16 queues. */
6189 	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
6190 
6191 	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
6192 		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
6193 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
6194 
6195 		/* Set scheduler window size. */
6196 		iwn_mem_write(sc, sc->sched_base +
6197 		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
6198 		/* Set scheduler frame limit. */
6199 		iwn_mem_write(sc, sc->sched_base +
6200 		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
6201 		    IWN_SCHED_LIMIT << 16);
6202 	}
6203 
6204 	/* Enable interrupts for all our 16 queues. */
6205 	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
6206 	/* Identify TX FIFO rings (0-7). */
6207 	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
6208 
6209 	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6210 	for (qid = 0; qid < 7; qid++) {
6211 		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
6212 		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6213 		    IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
6214 	}
6215 	iwn_nic_unlock(sc);
6216 	return 0;
6217 }
6218 
6219 /*
6220  * This function is called after the initialization or runtime firmware
6221  * notifies us of its readiness (called in a process context).
6222  */
6223 int
6224 iwn5000_post_alive(struct iwn_softc *sc)
6225 {
6226 	int error, qid;
6227 
6228 	/* Switch to using ICT interrupt mode. */
6229 	iwn5000_ict_reset(sc);
6230 
6231 	if ((error = iwn_nic_lock(sc)) != 0)
6232 		return error;
6233 
6234 	/* Clear TX scheduler state in SRAM. */
6235 	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
6236 	iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
6237 	    IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
6238 
6239 	/* Set physical address of TX scheduler rings (1KB aligned). */
6240 	iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
6241 
6242 	/* Disable scheduler chain extension (enabled by default in HW). */
6243 	iwn_prph_write(sc, IWN5000_SCHED_CHAINEXT_EN, 0);
6244 
6245 	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
6246 
6247 	/* Enable chain mode for all queues, except command queue. */
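	/* Bit 4 is left clear; queue 4 is presumably the command queue. */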
6248 	iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
6249 	iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
6250 
6251 	for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
6252 		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
6253 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
6254 
6255 		iwn_mem_write(sc, sc->sched_base +
6256 		    IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
6257 		/* Set scheduler window size and frame limit. */
6258 		iwn_mem_write(sc, sc->sched_base +
6259 		    IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
6260 		    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
6261 	}
6262 
6263 	/* Enable interrupts for all our 20 queues. */
6264 	iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
6265 	/* Identify TX FIFO rings (0-7). */
6266 	iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
6267 
6268 	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6269 	for (qid = 0; qid < 7; qid++) {
6270 		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
6271 		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6272 		    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
6273 	}
6274 	iwn_nic_unlock(sc);
6275 
6276 	/* Configure WiMAX coexistence for combo adapters. */
6277 	error = iwn5000_send_wimax_coex(sc);
6278 	if (error != 0) {
6279 		printf("%s: could not configure WiMAX coexistence\n",
6280 		    sc->sc_dev.dv_xname);
6281 		return error;
6282 	}
6283 	if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
6284 		/* Perform crystal calibration. */
6285 		error = iwn5000_crystal_calib(sc);
6286 		if (error != 0) {
6287 			printf("%s: crystal calibration failed\n",
6288 			    sc->sc_dev.dv_xname);
6289 			return error;
6290 		}
6291 	}
6292 	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
6293 		/* Query calibration from the initialization firmware. */
6294 		if ((error = iwn5000_query_calibration(sc)) != 0) {
6295 			printf("%s: could not query calibration\n",
6296 			    sc->sc_dev.dv_xname);
6297 			return error;
6298 		}
6299 		/*
6300 		 * We have the calibration results now, reboot with the
6301 		 * runtime firmware (call ourselves recursively!)
6302 		 */
6303 		iwn_hw_stop(sc);
6304 		error = iwn_hw_init(sc);
6305 	} else {
6306 		/* Send calibration results to runtime firmware. */
6307 		error = iwn5000_send_calibration(sc);
6308 	}
6309 	return error;
6310 }
6311 
6312 /*
6313  * The firmware boot code is small and is intended to be copied directly into
6314  * the NIC internal memory (no DMA transfer).
6315  */
6316 int
6317 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
6318 {
6319 	int error, ntries;
6320 
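	/* The BSM transfer length (IWN_BSM_WR_DWCOUNT) is in 32-bit words. */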
6321 	size /= sizeof (uint32_t);
6322 
6323 	if ((error = iwn_nic_lock(sc)) != 0)
6324 		return error;
6325 
6326 	/* Copy microcode image into NIC memory. */
6327 	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
6328 	    (const uint32_t *)ucode, size);
6329 
6330 	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
6331 	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
6332 	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
6333 
6334 	/* Start boot load now. */
6335 	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
6336 
6337 	/* Wait for transfer to complete. */
6338 	for (ntries = 0; ntries < 1000; ntries++) {
6339 		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
6340 		    IWN_BSM_WR_CTRL_START))
6341 			break;
6342 		DELAY(10);
6343 	}
6344 	if (ntries == 1000) {
6345 		printf("%s: could not load boot firmware\n",
6346 		    sc->sc_dev.dv_xname);
6347 		iwn_nic_unlock(sc);
6348 		return ETIMEDOUT;
6349 	}
6350 
6351 	/* Enable boot after power up. */
6352 	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
6353 
6354 	iwn_nic_unlock(sc);
6355 	return 0;
6356 }
6357 
6358 int
6359 iwn4965_load_firmware(struct iwn_softc *sc)
6360 {
6361 	struct iwn_fw_info *fw = &sc->fw;
6362 	struct iwn_dma_info *dma = &sc->fw_dma;
6363 	int error;
6364 
6365 	/* Copy initialization sections into pre-allocated DMA-safe memory. */
6366 	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
6367 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->init.datasz,
6368 	    BUS_DMASYNC_PREWRITE);
6369 	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6370 	    fw->init.text, fw->init.textsz);
6371 	bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ,
6372 	    fw->init.textsz, BUS_DMASYNC_PREWRITE);
6373 
6374 	/* Tell adapter where to find initialization sections. */
6375 	if ((error = iwn_nic_lock(sc)) != 0)
6376 		return error;
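	/* BSM DRAM pointers are apparently expressed in 16-byte units. */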
6377 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6378 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
6379 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6380 	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6381 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
6382 	iwn_nic_unlock(sc);
6383 
6384 	/* Load firmware boot code. */
6385 	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
6386 	if (error != 0) {
6387 		printf("%s: could not load boot firmware\n",
6388 		    sc->sc_dev.dv_xname);
6389 		return error;
6390 	}
6391 	/* Now press "execute". */
6392 	IWN_WRITE(sc, IWN_RESET, 0);
6393 
6394 	/* Wait at most one second for first alive notification. */
6395 	if ((error = tsleep_nsec(sc, PCATCH, "iwninit", SEC_TO_NSEC(1))) != 0) {
6396 		printf("%s: timeout waiting for adapter to initialize\n",
6397 		    sc->sc_dev.dv_xname);
6398 		return error;
6399 	}
6400 
6401 	/* Retrieve current temperature for initial TX power calibration. */
6402 	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
6403 	sc->temp = iwn4965_get_temperature(sc);
6404 
6405 	/* Copy runtime sections into pre-allocated DMA-safe memory. */
6406 	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
6407 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->main.datasz,
6408 	    BUS_DMASYNC_PREWRITE);
6409 	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6410 	    fw->main.text, fw->main.textsz);
6411 	bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ,
6412 	    fw->main.textsz, BUS_DMASYNC_PREWRITE);
6413 
6414 	/* Tell adapter where to find runtime sections. */
6415 	if ((error = iwn_nic_lock(sc)) != 0)
6416 		return error;
6417 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6418 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
6419 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6420 	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6421 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
6422 	    IWN_FW_UPDATED | fw->main.textsz);
6423 	iwn_nic_unlock(sc);
6424 
6425 	return 0;
6426 }
6427 
6428 int
6429 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
6430     const uint8_t *section, int size)
6431 {
6432 	struct iwn_dma_info *dma = &sc->fw_dma;
6433 	int error;
6434 
6435 	/* Copy firmware section into pre-allocated DMA-safe memory. */
6436 	memcpy(dma->vaddr, section, size);
6437 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
6438 
6439 	if ((error = iwn_nic_lock(sc)) != 0)
6440 		return error;
6441 
6442 	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6443 	    IWN_FH_TX_CONFIG_DMA_PAUSE);
6444 
6445 	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
6446 	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
6447 	    IWN_LOADDR(dma->paddr));
6448 	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
6449 	    IWN_HIADDR(dma->paddr) << 28 | size);
6450 	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
6451 	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
6452 	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
6453 	    IWN_FH_TXBUF_STATUS_TFBD_VALID);
6454 
6455 	/* Kick Flow Handler to start DMA transfer. */
6456 	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6457 	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
6458 
6459 	iwn_nic_unlock(sc);
6460 
6461 	/* Wait at most five seconds for FH DMA transfer to complete. */
6462 	return tsleep_nsec(sc, PCATCH, "iwninit", SEC_TO_NSEC(5));
6463 }
6464 
6465 int
6466 iwn5000_load_firmware(struct iwn_softc *sc)
6467 {
6468 	struct iwn_fw_part *fw;
6469 	int error;
6470 
6471 	/* Load the initialization firmware on first boot only. */
6472 	fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
6473 	    &sc->fw.main : &sc->fw.init;
6474 
6475 	error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
6476 	    fw->text, fw->textsz);
6477 	if (error != 0) {
6478 		printf("%s: could not load firmware %s section\n",
6479 		    sc->sc_dev.dv_xname, ".text");
6480 		return error;
6481 	}
6482 	error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
6483 	    fw->data, fw->datasz);
6484 	if (error != 0) {
6485 		printf("%s: could not load firmware %s section\n",
6486 		    sc->sc_dev.dv_xname, ".data");
6487 		return error;
6488 	}
6489 
6490 	/* Now press "execute". */
6491 	IWN_WRITE(sc, IWN_RESET, 0);
6492 	return 0;
6493 }
6494 
6495 /*
6496  * Extract text and data sections from a legacy firmware image.
6497  */
6498 int
6499 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
6500 {
6501 	const uint32_t *ptr;
6502 	size_t hdrlen = 24;
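	/* 24 bytes: the API revision word plus five section-size words. */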
6503 	uint32_t rev;
6504 
6505 	ptr = (const uint32_t *)fw->data;
6506 	rev = letoh32(*ptr++);
6507 
6508 	/* Check firmware API version. */
6509 	if (IWN_FW_API(rev) <= 1) {
6510 		printf("%s: bad firmware, need API version >=2\n",
6511 		    sc->sc_dev.dv_xname);
6512 		return EINVAL;
6513 	}
6514 	if (IWN_FW_API(rev) >= 3) {
6515 		/* Skip build number (version 2 header). */
6516 		hdrlen += 4;
6517 		ptr++;
6518 	}
6519 	if (fw->size < hdrlen) {
6520 		printf("%s: firmware too short: %zu bytes\n",
6521 		    sc->sc_dev.dv_xname, fw->size);
6522 		return EINVAL;
6523 	}
6524 	fw->main.textsz = letoh32(*ptr++);
6525 	fw->main.datasz = letoh32(*ptr++);
6526 	fw->init.textsz = letoh32(*ptr++);
6527 	fw->init.datasz = letoh32(*ptr++);
6528 	fw->boot.textsz = letoh32(*ptr++);
6529 
6530 	/* Check that all firmware sections fit. */
6531 	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
6532 	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
6533 		printf("%s: firmware too short: %zu bytes\n",
6534 		    sc->sc_dev.dv_xname, fw->size);
6535 		return EINVAL;
6536 	}
6537 
6538 	/* Get pointers to firmware sections. */
6539 	fw->main.text = (const uint8_t *)ptr;
6540 	fw->main.data = fw->main.text + fw->main.textsz;
6541 	fw->init.text = fw->main.data + fw->main.datasz;
6542 	fw->init.data = fw->init.text + fw->init.textsz;
6543 	fw->boot.text = fw->init.data + fw->init.datasz;
6544 	return 0;
6545 }
6546 
6547 /*
6548  * Extract text and data sections from a TLV firmware image.
6549  */
6550 int
6551 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
6552     uint16_t alt)
6553 {
6554 	const struct iwn_fw_tlv_hdr *hdr;
6555 	const struct iwn_fw_tlv *tlv;
6556 	const uint8_t *ptr, *end;
6557 	uint64_t altmask;
6558 	uint32_t len;
6559 
6560 	if (fw->size < sizeof (*hdr)) {
6561 		printf("%s: firmware too short: %zu bytes\n",
6562 		    sc->sc_dev.dv_xname, fw->size);
6563 		return EINVAL;
6564 	}
6565 	hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
6566 	if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
6567 		printf("%s: bad firmware signature 0x%08x\n",
6568 		    sc->sc_dev.dv_xname, letoh32(hdr->signature));
6569 		return EINVAL;
6570 	}
6571 	DPRINTF(("FW: \"%.64s\", build 0x%x\n", hdr->descr,
6572 	    letoh32(hdr->build)));
6573 
6574 	/*
6575 	 * Select the closest supported alternative that is less than
6576 	 * or equal to the specified one.
6577 	 */
6578 	altmask = letoh64(hdr->altmask);
6579 	while (alt > 0 && !(altmask & (1ULL << alt)))
6580 		alt--;	/* Downgrade. */
6581 	DPRINTF(("using alternative %d\n", alt));
6582 
6583 	ptr = (const uint8_t *)(hdr + 1);
6584 	end = (const uint8_t *)(fw->data + fw->size);
6585 
6586 	/* Parse type-length-value fields. */
6587 	while (ptr + sizeof (*tlv) <= end) {
6588 		tlv = (const struct iwn_fw_tlv *)ptr;
6589 		len = letoh32(tlv->len);
6590 
6591 		ptr += sizeof (*tlv);
6592 		if (ptr + len > end) {
6593 			printf("%s: firmware too short: %zu bytes\n",
6594 			    sc->sc_dev.dv_xname, fw->size);
6595 			return EINVAL;
6596 		}
6597 		/* Skip other alternatives. */
6598 		if (tlv->alt != 0 && tlv->alt != htole16(alt))
6599 			goto next;
6600 
6601 		switch (letoh16(tlv->type)) {
6602 		case IWN_FW_TLV_MAIN_TEXT:
6603 			fw->main.text = ptr;
6604 			fw->main.textsz = len;
6605 			break;
6606 		case IWN_FW_TLV_MAIN_DATA:
6607 			fw->main.data = ptr;
6608 			fw->main.datasz = len;
6609 			break;
6610 		case IWN_FW_TLV_INIT_TEXT:
6611 			fw->init.text = ptr;
6612 			fw->init.textsz = len;
6613 			break;
6614 		case IWN_FW_TLV_INIT_DATA:
6615 			fw->init.data = ptr;
6616 			fw->init.datasz = len;
6617 			break;
6618 		case IWN_FW_TLV_BOOT_TEXT:
6619 			fw->boot.text = ptr;
6620 			fw->boot.textsz = len;
6621 			break;
6622 		case IWN_FW_TLV_ENH_SENS:
6623 			if (len != 0) {
6624 				printf("%s: TLV type %d has invalid size %u\n",
6625 				    sc->sc_dev.dv_xname, letoh16(tlv->type),
6626 				    len);
6627 				goto next;
6628 			}
6629 			sc->sc_flags |= IWN_FLAG_ENH_SENS;
6630 			break;
6631 		case IWN_FW_TLV_PHY_CALIB:
6632 			if (len != sizeof(uint32_t)) {
6633 				printf("%s: TLV type %d has invalid size %u\n",
6634 				    sc->sc_dev.dv_xname, letoh16(tlv->type),
6635 				    len);
6636 				goto next;
6637 			}
6638 			if (letoh32(*ptr) <= IWN5000_PHY_CALIB_MAX) {
6639 				sc->reset_noise_gain = letoh32(*ptr);
6640 				sc->noise_gain = letoh32(*ptr) + 1;
6641 			}
6642 			break;
6643 		case IWN_FW_TLV_FLAGS:
6644 			if (len < sizeof(uint32_t))
6645 				break;
6646 			if (len % sizeof(uint32_t))
6647 				break;
6648 			sc->tlv_feature_flags = letoh32(*ptr);
6649 			DPRINTF(("feature: 0x%08x\n", sc->tlv_feature_flags));
6650 			break;
6651 		default:
6652 			DPRINTF(("TLV type %d not handled\n",
6653 			    letoh16(tlv->type)));
6654 			break;
6655 		}
6656  next:		/* TLV fields are 32-bit aligned. */
6657 		ptr += (len + 3) & ~3;
6658 	}
6659 	return 0;
6660 }
6661 
6662 int
6663 iwn_read_firmware(struct iwn_softc *sc)
6664 {
6665 	struct iwn_fw_info *fw = &sc->fw;
6666 	int error;
6667 
6668 	/*
6669 	 * Some PHY calibration commands are firmware-dependent; these
6670 	 * are the default values that will be overridden if
6671 	 * necessary.
6672 	 */
6673 	sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
6674 	sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;
6675 
6676 	memset(fw, 0, sizeof (*fw));
6677 
6678 	/* Read firmware image from filesystem. */
6679 	if ((error = loadfirmware(sc->fwname, &fw->data, &fw->size)) != 0) {
6680 		printf("%s: could not read firmware %s (error %d)\n",
6681 		    sc->sc_dev.dv_xname, sc->fwname, error);
6682 		return error;
6683 	}
6684 	if (fw->size < sizeof (uint32_t)) {
6685 		printf("%s: firmware too short: %zu bytes\n",
6686 		    sc->sc_dev.dv_xname, fw->size);
6687 		free(fw->data, M_DEVBUF, fw->size);
6688 		return EINVAL;
6689 	}
6690 
6691 	/* Retrieve text and data sections. */
6692 	if (*(const uint32_t *)fw->data != 0)	/* Legacy image. */
6693 		error = iwn_read_firmware_leg(sc, fw);
6694 	else
6695 		error = iwn_read_firmware_tlv(sc, fw, 1);
6696 	if (error != 0) {
6697 		printf("%s: could not read firmware sections\n",
6698 		    sc->sc_dev.dv_xname);
6699 		free(fw->data, M_DEVBUF, fw->size);
6700 		return error;
6701 	}
6702 
6703 	/* Make sure text and data sections fit in hardware memory. */
6704 	if (fw->main.textsz > sc->fw_text_maxsz ||
6705 	    fw->main.datasz > sc->fw_data_maxsz ||
6706 	    fw->init.textsz > sc->fw_text_maxsz ||
6707 	    fw->init.datasz > sc->fw_data_maxsz ||
6708 	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
6709 	    (fw->boot.textsz & 3) != 0) {
6710 		printf("%s: firmware sections too large\n",
6711 		    sc->sc_dev.dv_xname);
6712 		free(fw->data, M_DEVBUF, fw->size);
6713 		return EINVAL;
6714 	}
6715 
6716 	/* We can proceed with loading the firmware. */
6717 	return 0;
6718 }
6719 
6720 int
6721 iwn_clock_wait(struct iwn_softc *sc)
6722 {
6723 	int ntries;
6724 
6725 	/* Set "initialization complete" bit. */
6726 	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6727 
6728 	/* Wait for clock stabilization. */
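	/* Poll every 10us; give up after 25ms (2500 iterations). */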
6729 	for (ntries = 0; ntries < 2500; ntries++) {
6730 		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
6731 			return 0;
6732 		DELAY(10);
6733 	}
6734 	printf("%s: timeout waiting for clock stabilization\n",
6735 	    sc->sc_dev.dv_xname);
6736 	return ETIMEDOUT;
6737 }
6738 
6739 int
6740 iwn_apm_init(struct iwn_softc *sc)
6741 {
6742 	pcireg_t reg;
6743 	int error;
6744 
6745 	/* Disable L0s exit timer (NMI bug workaround). */
6746 	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
6747 	/* Don't wait for ICH L0s (ICH bug workaround). */
6748 	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
6749 
6750 	/* Set FH wait threshold to max; works around a HW bug under stress. */
6751 	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
6752 
6753 	/* Enable HAP INTA to move adapter from L1a to L0s. */
6754 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
6755 
6756 	/* Retrieve PCIe Active State Power Management (ASPM). */
6757 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
6758 	    sc->sc_cap_off + PCI_PCIE_LCSR);
6759 	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
6760 	if (reg & PCI_PCIE_LCSR_ASPM_L1)	/* L1 Entry enabled. */
6761 		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6762 	else
6763 		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6764 
6765 	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
6766 	    sc->hw_type <= IWN_HW_REV_TYPE_1000)
6767 		IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
6768 
6769 	/* Wait for clock stabilization before accessing prph. */
6770 	if ((error = iwn_clock_wait(sc)) != 0)
6771 		return error;
6772 
6773 	if ((error = iwn_nic_lock(sc)) != 0)
6774 		return error;
6775 	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
6776 		/* Enable DMA and BSM (Bootstrap State Machine). */
6777 		iwn_prph_write(sc, IWN_APMG_CLK_EN,
6778 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
6779 		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
6780 	} else {
6781 		/* Enable DMA. */
6782 		iwn_prph_write(sc, IWN_APMG_CLK_EN,
6783 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6784 	}
6785 	DELAY(20);
6786 	/* Disable L1-Active. */
6787 	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
6788 	iwn_nic_unlock(sc);
6789 
6790 	return 0;
6791 }
6792 
6793 void
6794 iwn_apm_stop_master(struct iwn_softc *sc)
6795 {
6796 	int ntries;
6797 
6798 	/* Stop busmaster DMA activity. */
6799 	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
6800 	for (ntries = 0; ntries < 100; ntries++) {
6801 		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
6802 			return;
6803 		DELAY(10);
6804 	}
6805 	printf("%s: timeout waiting for master\n", sc->sc_dev.dv_xname);
6806 }
6807 
6808 void
6809 iwn_apm_stop(struct iwn_softc *sc)
6810 {
6811 	iwn_apm_stop_master(sc);
6812 
6813 	/* Reset the entire device. */
6814 	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
6815 	DELAY(10);
6816 	/* Clear "initialization complete" bit. */
6817 	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6818 }
6819 
6820 int
6821 iwn4965_nic_config(struct iwn_softc *sc)
6822 {
6823 	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
6824 		/*
6825 		 * I don't believe this to be correct, but this is what the
6826 		 * vendor driver is doing. Probably the bits should not be
6827 		 * shifted in IWN_RFCFG_*.
6828 		 */
6829 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6830 		    IWN_RFCFG_TYPE(sc->rfcfg) |
6831 		    IWN_RFCFG_STEP(sc->rfcfg) |
6832 		    IWN_RFCFG_DASH(sc->rfcfg));
6833 	}
6834 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6835 	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6836 	return 0;
6837 }
6838 
6839 int
6840 iwn5000_nic_config(struct iwn_softc *sc)
6841 {
6842 	uint32_t tmp;
6843 	int error;
6844 
6845 	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
6846 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6847 		    IWN_RFCFG_TYPE(sc->rfcfg) |
6848 		    IWN_RFCFG_STEP(sc->rfcfg) |
6849 		    IWN_RFCFG_DASH(sc->rfcfg));
6850 	}
6851 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6852 	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6853 
6854 	if ((error = iwn_nic_lock(sc)) != 0)
6855 		return error;
6856 	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
6857 
6858 	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
6859 		/*
6860 		 * Select first Switching Voltage Regulator (1.32V) to
6861 		 * solve a stability issue related to noisy DC2DC line
6862 		 * in the silicon of 1000 Series.
6863 		 */
6864 		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
6865 		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
6866 		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
6867 		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
6868 	}
6869 	iwn_nic_unlock(sc);
6870 
6871 	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
6872 		/* Use internal power amplifier only. */
6873 		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
6874 	}
6875 	if ((sc->hw_type == IWN_HW_REV_TYPE_6050 ||
6876 	     sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) {
6877 		/* Indicate that ROM calibration version is >=6. */
6878 		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
6879 	}
6880 	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
6881 		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2);
6882 	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
6883 	    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
6884 	    sc->hw_type == IWN_HW_REV_TYPE_135 ||
6885 	    sc->hw_type == IWN_HW_REV_TYPE_105)
6886 		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_IQ_INVERT);
6887 	return 0;
6888 }
6889 
6890 /*
6891  * Take NIC ownership over Intel Active Management Technology (AMT).
6892  */
6893 int
6894 iwn_hw_prepare(struct iwn_softc *sc)
6895 {
6896 	int ntries;
6897 
6898 	/* Check if hardware is ready. */
6899 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6900 	for (ntries = 0; ntries < 5; ntries++) {
6901 		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6902 		    IWN_HW_IF_CONFIG_NIC_READY)
6903 			return 0;
6904 		DELAY(10);
6905 	}
6906 
6907 	/* Hardware not ready; force it into the ready state. */
6908 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
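	/* Poll every 10us, up to 150ms, for PREPARE_DONE to clear. */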
6909 	for (ntries = 0; ntries < 15000; ntries++) {
6910 		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
6911 		    IWN_HW_IF_CONFIG_PREPARE_DONE))
6912 			break;
6913 		DELAY(10);
6914 	}
6915 	if (ntries == 15000)
6916 		return ETIMEDOUT;
6917 
6918 	/* Hardware should be ready now. */
6919 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6920 	for (ntries = 0; ntries < 5; ntries++) {
6921 		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6922 		    IWN_HW_IF_CONFIG_NIC_READY)
6923 			return 0;
6924 		DELAY(10);
6925 	}
6926 	return ETIMEDOUT;
6927 }
6928 
6929 int
6930 iwn_hw_init(struct iwn_softc *sc)
6931 {
6932 	struct iwn_ops *ops = &sc->ops;
6933 	int error, chnl, qid;
6934 
6935 	/* Clear pending interrupts. */
6936 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6937 
6938 	if ((error = iwn_apm_init(sc)) != 0) {
6939 		printf("%s: could not power on adapter\n",
6940 		    sc->sc_dev.dv_xname);
6941 		return error;
6942 	}
6943 
6944 	/* Select VMAIN power source. */
6945 	if ((error = iwn_nic_lock(sc)) != 0)
6946 		return error;
6947 	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
6948 	iwn_nic_unlock(sc);
6949 
6950 	/* Perform adapter-specific initialization. */
6951 	if ((error = ops->nic_config(sc)) != 0)
6952 		return error;
6953 
6954 	/* Initialize RX ring. */
6955 	if ((error = iwn_nic_lock(sc)) != 0)
6956 		return error;
6957 	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
6958 	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
6959 	/* Set physical address of RX ring (256-byte aligned). */
6960 	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
6961 	/* Set physical address of RX status (16-byte aligned). */
6962 	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
6963 	/* Enable RX. */
6964 	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
6965 	    IWN_FH_RX_CONFIG_ENA           |
6966 	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
6967 	    IWN_FH_RX_CONFIG_IRQ_DST_HOST  |
6968 	    IWN_FH_RX_CONFIG_SINGLE_FRAME  |
6969 	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0x11) | /* about 1/2 msec */
6970 	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
6971 	iwn_nic_unlock(sc);
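	/* Seed the RX write pointer, rounded down to a multiple of 8. */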
6972 	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
6973 
6974 	if ((error = iwn_nic_lock(sc)) != 0)
6975 		return error;
6976 
6977 	/* Initialize TX scheduler. */
6978 	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
6979 
6980 	/* Set physical address of "keep warm" page (16-byte aligned). */
6981 	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
6982 
6983 	/* Initialize TX rings. */
6984 	for (qid = 0; qid < sc->ntxqs; qid++) {
6985 		struct iwn_tx_ring *txq = &sc->txq[qid];
6986 
6987 		/* Set physical address of TX ring (256-byte aligned). */
6988 		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
6989 		    txq->desc_dma.paddr >> 8);
6990 	}
6991 	iwn_nic_unlock(sc);
6992 
6993 	/* Enable DMA channels. */
6994 	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
6995 		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
6996 		    IWN_FH_TX_CONFIG_DMA_ENA |
6997 		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
6998 	}
6999 
7000 	/* Clear "radio off" and "commands blocked" bits. */
7001 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7002 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
7003 
7004 	/* Clear pending interrupts. */
7005 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
7006 	/* Enable interrupt coalescing. */
7007 	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
7008 	/* Enable interrupts. */
7009 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
7010 
7011 	/* _Really_ make sure "radio off" bit is cleared! */
7012 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7013 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7014 
7015 	/* Enable shadow registers. */
7016 	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
7017 		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
7018 
7019 	if ((error = ops->load_firmware(sc)) != 0) {
7020 		printf("%s: could not load firmware\n", sc->sc_dev.dv_xname);
7021 		return error;
7022 	}
7023 	/* Wait at most one second for firmware alive notification. */
7024 	if ((error = tsleep_nsec(sc, PCATCH, "iwninit", SEC_TO_NSEC(1))) != 0) {
7025 		printf("%s: timeout waiting for adapter to initialize\n",
7026 		    sc->sc_dev.dv_xname);
7027 		return error;
7028 	}
7029 	/* Do post-firmware initialization. */
7030 	return ops->post_alive(sc);
7031 }
7032 
7033 void
7034 iwn_hw_stop(struct iwn_softc *sc)
7035 {
7036 	int chnl, qid, ntries;
7037 
7038 	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
7039 
7040 	/* Disable interrupts. */
7041 	IWN_WRITE(sc, IWN_INT_MASK, 0);
7042 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
7043 	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
7044 	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
7045 
7046 	/* Make sure we no longer hold the NIC lock. */
7047 	iwn_nic_unlock(sc);
7048 
7049 	/* Stop TX scheduler. */
7050 	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
7051 
7052 	/* Stop all DMA channels. */
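	/* Give each channel up to 2ms (200 polls of 10us) to go idle. */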
7053 	if (iwn_nic_lock(sc) == 0) {
7054 		for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
7055 			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
7056 			for (ntries = 0; ntries < 200; ntries++) {
7057 				if (IWN_READ(sc, IWN_FH_TX_STATUS) &
7058 				    IWN_FH_TX_STATUS_IDLE(chnl))
7059 					break;
7060 				DELAY(10);
7061 			}
7062 		}
7063 		iwn_nic_unlock(sc);
7064 	}
7065 
7066 	/* Stop RX ring. */
7067 	iwn_reset_rx_ring(sc, &sc->rxq);
7068 
7069 	/* Reset all TX rings. */
7070 	for (qid = 0; qid < sc->ntxqs; qid++)
7071 		iwn_reset_tx_ring(sc, &sc->txq[qid]);
7072 
7073 	if (iwn_nic_lock(sc) == 0) {
7074 		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
7075 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
7076 		iwn_nic_unlock(sc);
7077 	}
7078 	DELAY(5);
7079 	/* Power OFF adapter. */
7080 	iwn_apm_stop(sc);
7081 }
7082 
7083 int
7084 iwn_init(struct ifnet *ifp)
7085 {
7086 	struct iwn_softc *sc = ifp->if_softc;
7087 	struct ieee80211com *ic = &sc->sc_ic;
7088 	int error;
7089 
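	/* Reset cached BSS and Tx aggregation state from any previous run. */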
7090 	memset(sc->bss_node_addr, 0, sizeof(sc->bss_node_addr));
7091 	sc->agg_queue_mask = 0;
7092 	memset(sc->sc_tx_ba, 0, sizeof(sc->sc_tx_ba));
7093 
7094 	if ((error = iwn_hw_prepare(sc)) != 0) {
7095 		printf("%s: hardware not ready\n", sc->sc_dev.dv_xname);
7096 		goto fail;
7097 	}
7098 
7099 	/* Initialize interrupt mask to default value. */
7100 	sc->int_mask = IWN_INT_MASK_DEF;
7101 	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
7102 
7103 	/* Check that the radio is not disabled by hardware switch. */
7104 	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
7105 		printf("%s: radio is disabled by hardware switch\n",
7106 		    sc->sc_dev.dv_xname);
7107 		error = EPERM;	/* :-) */
7108 		/* Re-enable interrupts. */
7109 		IWN_WRITE(sc, IWN_INT, 0xffffffff);
7110 		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
7111 		return error;
7112 	}
7113 
7114 	/* Read firmware images from the filesystem. */
7115 	if ((error = iwn_read_firmware(sc)) != 0) {
7116 		printf("%s: could not read firmware\n", sc->sc_dev.dv_xname);
7117 		goto fail;
7118 	}
7119 
7120 	/* Initialize hardware and upload firmware. */
7121 	error = iwn_hw_init(sc);
7122 	free(sc->fw.data, M_DEVBUF, sc->fw.size);
7123 	if (error != 0) {
7124 		printf("%s: could not initialize hardware\n",
7125 		    sc->sc_dev.dv_xname);
7126 		goto fail;
7127 	}
7128 
7129 	/* Configure adapter now that it is ready. */
7130 	if ((error = iwn_config(sc)) != 0) {
7131 		printf("%s: could not configure device\n",
7132 		    sc->sc_dev.dv_xname);
7133 		goto fail;
7134 	}
7135 
7136 	ifq_clr_oactive(&ifp->if_snd);
7137 	ifp->if_flags |= IFF_RUNNING;
7138 
7139 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
7140 		ieee80211_begin_scan(ifp);
7141 	else
7142 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
7143 
7144 	return 0;
7145 
7146 fail:	iwn_stop(ifp);
7147 	return error;
7148 }
7149 
7150 void
7151 iwn_stop(struct ifnet *ifp)
7152 {
7153 	struct iwn_softc *sc = ifp->if_softc;
7154 	struct ieee80211com *ic = &sc->sc_ic;
7155 
7156 	timeout_del(&sc->calib_to);
7157 	ifp->if_timer = sc->sc_tx_timer = 0;
7158 	ifp->if_flags &= ~IFF_RUNNING;
7159 	ifq_clr_oactive(&ifp->if_snd);
7160 
7161 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
7162 
7163 	/* Power OFF hardware. */
7164 	iwn_hw_stop(sc);
7165 }
7166