xref: /openbsd-src/sys/dev/pci/if_iwm.c (revision 4b70baf6e17fc8b27fc1f7fa7929335753fa94c3)
1 /*	$OpenBSD: if_iwm.c,v 1.239 2019/04/01 10:47:13 kn Exp $	*/
2 
3 /*
4  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5  *   Author: Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2014 Fixup Software Ltd.
7  * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ***********************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
34  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35  * Copyright(c) 2016 Intel Deutschland GmbH
36  *
37  * This program is free software; you can redistribute it and/or modify
38  * it under the terms of version 2 of the GNU General Public License as
39  * published by the Free Software Foundation.
40  *
41  * This program is distributed in the hope that it will be useful, but
42  * WITHOUT ANY WARRANTY; without even the implied warranty of
43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
44  * General Public License for more details.
45  *
46  * You should have received a copy of the GNU General Public License
47  * along with this program; if not, write to the Free Software
48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49  * USA
50  *
51  * The full GNU General Public License is included in this distribution
52  * in the file called COPYING.
53  *
54  * Contact Information:
55  *  Intel Linux Wireless <ilw@linux.intel.com>
56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57  *
58  *
59  * BSD LICENSE
60  *
61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
63  * Copyright(c) 2016 Intel Deutschland GmbH
64  * All rights reserved.
65  *
66  * Redistribution and use in source and binary forms, with or without
67  * modification, are permitted provided that the following conditions
68  * are met:
69  *
70  *  * Redistributions of source code must retain the above copyright
71  *    notice, this list of conditions and the following disclaimer.
72  *  * Redistributions in binary form must reproduce the above copyright
73  *    notice, this list of conditions and the following disclaimer in
74  *    the documentation and/or other materials provided with the
75  *    distribution.
76  *  * Neither the name Intel Corporation nor the names of its
77  *    contributors may be used to endorse or promote products derived
78  *    from this software without specific prior written permission.
79  *
80  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
81  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
82  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
83  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
84  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
86  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
87  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
88  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
89  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
90  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91  */
92 
93 /*-
94  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
95  *
96  * Permission to use, copy, modify, and distribute this software for any
97  * purpose with or without fee is hereby granted, provided that the above
98  * copyright notice and this permission notice appear in all copies.
99  *
100  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
101  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
102  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
103  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
104  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
105  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
106  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
107  */
108 
109 #include "bpfilter.h"
110 
111 #include <sys/param.h>
112 #include <sys/conf.h>
113 #include <sys/kernel.h>
114 #include <sys/malloc.h>
115 #include <sys/mbuf.h>
116 #include <sys/mutex.h>
117 #include <sys/proc.h>
118 #include <sys/rwlock.h>
119 #include <sys/socket.h>
120 #include <sys/sockio.h>
121 #include <sys/systm.h>
122 #include <sys/endian.h>
123 
124 #include <sys/refcnt.h>
125 #include <sys/task.h>
126 #include <machine/bus.h>
127 #include <machine/intr.h>
128 
129 #include <dev/pci/pcireg.h>
130 #include <dev/pci/pcivar.h>
131 #include <dev/pci/pcidevs.h>
132 
133 #if NBPFILTER > 0
134 #include <net/bpf.h>
135 #endif
136 #include <net/if.h>
137 #include <net/if_dl.h>
138 #include <net/if_media.h>
139 
140 #include <netinet/in.h>
141 #include <netinet/if_ether.h>
142 
143 #include <net80211/ieee80211_var.h>
144 #include <net80211/ieee80211_amrr.h>
145 #include <net80211/ieee80211_mira.h>
146 #include <net80211/ieee80211_radiotap.h>
147 
148 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
149 
150 #define IC2IFP(_ic_) (&(_ic_)->ic_if)
151 
152 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
153 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
154 
155 #ifdef IWM_DEBUG
156 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
157 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
158 int iwm_debug = 1;
159 #else
160 #define DPRINTF(x)	do { ; } while (0)
161 #define DPRINTFN(n, x)	do { ; } while (0)
162 #endif
163 
164 #include <dev/pci/if_iwmreg.h>
165 #include <dev/pci/if_iwmvar.h>
166 
/*
 * Channel number list used when building the channel map from NVM data
 * (see iwm_init_channel_map); the first IWM_NUM_2GHZ_CHANNELS entries
 * are 2.4 GHz channels, the remainder 5 GHz.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44 , 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
175 
/*
 * Extended channel number list (same layout as iwm_nvm_channels:
 * 2.4 GHz entries first, then 5 GHz) used for 8000-series NVM data.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
184 
185 #define IWM_NUM_2GHZ_CHANNELS	14
186 
/*
 * Rate table mapping each rate to its legacy PLCP value and its HT PLCP
 * value (IWM_RATE_*_INV_PLCP / IWM_RATE_INVM_PLCP mark entries with no
 * valid encoding in that mode).
 *
 * NOTE: the entry order is load-bearing — IWM_RIDX_CCK/IWM_RIDX_OFDM
 * below and the iwm_mcs2ridx[] map index directly into this array, so
 * entries must not be reordered.
 */
const struct iwm_rate {
	uint16_t rate;		/* rate value; presumably in 500 kbit/s units (2 == 1 Mbit/s) — TODO confirm */
	uint8_t plcp;		/* legacy (CCK/OFDM) PLCP signal value */
	uint8_t ht_plcp;	/* HT MCS PLCP value, or INV if not an HT rate */
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_15_PLCP },
};
215 #define IWM_RIDX_CCK	0
216 #define IWM_RIDX_OFDM	4
217 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
218 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
219 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
220 #define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
221 
/* Convert an MCS index into an iwm_rates[] index. */
/* Indexed by HT MCS number 0-15; values are IWM_RATE_MCS_*_INDEX constants. */
const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
	IWM_RATE_MCS_8_INDEX,
	IWM_RATE_MCS_9_INDEX,
	IWM_RATE_MCS_10_INDEX,
	IWM_RATE_MCS_11_INDEX,
	IWM_RATE_MCS_12_INDEX,
	IWM_RATE_MCS_13_INDEX,
	IWM_RATE_MCS_14_INDEX,
	IWM_RATE_MCS_15_INDEX,
};
241 
/* Buffer holding one NVM section image and its length in bytes. */
struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};
246 
247 int	iwm_is_mimo_ht_plcp(uint8_t);
248 int	iwm_is_mimo_mcs(int);
249 int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
250 int	iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
251 	    uint8_t *, size_t);
252 int	iwm_set_default_calib(struct iwm_softc *, const void *);
253 void	iwm_fw_info_free(struct iwm_fw_info *);
254 int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
255 uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
256 void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
257 int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
258 int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
259 int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
260 int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
261 int	iwm_nic_lock(struct iwm_softc *);
262 void	iwm_nic_assert_locked(struct iwm_softc *);
263 void	iwm_nic_unlock(struct iwm_softc *);
264 void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
265 	    uint32_t);
266 void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
267 void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
268 int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
269 	    bus_size_t);
270 void	iwm_dma_contig_free(struct iwm_dma_info *);
271 int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
272 void	iwm_disable_rx_dma(struct iwm_softc *);
273 void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
275 int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
276 void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
277 void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
278 void	iwm_enable_rfkill_int(struct iwm_softc *);
279 int	iwm_check_rfkill(struct iwm_softc *);
280 void	iwm_enable_interrupts(struct iwm_softc *);
281 void	iwm_restore_interrupts(struct iwm_softc *);
282 void	iwm_disable_interrupts(struct iwm_softc *);
283 void	iwm_ict_reset(struct iwm_softc *);
284 int	iwm_set_hw_ready(struct iwm_softc *);
285 int	iwm_prepare_card_hw(struct iwm_softc *);
286 void	iwm_apm_config(struct iwm_softc *);
287 int	iwm_apm_init(struct iwm_softc *);
288 void	iwm_apm_stop(struct iwm_softc *);
289 int	iwm_allow_mcast(struct iwm_softc *);
290 int	iwm_start_hw(struct iwm_softc *);
291 void	iwm_stop_device(struct iwm_softc *);
292 void	iwm_nic_config(struct iwm_softc *);
293 int	iwm_nic_rx_init(struct iwm_softc *);
294 int	iwm_nic_tx_init(struct iwm_softc *);
295 int	iwm_nic_init(struct iwm_softc *);
296 int	iwm_enable_txq(struct iwm_softc *, int, int, int);
297 int	iwm_post_alive(struct iwm_softc *);
298 struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
299 	    uint16_t);
300 int	iwm_phy_db_set_section(struct iwm_softc *,
301 	    struct iwm_calib_res_notif_phy_db *);
302 int	iwm_is_valid_channel(uint16_t);
303 uint8_t	iwm_ch_id_to_ch_index(uint16_t);
304 uint16_t iwm_channel_id_to_papd(uint16_t);
305 uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
306 int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
307 	    uint16_t *, uint16_t);
308 int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
309 int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
310 	    uint8_t);
311 int	iwm_send_phy_db_data(struct iwm_softc *);
312 void	iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
313 	    struct iwm_time_event_cmd_v1 *);
314 int	iwm_send_time_event_cmd(struct iwm_softc *,
315 	    const struct iwm_time_event_cmd_v2 *);
316 void	iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
317 	    uint32_t);
318 void	iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
319 int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
320 	    uint8_t *, uint16_t *);
321 int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
322 	    uint16_t *, size_t);
323 void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
324 	    const uint8_t *nvm_channels, size_t nchan);
325 void	iwm_setup_ht_rates(struct iwm_softc *);
326 void	iwm_htprot_task(void *);
327 void	iwm_update_htprot(struct ieee80211com *, struct ieee80211_node *);
328 int	iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
329 	    uint8_t);
330 void	iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
331 	    uint8_t);
332 void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
333 	    uint16_t, int);
334 #ifdef notyet
335 int	iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
336 	    uint8_t);
337 void	iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
338 	    uint8_t);
339 #endif
340 void	iwm_ba_task(void *);
341 
342 int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
343 	    const uint16_t *, const uint16_t *,
344 	    const uint16_t *, const uint16_t *,
345 	    const uint16_t *);
346 void	iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
347 	    const uint16_t *, const uint16_t *);
348 int	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
349 int	iwm_nvm_init(struct iwm_softc *);
350 int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
351 	    uint32_t);
352 int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
353 	    uint32_t);
354 int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
355 int	iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
356 	    int , int *);
357 int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
358 int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
359 int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
360 int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
361 int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
362 int	iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
363 int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
364 int	iwm_rx_addbuf(struct iwm_softc *, int, int);
365 int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
366 int	iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
367 void	iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
368 	    struct iwm_rx_data *);
369 int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
370 void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
371 	    struct iwm_rx_data *);
372 void	iwm_enable_ht_cck_fallback(struct iwm_softc *, struct iwm_node *);
373 void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
374 	    struct iwm_node *);
375 void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
376 	    struct iwm_rx_data *);
377 void	iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
378 	    struct iwm_rx_data *);
379 int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
380 void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
381 	    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
382 void	iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
383 	    struct ieee80211_channel *, uint8_t, uint8_t);
384 int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
385 	    uint8_t, uint32_t, uint32_t);
386 int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
387 int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t,
388 	    const void *);
389 int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
390 	    uint32_t *);
391 int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
392 	    const void *, uint32_t *);
393 void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
394 void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
395 void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
396 const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
397 	    struct ieee80211_frame *, struct iwm_tx_cmd *);
398 int	iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
399 int	iwm_flush_tx_path(struct iwm_softc *, int);
400 void	iwm_led_enable(struct iwm_softc *);
401 void	iwm_led_disable(struct iwm_softc *);
402 int	iwm_led_is_enabled(struct iwm_softc *);
403 void	iwm_led_blink_timeout(void *);
404 void	iwm_led_blink_start(struct iwm_softc *);
405 void	iwm_led_blink_stop(struct iwm_softc *);
406 int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
407 	    struct iwm_beacon_filter_cmd *);
408 void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
409 	    struct iwm_beacon_filter_cmd *);
410 int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
411 void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
412 	    struct iwm_mac_power_cmd *);
413 int	iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
414 int	iwm_power_update_device(struct iwm_softc *);
415 int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
416 int	iwm_disable_beacon_filter(struct iwm_softc *);
417 int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
418 int	iwm_add_aux_sta(struct iwm_softc *);
419 int	iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *);
420 uint16_t iwm_scan_rx_chain(struct iwm_softc *);
421 uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
422 uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
423 	    struct iwm_scan_channel_cfg_lmac *, int, int);
424 int	iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
425 int	iwm_lmac_scan(struct iwm_softc *, int);
426 int	iwm_config_umac_scan(struct iwm_softc *);
427 int	iwm_umac_scan(struct iwm_softc *, int);
428 uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
429 int	iwm_rval2ridx(int);
430 void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
431 void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
432 	    struct iwm_mac_ctx_cmd *, uint32_t, int);
433 void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
434 	    struct iwm_mac_data_sta *, int);
435 int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int);
436 int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int);
437 void	iwm_add_task(struct iwm_softc *, struct taskq *, struct task *);
438 void	iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
439 int	iwm_scan(struct iwm_softc *);
440 int	iwm_bgscan(struct ieee80211com *);
441 int	iwm_umac_scan_abort(struct iwm_softc *);
442 int	iwm_lmac_scan_abort(struct iwm_softc *);
443 int	iwm_scan_abort(struct iwm_softc *);
444 int	iwm_auth(struct iwm_softc *);
445 int	iwm_deauth(struct iwm_softc *);
446 int	iwm_assoc(struct iwm_softc *);
447 int	iwm_disassoc(struct iwm_softc *);
448 int	iwm_run(struct iwm_softc *);
449 int	iwm_run_stop(struct iwm_softc *);
450 struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
451 void	iwm_calib_timeout(void *);
452 int	iwm_media_change(struct ifnet *);
453 void	iwm_newstate_task(void *);
454 int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
455 void	iwm_endscan(struct iwm_softc *);
456 void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
457 	    struct ieee80211_node *);
458 int	iwm_sf_config(struct iwm_softc *, int);
459 int	iwm_send_bt_init_conf(struct iwm_softc *);
460 int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
461 void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
462 int	iwm_init_hw(struct iwm_softc *);
463 int	iwm_init(struct ifnet *);
464 void	iwm_start(struct ifnet *);
465 void	iwm_stop(struct ifnet *);
466 void	iwm_watchdog(struct ifnet *);
467 int	iwm_ioctl(struct ifnet *, u_long, caddr_t);
468 #ifdef IWM_DEBUG
469 const char *iwm_desc_lookup(uint32_t);
470 void	iwm_nic_error(struct iwm_softc *);
471 void	iwm_nic_umac_error(struct iwm_softc *);
472 #endif
473 void	iwm_notif_intr(struct iwm_softc *);
474 int	iwm_intr(void *);
475 int	iwm_match(struct device *, void *, void *);
476 int	iwm_preinit(struct iwm_softc *);
477 void	iwm_attach_hook(struct device *);
478 void	iwm_attach(struct device *, struct device *, void *);
479 void	iwm_init_task(void *);
480 int	iwm_activate(struct device *, int);
481 int	iwm_resume(struct iwm_softc *);
482 
483 #if NBPFILTER > 0
484 void	iwm_radiotap_attach(struct iwm_softc *);
485 #endif
486 
487 int
488 iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
489 {
490 	return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
491 	    (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
492 }
493 
494 int
495 iwm_is_mimo_mcs(int mcs)
496 {
497 	int ridx = iwm_mcs2ridx[mcs];
498 	return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
499 
500 }
501 
502 int
503 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
504 {
505 	struct iwm_fw_cscheme_list *l = (void *)data;
506 
507 	if (dlen < sizeof(*l) ||
508 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
509 		return EINVAL;
510 
511 	/* we don't actually store anything for now, always use s/w crypto */
512 
513 	return 0;
514 }
515 
516 int
517 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
518     uint8_t *data, size_t dlen)
519 {
520 	struct iwm_fw_sects *fws;
521 	struct iwm_fw_onesect *fwone;
522 
523 	if (type >= IWM_UCODE_TYPE_MAX)
524 		return EINVAL;
525 	if (dlen < sizeof(uint32_t))
526 		return EINVAL;
527 
528 	fws = &sc->sc_fw.fw_sects[type];
529 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
530 		return EINVAL;
531 
532 	fwone = &fws->fw_sect[fws->fw_count];
533 
534 	/* first 32bit are device load offset */
535 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
536 
537 	/* rest is data */
538 	fwone->fws_data = data + sizeof(uint32_t);
539 	fwone->fws_len = dlen - sizeof(uint32_t);
540 
541 	fws->fw_count++;
542 	fws->fw_totlen += fwone->fws_len;
543 
544 	return 0;
545 }
546 
547 #define IWM_DEFAULT_SCAN_CHANNELS 40
548 
/* Wire format of an IWM_UCODE_TLV_DEF_CALIB firmware TLV payload. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;
553 
554 int
555 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
556 {
557 	const struct iwm_tlv_calib_data *def_calib = data;
558 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
559 
560 	if (ucode_type >= IWM_UCODE_TYPE_MAX)
561 		return EINVAL;
562 
563 	sc->sc_default_calib[ucode_type].flow_trigger =
564 	    def_calib->calib.flow_trigger;
565 	sc->sc_default_calib[ucode_type].event_trigger =
566 	    def_calib->calib.event_trigger;
567 
568 	return 0;
569 }
570 
/*
 * Release the raw firmware image and reset the parsed section table.
 * fw->fw_status is deliberately left untouched: iwm_read_firmware()
 * uses it to serialize concurrent loads (see its sleep/wakeup logic).
 */
void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
	fw->fw_rawdata = NULL;
	fw->fw_rawsize = 0;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
580 
/*
 * Load the firmware image for the given ucode type via loadfirmware(9)
 * and parse its TLV records into sc->sc_fw and the various capability
 * fields of the softc.  Concurrent callers are serialized through
 * fw->fw_status: waiters tsleep on &sc->sc_fw and are woken at the end.
 * Returns 0 on success or an errno.
 */
int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	uint32_t tlv_type;
	uint8_t *data;
	int err;
	size_t len;

	/* Already parsed; only an explicit INIT load forces a re-read. */
	if (fw->fw_status == IWM_FW_STATUS_DONE &&
	    ucode_type != IWM_UCODE_TYPE_INIT)
		return 0;

	/* Wait for any in-flight load, then claim the slot ourselves. */
	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
		tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
	fw->fw_status = IWM_FW_STATUS_INPROGRESS;

	if (fw->fw_rawdata != NULL)
		iwm_fw_info_free(fw);

	err = loadfirmware(sc->sc_fwname,
	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
	if (err) {
		printf("%s: could not read firmware %s (error %d)\n",
		    DEVNAME(sc), sc->sc_fwname, err);
		goto out;
	}

	/* Reset capability state before re-populating it from the TLVs. */
	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/* The image must start with a zero word followed by the TLV magic. */
	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	/* Walk the TLV records; each is a header plus 4-byte-aligned data. */
	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			/* Presence-only TLV: payload must be empty. */
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwm_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			struct iwm_ucode_api *api;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwm_ucode_api *)tlv_data;
			/* Flags may exceed 32 bits in future firmware. */
			if (le32toh(api->api_index) > 0) {
				/*
				 * NOTE(review): jumps to parse_out with
				 * err == 0, silently abandoning all
				 * remaining TLVs instead of skipping just
				 * this record — confirm this is intended.
				 */
				goto parse_out;
			}
			sc->sc_ucode_api = le32toh(api->api_flags);
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwm_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				/*
				 * NOTE(review): same pattern as above —
				 * aborts the whole TLV walk with err == 0
				 * rather than skipping this record.
				 */
				goto parse_out;
			}
			/* Each set bit i enables capability (32 * idx) + i. */
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case 48: /* undocumented TLV */
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			  le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			err = EINVAL;
			goto parse_out;
		}

		/*
		 * NOTE(review): only `len < tlv_len` was checked above; if
		 * roundup(tlv_len, 4) exceeds the remaining len (tlv_len not
		 * a multiple of 4 at the end of the image), this subtraction
		 * wraps the unsigned len and the loop keeps reading past the
		 * buffer — worth confirming against newer upstream code.
		 */
		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	/* Falling out of the loop means every TLV parsed cleanly. */
	KASSERT(err == 0);

 parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
		printf("%s: device uses unsupported power ops\n", DEVNAME(sc));
		err = ENOTSUP;
	}

 out:
	if (err) {
		fw->fw_status = IWM_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwm_fw_info_free(fw);
	} else
		fw->fw_status = IWM_FW_STATUS_DONE;
	/* Release any threads waiting in the tsleep loop above. */
	wakeup(&sc->sc_fw);

	return err;
}
849 
uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	/*
	 * Read a periphery (PRPH) register.  The caller must hold the
	 * NIC lock (iwm_nic_lock()); this is asserted below.
	 */
	iwm_nic_assert_locked(sc);
	/*
	 * Low 20 bits are the PRPH address; (3 << 24) presumably selects
	 * a 4-byte access (mirrors the reference driver) -- TODO confirm.
	 */
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}
859 
void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	/*
	 * Write a periphery (PRPH) register.  The caller must hold the
	 * NIC lock (iwm_nic_lock()); this is asserted below.
	 */
	iwm_nic_assert_locked(sc);
	/* Same address encoding as iwm_read_prph(). */
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}
869 
870 int
871 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
872 {
873 	int offs, err = 0;
874 	uint32_t *vals = buf;
875 
876 	if (iwm_nic_lock(sc)) {
877 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
878 		for (offs = 0; offs < dwords; offs++)
879 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
880 		iwm_nic_unlock(sc);
881 	} else {
882 		err = EBUSY;
883 	}
884 	return err;
885 }
886 
887 int
888 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
889 {
890 	int offs;
891 	const uint32_t *vals = buf;
892 
893 	if (iwm_nic_lock(sc)) {
894 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
895 		/* WADDR auto-increments */
896 		for (offs = 0; offs < dwords; offs++) {
897 			uint32_t val = vals ? vals[offs] : 0;
898 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
899 		}
900 		iwm_nic_unlock(sc);
901 	} else {
902 		return EBUSY;
903 	}
904 	return 0;
905 }
906 
/* Write a single 32-bit word of device memory at 'addr'. */
int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}
912 
913 int
914 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
915     int timo)
916 {
917 	for (;;) {
918 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
919 			return 1;
920 		}
921 		if (timo < 10) {
922 			return 0;
923 		}
924 		timo -= 10;
925 		DELAY(10);
926 	}
927 }
928 
int
iwm_nic_lock(struct iwm_softc *sc)
{
	/*
	 * Acquire "MAC access" so CSR/periphery registers can be used.
	 * The lock is a nesting counter; returns 1 on success, 0 if the
	 * device failed to wake up.
	 */
	if (sc->sc_nic_locks > 0) {
		iwm_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	/* Request that the device stay awake with clocks running. */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* NOTE(review): settle delay applied on 8000-series only. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	/* Poll up to 150ms for clock-ready and not going to sleep. */
	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}
955 
void
iwm_nic_assert_locked(struct iwm_softc *sc)
{
	/*
	 * Sanity checks for code which requires the NIC lock: the MAC
	 * clock must be running, the device must be awake, and the lock
	 * counter must show at least one holder.  Panics otherwise.
	 */
	uint32_t reg = IWM_READ(sc, IWM_CSR_GP_CNTRL);
	if ((reg & IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) == 0)
		panic("%s: mac clock not ready", DEVNAME(sc));
	if (reg & IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)
		panic("%s: mac gone to sleep", DEVNAME(sc));
	if (sc->sc_nic_locks <= 0)
		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
}
967 
968 void
969 iwm_nic_unlock(struct iwm_softc *sc)
970 {
971 	if (sc->sc_nic_locks > 0) {
972 		if (--sc->sc_nic_locks == 0)
973 			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
974 			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
975 	} else
976 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
977 }
978 
void
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	/*
	 * Read-modify-write a periphery register: keep only the bits in
	 * 'mask', then OR in 'bits'.
	 * XXX: no error path? (a failed iwm_nic_lock() is silently ignored)
	 */
	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
	}
}
993 
/* Set 'bits' in a periphery register, preserving all other bits. */
void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
999 
/* Clear 'bits' in a periphery register, preserving all other bits. */
void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
1005 
/*
 * Allocate, map, and zero a physically contiguous DMA buffer of
 * 'size' bytes with the given alignment.  On success, fills in
 * dma->{map,seg,vaddr,paddr}.  On failure, any partial state is
 * torn down via iwm_dma_contig_free() and an errno is returned.
 */
int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	caddr_t va;

	dma->tag = tag;
	dma->size = size;

	/* One segment only: the buffer must be physically contiguous. */
	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	/* Map the segment into kernel virtual address space. */
	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	/* Hand the device a zeroed buffer. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}
1046 
1047 void
1048 iwm_dma_contig_free(struct iwm_dma_info *dma)
1049 {
1050 	if (dma->map != NULL) {
1051 		if (dma->vaddr != NULL) {
1052 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1053 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1054 			bus_dmamap_unload(dma->tag, dma->map);
1055 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1056 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1057 			dma->vaddr = NULL;
1058 		}
1059 		bus_dmamap_destroy(dma->tag, dma->map);
1060 		dma->map = NULL;
1061 	}
1062 }
1063 
/*
 * Allocate the RX ring: descriptor array, status area, and one DMA
 * map plus an initial receive buffer per ring slot.  On any failure
 * everything allocated so far is released via iwm_free_rx_ring().
 */
int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate RX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		printf("%s: could not allocate RX status DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		/* Single-segment map; RX buffers are never chained. */
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			printf("%s: could not create RX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}

		/* Attach an initial receive buffer to this slot. */
		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}
1114 
void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	int ntries;

	/*
	 * Stop the RX DMA channel and poll (up to ~10ms) for the channel
	 * to report idle.  A failed iwm_nic_lock() is silently ignored.
	 */
	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
}
1131 
1132 void
1133 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1134 {
1135 	ring->cur = 0;
1136 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1137 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1138 	memset(ring->stat, 0, sizeof(*ring->stat));
1139 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1140 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1141 
1142 }
1143 
1144 void
1145 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1146 {
1147 	int i;
1148 
1149 	iwm_dma_contig_free(&ring->desc_dma);
1150 	iwm_dma_contig_free(&ring->stat_dma);
1151 
1152 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1153 		struct iwm_rx_data *data = &ring->data[i];
1154 
1155 		if (data->m != NULL) {
1156 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1157 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1158 			bus_dmamap_unload(sc->sc_dmat, data->map);
1159 			m_freem(data->m);
1160 			data->m = NULL;
1161 		}
1162 		if (data->map != NULL)
1163 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1164 	}
1165 }
1166 
/*
 * Allocate TX ring 'qid': descriptor array for all rings, plus (for
 * rings up to and including the command queue) a command buffer area
 * and one DMA map per slot.  On failure, releases partial state via
 * iwm_free_tx_ring().
 */
int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate TX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Walk the command area, one iwm_device_cmd per ring slot. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		/* Physical address of this slot's scratch field. */
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE)
			mapsize = (sizeof(struct iwm_cmd_header) +
			    IWM_MAX_CMD_PAYLOAD_SIZE);
		else
			mapsize = MCLBYTES;
		err = bus_dmamap_create(sc->sc_dmat, mapsize,
		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (err) {
			printf("%s: could not create TX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}
	/* The walk above must end exactly at the end of the cmd area. */
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}
1234 
/*
 * Drop all queued frames, clear the descriptors, and rewind TX ring
 * state so the ring can be reused after a device reset.
 */
void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	/* This queue can accept frames again. */
	sc->qfullmsk &= ~(1 << ring->qid);
	/* 7000 family NICs are locked while commands are in progress. */
	if (ring->qid == IWM_CMD_QUEUE && ring->queued > 0) {
		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
			iwm_nic_unlock(sc);
	}
	ring->queued = 0;
	ring->cur = 0;
}
1264 
1265 void
1266 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1267 {
1268 	int i;
1269 
1270 	iwm_dma_contig_free(&ring->desc_dma);
1271 	iwm_dma_contig_free(&ring->cmd_dma);
1272 
1273 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1274 		struct iwm_tx_data *data = &ring->data[i];
1275 
1276 		if (data->m != NULL) {
1277 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1278 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1279 			bus_dmamap_unload(sc->sc_dmat, data->map);
1280 			m_freem(data->m);
1281 			data->m = NULL;
1282 		}
1283 		if (data->map != NULL)
1284 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1285 	}
1286 }
1287 
/* Mask all interrupts except RF-kill and latch the mask in software. */
void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1294 
1295 int
1296 iwm_check_rfkill(struct iwm_softc *sc)
1297 {
1298 	uint32_t v;
1299 	int s;
1300 	int rv;
1301 
1302 	s = splnet();
1303 
1304 	/*
1305 	 * "documentation" is not really helpful here:
1306 	 *  27:	HW_RF_KILL_SW
1307 	 *	Indicates state of (platform's) hardware RF-Kill switch
1308 	 *
1309 	 * But apparently when it's off, it's on ...
1310 	 */
1311 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1312 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1313 	if (rv) {
1314 		sc->sc_flags |= IWM_FLAG_RFKILL;
1315 	} else {
1316 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1317 	}
1318 
1319 	splx(s);
1320 	return rv;
1321 }
1322 
/* Enable the default interrupt set and latch the mask in software. */
void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1329 
/* Re-program the HW interrupt mask from the software copy. */
void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1335 
/* Mask all device interrupts and acknowledge anything pending. */
void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
1349 
/*
 * Clear the interrupt cause table (ICT), point the device at it, and
 * switch the driver into ICT interrupt mode.  Interrupts are disabled
 * during the switch and re-enabled at the end.
 */
void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Ack anything pending before re-enabling. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1371 
#define IWM_HW_READY_TIMEOUT 50
/*
 * Signal NIC_READY and poll briefly for the device to confirm.
 * Returns non-zero if the hardware reported ready; on success also
 * tells the device the OS is alive via the mailbox register.
 */
int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	int ready;

	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
	if (ready)
		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWM_HW_READY_TIMEOUT
1392 
1393 int
1394 iwm_prepare_card_hw(struct iwm_softc *sc)
1395 {
1396 	int t = 0;
1397 
1398 	if (iwm_set_hw_ready(sc))
1399 		return 0;
1400 
1401 	DELAY(100);
1402 
1403 	/* If HW is not ready, prepare the conditions to check again */
1404 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1405 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1406 
1407 	do {
1408 		if (iwm_set_hw_ready(sc))
1409 			return 0;
1410 		DELAY(200);
1411 		t += 200;
1412 	} while (t < 150000);
1413 
1414 	return ETIMEDOUT;
1415 }
1416 
/*
 * Mirror the PCIe ASPM L1 configuration into the device's GIO
 * register so the device and root complex agree on link power
 * management.
 */
void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t reg;

	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCI_PCIE_LCSR);
	if (reg & PCI_PCIE_LCSR_ASPM_L1) {
		/* Um the Linux driver prints "Disabling L0S for this one ... */
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		/* ... and "Enabling" here */
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}
}
1434 
1435 /*
1436  * Start up NIC's basic functionality after it has been reset
1437  * e.g. after platform boot or shutdown.
1438  * NOTE:  This does not load uCode nor start the embedded processor
1439  */
1440 int
1441 iwm_apm_init(struct iwm_softc *sc)
1442 {
1443 	int err = 0;
1444 
1445 	/* Disable L0S exit timer (platform NMI workaround) */
1446 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
1447 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1448 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1449 
1450 	/*
1451 	 * Disable L0s without affecting L1;
1452 	 *  don't wait for ICH L0s (ICH bug W/A)
1453 	 */
1454 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1455 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1456 
1457 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1458 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1459 
1460 	/*
1461 	 * Enable HAP INTA (interrupt from management bus) to
1462 	 * wake device's PCI Express link L1a -> L0s
1463 	 */
1464 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1465 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1466 
1467 	iwm_apm_config(sc);
1468 
1469 #if 0 /* not for 7k/8k */
1470 	/* Configure analog phase-lock-loop before activating to D0A */
1471 	if (trans->cfg->base_params->pll_cfg_val)
1472 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1473 		    trans->cfg->base_params->pll_cfg_val);
1474 #endif
1475 
1476 	/*
1477 	 * Set "initialization complete" bit to move adapter from
1478 	 * D0U* --> D0A* (powered-up active) state.
1479 	 */
1480 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1481 
1482 	/*
1483 	 * Wait for clock stabilization; once stabilized, access to
1484 	 * device-internal resources is supported, e.g. iwm_write_prph()
1485 	 * and accesses to uCode SRAM.
1486 	 */
1487 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1488 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1489 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1490 		printf("%s: timeout waiting for clock stabilization\n",
1491 		    DEVNAME(sc));
1492 		err = ETIMEDOUT;
1493 		goto out;
1494 	}
1495 
1496 	if (sc->host_interrupt_operation_mode) {
1497 		/*
1498 		 * This is a bit of an abuse - This is needed for 7260 / 3160
1499 		 * only check host_interrupt_operation_mode even if this is
1500 		 * not related to host_interrupt_operation_mode.
1501 		 *
1502 		 * Enable the oscillator to count wake up time for L1 exit. This
1503 		 * consumes slightly more power (100uA) - but allows to be sure
1504 		 * that we wake up from L1 on time.
1505 		 *
1506 		 * This looks weird: read twice the same register, discard the
1507 		 * value, set a bit, and yet again, read that same register
1508 		 * just to discard the value. But that's the way the hardware
1509 		 * seems to like it.
1510 		 */
1511 		if (iwm_nic_lock(sc)) {
1512 			iwm_read_prph(sc, IWM_OSC_CLK);
1513 			iwm_read_prph(sc, IWM_OSC_CLK);
1514 			iwm_nic_unlock(sc);
1515 		}
1516 		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1517 		if (iwm_nic_lock(sc)) {
1518 			iwm_read_prph(sc, IWM_OSC_CLK);
1519 			iwm_read_prph(sc, IWM_OSC_CLK);
1520 			iwm_nic_unlock(sc);
1521 		}
1522 	}
1523 
1524 	/*
1525 	 * Enable DMA clock and wait for it to stabilize.
1526 	 *
1527 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1528 	 * do not disable clocks.  This preserves any hardware bits already
1529 	 * set by default in "CLK_CTRL_REG" after reset.
1530 	 */
1531 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1532 		if (iwm_nic_lock(sc)) {
1533 			iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1534 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1535 			iwm_nic_unlock(sc);
1536 		}
1537 		DELAY(20);
1538 
1539 		/* Disable L1-Active */
1540 		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1541 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1542 
1543 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1544 		if (iwm_nic_lock(sc)) {
1545 			iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1546 			    IWM_APMG_RTC_INT_STT_RFKILL);
1547 			iwm_nic_unlock(sc);
1548 		}
1549 	}
1550  out:
1551 	if (err)
1552 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
1553 	return err;
1554 }
1555 
/* Stop the device's bus-master DMA and wait for it to confirm. */
void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		printf("%s: timeout waiting for master\n", DEVNAME(sc));
}
1567 
/*
 * Bring the hardware from cold to a basic powered-up state:
 * prepare, software-reset, APM init, then arm the RF-kill
 * interrupt and sample the current RF-kill state.
 */
int
iwm_start_hw(struct iwm_softc *sc)
{
	int err;

	err = iwm_prepare_card_hw(sc);
	if (err)
		return err;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(10);

	err = iwm_apm_init(sc);
	if (err)
		return err;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}
1590 
1591 
/*
 * Fully stop the device: quiesce DMA, drain the rings, power down
 * clocks, stop APM, and reset the on-board processor.  RF-kill
 * interrupts remain armed so switch changes are still seen.
 */
void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	iwm_disable_interrupts(sc);
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		/* Deactivate TX scheduler. */
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Poll up to ~4ms for the channel to go idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	iwm_reset_rx_ring(sc, &sc->rxq);

	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (iwm_nic_lock(sc)) {
		/* Power-down device's busmaster DMA clocks */
		iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
		iwm_nic_unlock(sc);
	}
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (sc->sc_nic_locks > 0)
		printf("%s: %d active NIC locks forcefully cleared\n",
		    DEVNAME(sc), sc->sc_nic_locks);
	sc->sc_nic_locks = 0;

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again.
	 */
	iwm_disable_interrupts(sc);

	/* Reset the on-board processor. */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1660 
/*
 * Program HW_IF_CONFIG with the MAC step/dash (from the HW revision)
 * and the radio type/step/dash (from the firmware's PHY config).
 */
void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Extract radio configuration fields from the FW PHY config. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
1696 
/*
 * Program the RX DMA engine: reset pointers, hand the device the
 * ring/status physical addresses, and enable the channel.
 * Returns EBUSY if the NIC lock cannot be taken.
 */
int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	iwm_disable_rx_dma(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * This value should initially be 0 (before preparing any RBs),
	 * and should be 8 after preparing the first 8 RBs (for example).
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1747 
/*
 * Program the TX side: "keep warm" page, per-queue descriptor base
 * addresses, and scheduler auto-active mode.  Returns EBUSY if the
 * NIC lock cannot be taken.
 */
int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1776 
1777 int
1778 iwm_nic_init(struct iwm_softc *sc)
1779 {
1780 	int err;
1781 
1782 	iwm_apm_init(sc);
1783 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1784 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1785 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
1786 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
1787 
1788 	iwm_nic_config(sc);
1789 
1790 	err = iwm_nic_rx_init(sc);
1791 	if (err)
1792 		return err;
1793 
1794 	err = iwm_nic_tx_init(sc);
1795 	if (err)
1796 		return err;
1797 
1798 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1799 
1800 	return 0;
1801 }
1802 
/*
 * Map access categories to device TX FIFO numbers.
 * NOTE(review): index order assumed to match the net80211 EDCA AC
 * enumeration used by callers -- confirm against the indexing code.
 */
const uint8_t iwm_ac_to_tx_fifo[] = {
	IWM_TX_FIFO_VO,
	IWM_TX_FIFO_VI,
	IWM_TX_FIFO_BE,
	IWM_TX_FIFO_BK,
};
1809 
/*
 * Activate TX queue 'qid' and attach it to FIFO 'fifo'.  The command
 * queue is programmed directly via the scheduler registers; all other
 * queues are configured through a firmware SCD_QUEUE_CFG command.
 * Caller must hold the NIC lock.
 */
int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	iwm_nic_assert_locked(sc);

	/* Reset the queue's write pointer to slot 0. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_CMD_QUEUE) {
		/* Disable the queue while it is being configured. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

		/* Clear this queue's scheduler context in SRAM. */
		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);

		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT
		        << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		/* Re-enable the queue, now bound to 'fifo'. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int err;

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
		    sizeof(cmd), &cmd);
		if (err)
			return err;
	}

	/*
	 * NOTE(review): this ORs the raw queue number into SCD_EN_CTRL;
	 * the Linux reference driver sets BIT(queue) here.  Verify
	 * whether '| qid' should be '| (1 << qid)'.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	return 0;
}
1867 
/*
 * Finish bring-up after the firmware has reported "alive": reset the
 * ICT, clear and program the TX scheduler, enable the command queue
 * and the FH DMA channels.  Returns EBUSY if the NIC lock cannot be
 * taken, or an errno from any failing step.
 */
int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int err, chnl;
	uint32_t base;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * NOTE(review): 'base' (the scheduler SRAM base reported by the
	 * firmware) is read but not otherwise used here -- confirm
	 * whether it should be compared against sc->sched_base.
	 */
	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	err = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (err)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* enable command channel */
	err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
	if (err)
		goto out;

	/* Activate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
 	iwm_nic_unlock(sc);
	return err;
}
1924 
1925 struct iwm_phy_db_entry *
1926 iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
1927 {
1928 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
1929 
1930 	if (type >= IWM_PHY_DB_MAX)
1931 		return NULL;
1932 
1933 	switch (type) {
1934 	case IWM_PHY_DB_CFG:
1935 		return &phy_db->cfg;
1936 	case IWM_PHY_DB_CALIB_NCH:
1937 		return &phy_db->calib_nch;
1938 	case IWM_PHY_DB_CALIB_CHG_PAPD:
1939 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
1940 			return NULL;
1941 		return &phy_db->calib_ch_group_papd[chg_id];
1942 	case IWM_PHY_DB_CALIB_CHG_TXP:
1943 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
1944 			return NULL;
1945 		return &phy_db->calib_ch_group_txp[chg_id];
1946 	default:
1947 		return NULL;
1948 	}
1949 	return NULL;
1950 }
1951 
/*
 * Store a calibration result notification from firmware in the PHY DB.
 * Returns 0 on success, EINVAL for unknown section types, ENOMEM if
 * memory for the copy cannot be allocated.
 */
int
iwm_phy_db_set_section(struct iwm_softc *sc,
    struct iwm_calib_res_notif_phy_db *phy_db_notif)
{
	uint16_t type = le16toh(phy_db_notif->type);
	uint16_t size  = le16toh(phy_db_notif->length);
	struct iwm_phy_db_entry *entry;
	uint16_t chg_id = 0;

	/*
	 * For per-channel-group calibration results the first 16-bit
	 * word of the payload carries the channel group index.
	 */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
	    type == IWM_PHY_DB_CALIB_CHG_TXP)
		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);

	entry = iwm_phy_db_get_section(sc, type, chg_id);
	if (!entry)
		return EINVAL;

	/* Replace any previously stored copy of this section. */
	if (entry->data)
		free(entry->data, M_DEVBUF, entry->size);
	/* M_NOWAIT: this runs from the RX notification path. */
	entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
	if (!entry->data) {
		entry->size = 0;
		return ENOMEM;
	}
	memcpy(entry->data, phy_db_notif->data, size);
	entry->size = size;

	return 0;
}
1981 
/*
 * Return non-zero if ch_id is a channel number the PHY DB knows about:
 * the 2GHz band, or one of the 5GHz channel ranges at their spacing.
 */
int
iwm_is_valid_channel(uint16_t ch_id)
{
	if (ch_id <= 14)
		return 1;
	if (ch_id >= 36 && ch_id <= 64)
		return (ch_id % 4) == 0;
	if (ch_id >= 100 && ch_id <= 140)
		return (ch_id % 4) == 0;
	if (ch_id >= 145 && ch_id <= 165)
		return (ch_id % 4) == 1;
	return 0;
}
1992 
/*
 * Convert a channel number into a flat PHY DB channel index
 * (2GHz: 0-13, then the 5GHz ranges), or 0xff if invalid.
 */
uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	uint8_t idx;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id <= 14)
		idx = ch_id - 1;
	else if (ch_id <= 64)
		idx = (ch_id + 20) / 4;
	else if (ch_id <= 140)
		idx = (ch_id - 12) / 4;
	else
		idx = (ch_id - 13) / 4;

	return idx;
}
2007 
2008 
/*
 * Map a channel number to its PAPD calibration channel group:
 * group 0 covers 2GHz, groups 1-3 cover the 5GHz ranges.
 * Returns 0xff for invalid channels.
 */
uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id >= 1 && ch_id <= 14)
		return 0;
	if (ch_id >= 36 && ch_id <= 64)
		return 1;
	if (ch_id >= 100 && ch_id <= 140)
		return 2;

	return 3;
}
2023 
/*
 * Map a channel number to the TX-power calibration channel group
 * which covers it, or 0xff if the channel is invalid or no group
 * data has been stored yet.
 */
uint16_t
iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
{
	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
	struct iwm_phy_db_chg_txp *txp_chg;
	int i;
	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);

	if (ch_index == 0xff)
		return 0xff;

	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
		if (!txp_chg)
			return 0xff;
		/*
		 * Looking for the first channel group whose max channel
		 * index is higher than or equal to the requested channel.
		 */
		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
			return i;
	}
	return 0xff;
}
2048 
2049 int
2050 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2051     uint16_t *size, uint16_t ch_id)
2052 {
2053 	struct iwm_phy_db_entry *entry;
2054 	uint16_t ch_group_id = 0;
2055 
2056 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2057 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2058 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2059 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2060 
2061 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2062 	if (!entry)
2063 		return EINVAL;
2064 
2065 	*data = entry->data;
2066 	*size = entry->size;
2067 
2068 	return 0;
2069 }
2070 
/*
 * Send one PHY DB section to the firmware (asynchronously).
 * The fixed command header goes in fragment 0, the variable-length
 * payload in fragment 1.
 */
int
iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
    void *data)
{
	struct iwm_phy_db_cmd phy_db_cmd;
	struct iwm_host_cmd cmd = {
		.id = IWM_PHY_DB_CMD,
		.flags = IWM_CMD_ASYNC,
	};

	/*
	 * NOTE(review): le16toh() is used where htole16() reads more
	 * naturally; both perform the same byte swap so the result is
	 * identical, but consider htole16() for clarity.
	 */
	phy_db_cmd.type = le16toh(type);
	phy_db_cmd.length = le16toh(length);

	cmd.data[0] = &phy_db_cmd;
	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
	cmd.data[1] = data;
	cmd.len[1] = length;

	return iwm_send_cmd(sc, &cmd);
}
2091 
2092 int
2093 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2094     uint8_t max_ch_groups)
2095 {
2096 	uint16_t i;
2097 	int err;
2098 	struct iwm_phy_db_entry *entry;
2099 
2100 	for (i = 0; i < max_ch_groups; i++) {
2101 		entry = iwm_phy_db_get_section(sc, type, i);
2102 		if (!entry)
2103 			return EINVAL;
2104 
2105 		if (!entry->size)
2106 			continue;
2107 
2108 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2109 		if (err)
2110 			return err;
2111 
2112 		DELAY(1000);
2113 	}
2114 
2115 	return 0;
2116 }
2117 
2118 int
2119 iwm_send_phy_db_data(struct iwm_softc *sc)
2120 {
2121 	uint8_t *data = NULL;
2122 	uint16_t size = 0;
2123 	int err;
2124 
2125 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2126 	if (err)
2127 		return err;
2128 
2129 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2130 	if (err)
2131 		return err;
2132 
2133 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2134 	    &data, &size, 0);
2135 	if (err)
2136 		return err;
2137 
2138 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2139 	if (err)
2140 		return err;
2141 
2142 	err = iwm_phy_db_send_all_channel_groups(sc,
2143 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2144 	if (err)
2145 		return err;
2146 
2147 	err = iwm_phy_db_send_all_channel_groups(sc,
2148 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2149 	if (err)
2150 		return err;
2151 
2152 	return 0;
2153 }
2154 
2155 /*
2156  * For the high priority TE use a time event type that has similar priority to
2157  * the FW's action scan priority.
2158  */
2159 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2160 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2161 
2162 /* used to convert from time event API v2 to v1 */
2163 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2164 			     IWM_TE_V2_EVENT_SOCIOPATHIC)
2165 static inline uint16_t
2166 iwm_te_v2_get_notify(uint16_t policy)
2167 {
2168 	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2169 }
2170 
2171 static inline uint16_t
2172 iwm_te_v2_get_dep_policy(uint16_t policy)
2173 {
2174 	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
2175 		IWM_TE_V2_PLACEMENT_POS;
2176 }
2177 
2178 static inline uint16_t
2179 iwm_te_v2_get_absence(uint16_t policy)
2180 {
2181 	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2182 }
2183 
/*
 * Convert a version-2 time event command into the version-1 layout
 * for firmware which lacks IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2.
 */
void
iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
    struct iwm_time_event_cmd_v1 *cmd_v1)
{
	/* Fields with identical representation are copied verbatim. */
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	/* v1 uses 32-bit little-endian fields for repeat/max_frags. */
	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = htole32(cmd_v2->repeat);
	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	/* Unpack the packed v2 policy word into separate v1 fields. */
	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
}
2207 
/*
 * Send a time event command to the firmware, falling back to the v1
 * command layout for old firmware. On success the firmware-assigned
 * unique event id is saved in sc->sc_time_event_uid so the event can
 * later be removed. Returns 0 on success or EIO on firmware errors.
 */
int
iwm_send_time_event_cmd(struct iwm_softc *sc,
    const struct iwm_time_event_cmd_v2 *cmd)
{
	struct iwm_time_event_cmd_v1 cmd_v1;
	struct iwm_rx_packet *pkt;
	struct iwm_time_event_resp *resp;
	struct iwm_host_cmd hcmd = {
		.id = IWM_TIME_EVENT_CMD,
		.flags = IWM_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	uint32_t resp_len;
	int err;

	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2) {
		hcmd.data[0] = cmd;
		hcmd.len[0] = sizeof(*cmd);
	} else {
		/* Old firmware expects the v1 command layout. */
		iwm_te_v2_to_v1(cmd, &cmd_v1);
		hcmd.data[0] = &cmd_v1;
		hcmd.len[0] = sizeof(cmd_v1);
	}
	err = iwm_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	/* The response payload must be exactly one iwm_time_event_resp. */
	resp_len = iwm_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	/* Status 0 means the firmware accepted the event. */
	resp = (void *)pkt->data;
	if (le32toh(resp->status) == 0)
		sc->sc_time_event_uid = le32toh(resp->unique_id);
	else
		err = EIO;
out:
	iwm_free_resp(sc, &hcmd);
	return err;
}
2256 
/*
 * Schedule a firmware time event which keeps the device available on
 * the BSS channel while association is in progress ("session
 * protection"). No-op if an event is already active; the event is
 * removed again by iwm_unprotect_session().
 */
void
iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
    uint32_t duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd_v2 time_cmd;

	/* Do nothing if a time event is already scheduled. */
	if (sc->sc_flags & IWM_FLAG_TE_ACTIVE)
		return;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* Start as soon as possible (see IWM_T2_V2_START_IMMEDIATELY). */
	time_cmd.apply_time = htole32(0);

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	/* Request notifications on both start and end of the event. */
	time_cmd.policy
	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
		IWM_T2_V2_START_IMMEDIATELY);

	/* Only mark the event active if the firmware accepted it. */
	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
		sc->sc_flags |= IWM_FLAG_TE_ACTIVE;

	DELAY(100);
}
2292 
2293 void
2294 iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
2295 {
2296 	struct iwm_time_event_cmd_v2 time_cmd;
2297 
2298 	/* Do nothing if the time event has already ended. */
2299 	if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
2300 		return;
2301 
2302 	memset(&time_cmd, 0, sizeof(time_cmd));
2303 
2304 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
2305 	time_cmd.id_and_color =
2306 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2307 	time_cmd.id = htole32(sc->sc_time_event_uid);
2308 
2309 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2310 		sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
2311 
2312 	DELAY(100);
2313 }
2314 
2315 /*
2316  * NVM read access and content parsing.  We do not support
2317  * external NVM or writing NVM.
2318  */
2319 
/* List of NVM sections we are allowed/need to read. */
const int iwm_nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_REGULATORY,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
	IWM_NVM_SECTION_TYPE_HW_8000,
	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
	IWM_NVM_SECTION_TYPE_PHY_SKU,
};

/* Number of bytes requested per NVM access command. */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
/* Upper bound on the size of any NVM section we store. */
#define IWM_MAX_NVM_SECTION_SIZE	8192

#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0
2337 
2338 int
2339 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2340     uint16_t length, uint8_t *data, uint16_t *len)
2341 {
2342 	offset = 0;
2343 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2344 		.offset = htole16(offset),
2345 		.length = htole16(length),
2346 		.type = htole16(section),
2347 		.op_code = IWM_NVM_READ_OPCODE,
2348 	};
2349 	struct iwm_nvm_access_resp *nvm_resp;
2350 	struct iwm_rx_packet *pkt;
2351 	struct iwm_host_cmd cmd = {
2352 		.id = IWM_NVM_ACCESS_CMD,
2353 		.flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL),
2354 		.resp_pkt_len = IWM_CMD_RESP_MAX,
2355 		.data = { &nvm_access_cmd, },
2356 	};
2357 	int err, offset_read;
2358 	size_t bytes_read;
2359 	uint8_t *resp_data;
2360 
2361 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2362 
2363 	err = iwm_send_cmd(sc, &cmd);
2364 	if (err)
2365 		return err;
2366 
2367 	pkt = cmd.resp_pkt;
2368 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2369 		err = EIO;
2370 		goto exit;
2371 	}
2372 
2373 	/* Extract NVM response */
2374 	nvm_resp = (void *)pkt->data;
2375 	if (nvm_resp == NULL)
2376 		return EIO;
2377 
2378 	err = le16toh(nvm_resp->status);
2379 	bytes_read = le16toh(nvm_resp->length);
2380 	offset_read = le16toh(nvm_resp->offset);
2381 	resp_data = nvm_resp->data;
2382 	if (err) {
2383 		err = EINVAL;
2384 		goto exit;
2385 	}
2386 
2387 	if (offset_read != offset) {
2388 		err = EINVAL;
2389 		goto exit;
2390 	}
2391 
2392 	if (bytes_read > length) {
2393 		err = EINVAL;
2394 		goto exit;
2395 	}
2396 
2397 	memcpy(data + offset, resp_data, bytes_read);
2398 	*len = bytes_read;
2399 
2400  exit:
2401 	iwm_free_resp(sc, &cmd);
2402 	return err;
2403 }
2404 
2405 /*
2406  * Reads an NVM section completely.
2407  * NICs prior to 7000 family doesn't have a real NVM, but just read
2408  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2409  * by uCode, we need to manually check in this case that we don't
2410  * overflow and try to read more than the EEPROM size.
2411  */
2412 int
2413 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2414     uint16_t *len, size_t max_len)
2415 {
2416 	uint16_t chunklen, seglen;
2417 	int err = 0;
2418 
2419 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2420 	*len = 0;
2421 
2422 	/* Read NVM chunks until exhausted (reading less than requested) */
2423 	while (seglen == chunklen && *len < max_len) {
2424 		err = iwm_nvm_read_chunk(sc,
2425 		    section, *len, chunklen, data, &seglen);
2426 		if (err)
2427 			return err;
2428 
2429 		*len += seglen;
2430 	}
2431 
2432 	return err;
2433 }
2434 
2435 uint8_t
2436 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2437 {
2438 	uint8_t tx_ant;
2439 
2440 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2441 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2442 
2443 	if (sc->sc_nvm.valid_tx_ant)
2444 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2445 
2446 	return tx_ant;
2447 }
2448 
2449 uint8_t
2450 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2451 {
2452 	uint8_t rx_ant;
2453 
2454 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2455 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2456 
2457 	if (sc->sc_nvm.valid_rx_ant)
2458 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2459 
2460 	return rx_ant;
2461 }
2462 
/*
 * Populate ic->ic_channels[] from the NVM channel list and its flags.
 * nvm_channels[] maps NVM channel indices to IEEE channel numbers;
 * entries at index IWM_NUM_2GHZ_CHANNELS and above are 5GHz channels.
 */
void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
    const uint8_t *nvm_channels, size_t nchan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint16_t ch_flags;
	int is_5ghz;
	int flags, hw_value;

	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);

		/* Invalidate all 5GHz channels if the SKU lacks that band. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID;

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID))
			continue;

		hw_value = nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
		if (!is_5ghz) {
			/* 2GHz channels: 11b (CCK) and 11g (OFDM). */
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			/* 5GHz channels: 11a only. */
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* Channels not marked active may only be scanned passively. */
		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;

		/* Advertise 11n on every channel if the SKU enables it. */
		if (data->sku_cap_11n_enable)
			channel->ic_flags |= IEEE80211_CHAN_HT;
	}
}
2510 
2511 void
2512 iwm_setup_ht_rates(struct iwm_softc *sc)
2513 {
2514 	struct ieee80211com *ic = &sc->sc_ic;
2515 	uint8_t rx_ant;
2516 
2517 	/* TX is supported with the same MCS as RX. */
2518 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2519 
2520 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
2521 
2522 	if (sc->sc_nvm.sku_cap_mimo_disable)
2523 		return;
2524 
2525 	rx_ant = iwm_fw_valid_rx_ant(sc);
2526 	if ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
2527 	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)
2528 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
2529 }
2530 
2531 #define IWM_MAX_RX_BA_SESSIONS 16
2532 
/*
 * Update the firmware station entry to start or stop an RX block ack
 * session for the given TID, then tell net80211 whether the ADDBA
 * request was accepted or refused. Called from the ba_task.
 */
void
iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, int start)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_add_sta_cmd_v7 cmd;
	struct iwm_node *in = (void *)ni;
	int err, s;
	uint32_t status;

	/* Enforce the driver-wide cap on concurrent RX BA sessions. */
	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
		ieee80211_addba_req_refuse(ic, ni, tid);
		return;
	}

	memset(&cmd, 0, sizeof(cmd));

	/* Modify the existing station entry rather than adding one. */
	cmd.sta_id = IWM_STATION_ID;
	cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	cmd.add_modify = IWM_STA_MODE_MODIFY;

	if (start) {
		cmd.add_immediate_ba_tid = (uint8_t)tid;
		cmd.add_immediate_ba_ssn = ssn;
	} else {
		cmd.remove_immediate_ba_tid = (uint8_t)tid;
	}
	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
	    IWM_STA_MODIFY_REMOVE_BA_TID;

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
	    &status);

	/* Update the session count and answer net80211 at splnet. */
	s = splnet();
	if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
		if (start) {
			sc->sc_rx_ba_sessions++;
			ieee80211_addba_req_accept(ic, ni, tid);
		} else if (sc->sc_rx_ba_sessions > 0)
			sc->sc_rx_ba_sessions--;
	} else if (start)
		ieee80211_addba_req_refuse(ic, ni, tid);

	splx(s);
}
2580 
2581 void
2582 iwm_htprot_task(void *arg)
2583 {
2584 	struct iwm_softc *sc = arg;
2585 	struct ieee80211com *ic = &sc->sc_ic;
2586 	struct iwm_node *in = (void *)ic->ic_bss;
2587 	int err, s = splnet();
2588 
2589 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
2590 		refcnt_rele_wake(&sc->task_refs);
2591 		splx(s);
2592 		return;
2593 	}
2594 
2595 	/* This call updates HT protection based on in->in_ni.ni_htop1. */
2596 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
2597 	if (err)
2598 		printf("%s: could not change HT protection: error %d\n",
2599 		    DEVNAME(sc), err);
2600 
2601 	refcnt_rele_wake(&sc->task_refs);
2602 	splx(s);
2603 }
2604 
2605 /*
2606  * This function is called by upper layer when HT protection settings in
2607  * beacons have changed.
2608  */
void
iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	struct iwm_softc *sc = ic->ic_softc;

	/* assumes that ni == ic->ic_bss */
	/* Defer the firmware update to process context (htprot_task). */
	iwm_add_task(sc, systq, &sc->htprot_task);
}
2617 
2618 void
2619 iwm_ba_task(void *arg)
2620 {
2621 	struct iwm_softc *sc = arg;
2622 	struct ieee80211com *ic = &sc->sc_ic;
2623 	struct ieee80211_node *ni = ic->ic_bss;
2624 	int s = splnet();
2625 
2626 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
2627 		refcnt_rele_wake(&sc->task_refs);
2628 		splx(s);
2629 		return;
2630 	}
2631 
2632 	if (sc->ba_start)
2633 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
2634 	else
2635 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
2636 
2637 	refcnt_rele_wake(&sc->task_refs);
2638 	splx(s);
2639 }
2640 
2641 /*
2642  * This function is called by upper layer when an ADDBA request is received
2643  * from another STA and before the ADDBA response is sent.
2644  */
int
iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;

	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
		return ENOSPC;

	/* Defer the firmware update to process context (ba_task). */
	sc->ba_start = 1;
	sc->ba_tid = tid;
	/*
	 * NOTE(review): ba_ssn is stored byte-swapped here and placed
	 * into the ADD_STA command unmodified by iwm_sta_rx_agg() —
	 * confirm the intended byte order on big-endian hosts.
	 */
	sc->ba_ssn = htole16(ba->ba_winstart);
	iwm_add_task(sc, systq, &sc->ba_task);

	/*
	 * EBUSY tells net80211 not to reply to the ADDBA request yet;
	 * the task accepts or refuses it once the firmware is updated.
	 */
	return EBUSY;
}
2662 
2663 /*
2664  * This function is called by upper layer on teardown of an HT-immediate
2665  * Block Ack agreement (eg. upon receipt of a DELBA frame).
2666  */
2667 void
2668 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
2669     uint8_t tid)
2670 {
2671 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2672 
2673 	sc->ba_start = 0;
2674 	sc->ba_tid = tid;
2675 	iwm_add_task(sc, systq, &sc->ba_task);
2676 }
2677 
/*
 * Determine the MAC address on 8000-family devices: prefer the
 * address from the MAC-override NVM section, fall back to the OTP
 * address stored in the WFMP registers, and zero the address (with
 * a console warning) if neither source is usable.
 */
void
iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		/* Placeholder address which marks the override as unset. */
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
		    (memcmp(etherbroadcastaddr, data->hw_addr,
		    sizeof(etherbroadcastaddr)) != 0) &&
		    (memcmp(etheranyaddr, data->hw_addr,
		    sizeof(etheranyaddr)) != 0) &&
		    !ETHER_IS_MULTICAST(data->hw_addr))
			return;
	}

	if (nvm_hw) {
		/* Read the mac address from WFMP registers. */
		uint32_t mac_addr0, mac_addr1;

		if (!iwm_nic_lock(sc))
			goto out;
		mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
		iwm_nic_unlock(sc);

		/* The register words hold the address bytes reversed. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}
out:
	printf("%s: mac address not found\n", DEVNAME(sc));
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2737 
/*
 * Parse the raw NVM sections into sc->sc_nvm: radio config, SKU
 * capabilities, MAC address, and the channel map. The 7000 and 8000
 * families store this data in different sections and word sizes.
 * Always returns 0.
 */
int
iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
    const uint16_t *nvm_sw, const uint16_t *nvm_calib,
    const uint16_t *mac_override, const uint16_t *phy_sku,
    const uint16_t *regulatory)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint32_t sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/* 7000: 16-bit radio config and SKU in the SW section. */
		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);

		sku = le16_to_cpup(nvm_sw + IWM_SKU);
	} else {
		/* 8000: 32-bit radio config and SKU in the PHY SKU section. */
		uint32_t radio_cfg =
		    le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);

		sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000));
	}

	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/* The byte order is little endian 16 bit, meaning 214365 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else
		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);

	/* The channel list also lives in family-specific sections. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
		    iwm_nvm_channels, nitems(iwm_nvm_channels));
	else
		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
		    iwm_nvm_channels_8000, nitems(iwm_nvm_channels_8000));

	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
2805 
/*
 * Verify that all mandatory NVM sections for this device family were
 * read, pick the family-specific section pointers, and hand them to
 * iwm_parse_nvm_data(). Returns ENOENT if a required section is
 * missing.
 */
int
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
	const uint16_t *regulatory = NULL;

	/* Checking for required sections */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
			return ENOENT;
		}

		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			return ENOENT;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			return ENOENT;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			return ENOENT;
		}

		regulatory = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
		hw = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
		mac_override =
			(const uint16_t *)
			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
		phy_sku = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
	} else {
		panic("unknown device family %d\n", sc->sc_device_family);
	}

	/* The calibration section is optional; may be NULL. */
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}
2857 
2858 int
2859 iwm_nvm_init(struct iwm_softc *sc)
2860 {
2861 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2862 	int i, section, err;
2863 	uint16_t len;
2864 	uint8_t *buf;
2865 	const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2866 
2867 	memset(nvm_sections, 0, sizeof(nvm_sections));
2868 
2869 	buf = malloc(bufsz, M_DEVBUF, M_WAIT);
2870 	if (buf == NULL)
2871 		return ENOMEM;
2872 
2873 	for (i = 0; i < nitems(iwm_nvm_to_read); i++) {
2874 		section = iwm_nvm_to_read[i];
2875 		KASSERT(section <= nitems(nvm_sections));
2876 
2877 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2878 		if (err) {
2879 			err = 0;
2880 			continue;
2881 		}
2882 		nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
2883 		if (nvm_sections[section].data == NULL) {
2884 			err = ENOMEM;
2885 			break;
2886 		}
2887 		memcpy(nvm_sections[section].data, buf, len);
2888 		nvm_sections[section].length = len;
2889 	}
2890 	free(buf, M_DEVBUF, bufsz);
2891 	if (err == 0)
2892 		err = iwm_parse_nvm_sections(sc, nvm_sections);
2893 
2894 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2895 		if (nvm_sections[i].data != NULL)
2896 			free(nvm_sections[i].data, M_DEVBUF,
2897 			    nvm_sections[i].length);
2898 	}
2899 
2900 	return err;
2901 }
2902 
2903 int
2904 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2905     const uint8_t *section, uint32_t byte_cnt)
2906 {
2907 	int err = EINVAL;
2908 	uint32_t chunk_sz, offset;
2909 
2910 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2911 
2912 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2913 		uint32_t addr, len;
2914 		const uint8_t *data;
2915 
2916 		addr = dst_addr + offset;
2917 		len = MIN(chunk_sz, byte_cnt - offset);
2918 		data = section + offset;
2919 
2920 		err = iwm_firmware_load_chunk(sc, addr, data, len);
2921 		if (err)
2922 			break;
2923 	}
2924 
2925 	return err;
2926 }
2927 
/*
 * DMA one firmware chunk into device memory via the FH service
 * channel and wait for the "chunk done" interrupt (tsleep times out
 * after hz ticks). Returns 0 on success, EBUSY if the NIC cannot be
 * locked, or the tsleep error on timeout.
 */
int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
    const uint8_t *chunk, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int err;

	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, chunk, byte_cnt);
	bus_dmamap_sync(sc->sc_dmat,
	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);

	/* Destinations in the extended range need an extra address bit. */
	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END)
		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

	sc->sc_fw_chunk_done = 0;

	/*
	 * NOTE(review): returning here leaves the extended-address bit
	 * set in IWM_LMPM_CHICK (it is only cleared at the end of this
	 * function); confirm whether that matters on this error path.
	 */
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Point the service channel's TFD at the chunk... */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	/* ...and start the DMA transfer. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* Wait for this segment to load. */
	err = 0;
	while (!sc->sc_fw_chunk_done) {
		err = tsleep(&sc->sc_fw, 0, "iwmfw", hz);
		if (err)
			break;
	}

	if (!sc->sc_fw_chunk_done)
		printf("%s: fw chunk addr 0x%x len %d failed to load\n",
		    DEVNAME(sc), dst_addr, byte_cnt);

	/* Restore the extended-address bit if it was set above. */
	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
	}

	return err;
}
2990 
2991 int
2992 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2993 {
2994 	struct iwm_fw_sects *fws;
2995 	int err, i;
2996 	void *data;
2997 	uint32_t dlen;
2998 	uint32_t offset;
2999 
3000 	fws = &sc->sc_fw.fw_sects[ucode_type];
3001 	for (i = 0; i < fws->fw_count; i++) {
3002 		data = fws->fw_sect[i].fws_data;
3003 		dlen = fws->fw_sect[i].fws_len;
3004 		offset = fws->fw_sect[i].fws_devoff;
3005 		if (dlen > sc->sc_fwdmasegsz) {
3006 			err = EFBIG;
3007 		} else
3008 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3009 		if (err) {
3010 			printf("%s: could not load firmware chunk %u of %u\n",
3011 			    DEVNAME(sc), i, fws->fw_count);
3012 			return err;
3013 		}
3014 	}
3015 
3016 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
3017 
3018 	return 0;
3019 }
3020 
/*
 * Upload the firmware sections belonging to one CPU of an 8000-family
 * device.  Sections are consumed starting at *first_ucode_section until
 * a separator section is reached; *first_ucode_section is updated so a
 * subsequent call for the next CPU resumes after the separator.
 * After each section the firmware is told which sections have loaded
 * via the IWM_FH_UCODE_LOAD_STATUS register.
 */
int
iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
    int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, err = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;
	void *data;
	uint32_t dlen;
	uint32_t offset;

	/*
	 * CPU1 status bits occupy the low half of the load-status register;
	 * CPU2 bits are shifted up by 16.
	 */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
		last_read_idx = i;
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    offset == IWM_PAGING_SEPARATOR_SECTION)
			break;

		/* Sections larger than the DMA buffer cannot be uploaded. */
		if (dlen > sc->sc_fwdmasegsz) {
			err = EFBIG;
		} else
			err = iwm_firmware_load_sect(sc, offset, data, dlen);
		if (err) {
			printf("%s: could not load firmware chunk %d "
			    "(error %d)\n", DEVNAME(sc), i, err);
			return err;
		}

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* Each section sets one more low bit: 1, 3, 7, ... */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		} else {
			err = EBUSY;
			printf("%s: could not load firmware chunk %d "
			    "(error %d)\n", DEVNAME(sc), i, err);
			return err;
		}
	}

	*first_ucode_section = last_read_idx;

	/* Tell the firmware that all of this CPU's sections are in place. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	} else {
		err = EBUSY;
		printf("%s: could not finalize firmware loading (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	return 0;
}
3098 
3099 int
3100 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3101 {
3102 	struct iwm_fw_sects *fws;
3103 	int err = 0;
3104 	int first_ucode_section;
3105 
3106 	fws = &sc->sc_fw.fw_sects[ucode_type];
3107 
3108 	/* configure the ucode to be ready to get the secured image */
3109 	/* release CPU reset */
3110 	if (iwm_nic_lock(sc)) {
3111 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
3112 		    IWM_RELEASE_CPU_RESET_BIT);
3113 		iwm_nic_unlock(sc);
3114 	}
3115 
3116 	/* load to FW the binary Secured sections of CPU1 */
3117 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3118 	if (err)
3119 		return err;
3120 
3121 	/* load to FW the binary sections of CPU2 */
3122 	return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3123 }
3124 
3125 int
3126 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3127 {
3128 	int err, w;
3129 
3130 	sc->sc_uc.uc_intr = 0;
3131 
3132 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
3133 		err = iwm_load_firmware_8000(sc, ucode_type);
3134 	else
3135 		err = iwm_load_firmware_7000(sc, ucode_type);
3136 
3137 	if (err)
3138 		return err;
3139 
3140 	/* wait for the firmware to load */
3141 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
3142 		err = tsleep(&sc->sc_uc, 0, "iwmuc", hz/10);
3143 	}
3144 	if (err || !sc->sc_uc.uc_ok)
3145 		printf("%s: could not load firmware\n", DEVNAME(sc));
3146 
3147 	return err;
3148 }
3149 
/*
 * Prepare the NIC and boot the requested firmware image: initialize the
 * hardware, clear the rfkill handshake bits, enable host interrupts and
 * hand off to iwm_load_firmware().
 */
int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int err;

	/* Ack any interrupts left pending from a previous run. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	err = iwm_nic_init(sc);
	if (err) {
		printf("%s: unable to init nic\n", DEVNAME(sc));
		return err;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	return iwm_load_firmware(sc, ucode_type);
}
3179 
3180 int
3181 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3182 {
3183 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3184 		.valid = htole32(valid_tx_ant),
3185 	};
3186 
3187 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
3188 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3189 }
3190 
3191 int
3192 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3193 {
3194 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
3195 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3196 
3197 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3198 	phy_cfg_cmd.calib_control.event_trigger =
3199 	    sc->sc_default_calib[ucode_type].event_trigger;
3200 	phy_cfg_cmd.calib_control.flow_trigger =
3201 	    sc->sc_default_calib[ucode_type].flow_trigger;
3202 
3203 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3204 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3205 }
3206 
3207 int
3208 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
3209 	enum iwm_ucode_type ucode_type)
3210 {
3211 	enum iwm_ucode_type old_type = sc->sc_uc_current;
3212 	int err;
3213 
3214 	err = iwm_read_firmware(sc, ucode_type);
3215 	if (err)
3216 		return err;
3217 
3218 	sc->sc_uc_current = ucode_type;
3219 	err = iwm_start_fw(sc, ucode_type);
3220 	if (err) {
3221 		sc->sc_uc_current = old_type;
3222 		return err;
3223 	}
3224 
3225 	return iwm_post_alive(sc);
3226 }
3227 
/*
 * Boot the INIT firmware image.  With 'justnvm' set, only read the NVM
 * (device configuration, including the MAC address) and return.
 * Otherwise run the full init sequence and wait for the init-complete
 * and calibration-complete notifications from the firmware.
 */
int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE);
	int err;

	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		printf("%s: radio is disabled by hardware switch\n",
		    DEVNAME(sc));
		return EPERM;
	}

	sc->sc_init_complete = 0;
	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
	if (err) {
		printf("%s: failed to load init firmware\n", DEVNAME(sc));
		return err;
	}

	if (justnvm) {
		err = iwm_nvm_init(sc);
		if (err) {
			printf("%s: failed to read nvm\n", DEVNAME(sc));
			return err;
		}

		/* Use the NVM MAC address unless one was already set. */
		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
			    sc->sc_nvm.hw_addr);

		return 0;
	}

	err = iwm_send_bt_init_conf(sc);
	if (err)
		return err;

	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
	if (err)
		return err;

	/* Send TX valid antennas before triggering calibrations */
	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
	if (err)
		return err;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	err = iwm_send_phy_cfg_cmd(sc);
	if (err)
		return err;

	/*
	 * Nothing to do but wait for the init complete and phy DB
	 * notifications from the firmware.
	 */
	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
		err = tsleep(&sc->sc_init_complete, 0, "iwminit", 2*hz);
		if (err)
			break;
	}

	return err;
}
3294 
/*
 * Allocate a new mbuf cluster for RX ring slot 'idx', load it into the
 * slot's DMA map, and write its bus address into the RX descriptor.
 * If the slot already held an mbuf, that map is unloaded first; in that
 * case a subsequent load failure is fatal since the descriptor would be
 * left pointing at an unloaded buffer.
 */
int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Attach cluster storage large enough for the requested size. */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MCLGETI(m, M_DONTWAIT, NULL, IWM_RBUF_SIZE);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/* Slot was occupied: release the old mapping before reuse. */
	if (data->m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
3343 
3344 #define IWM_RSSI_OFFSET 50
3345 int
3346 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3347 {
3348 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3349 	uint32_t agc_a, agc_b;
3350 	uint32_t val;
3351 
3352 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3353 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3354 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3355 
3356 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3357 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3358 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3359 
3360 	/*
3361 	 * dBm = rssi dB - agc dB - constant.
3362 	 * Higher AGC (higher radio gain) means lower signal.
3363 	 */
3364 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3365 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3366 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3367 
3368 	return max_rssi_dbm;
3369 }
3370 
3371 /*
3372  * RSSI values are reported by the FW as positive values - need to negate
3373  * to obtain their dBM.  Account for missing antennas by replacing 0
3374  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3375  */
3376 int
3377 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3378 {
3379 	int energy_a, energy_b, energy_c, max_energy;
3380 	uint32_t val;
3381 
3382 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3383 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3384 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3385 	energy_a = energy_a ? -energy_a : -256;
3386 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3387 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3388 	energy_b = energy_b ? -energy_b : -256;
3389 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3390 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3391 	energy_c = energy_c ? -energy_c : -256;
3392 	max_energy = MAX(energy_a, energy_b);
3393 	max_energy = MAX(max_energy, energy_c);
3394 
3395 	return max_energy;
3396 }
3397 
/*
 * Handle an RX_PHY notification: stash the PHY info describing the
 * frame which the next MPDU notification will deliver (consumed by
 * iwm_rx_rx_mpdu() via sc_last_phy_info).
 */
void
iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
3409 
3410 /*
3411  * Retrieve the average noise (in dBm) among receivers.
3412  */
3413 int
3414 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3415 {
3416 	int i, total, nbant, noise;
3417 
3418 	total = nbant = noise = 0;
3419 	for (i = 0; i < 3; i++) {
3420 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
3421 		if (noise) {
3422 			total += noise;
3423 			nbant++;
3424 		}
3425 	}
3426 
3427 	/* There should be at least one antenna but check anyway. */
3428 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3429 }
3430 
/*
 * Handle an MPDU RX notification: validate the frame, compute its RSSI
 * from the PHY info saved by iwm_rx_rx_phy_cmd(), replenish the RX ring
 * slot, optionally feed radiotap, and pass the frame to net80211.
 * Frames that fail validation are dropped; their mbuf stays on the ring.
 */
void
iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_rxinfo rxi;
	struct ieee80211_channel *bss_chan;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi, chanidx;
	uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* PHY info for this frame arrived in a preceding notification. */
	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	if (len < IEEE80211_MIN_LEN) {
		ic->ic_stats.is_rx_tooshort++;
		IC2IFP(ic)->if_ierrors++;
		return;
	}
	if (len > IWM_RBUF_SIZE) {
		IC2IFP(ic)->if_ierrors++;
		return;
	}
	/* The RX status word follows the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
	    sizeof(*rx_res) + len));

	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	/* Implausible PHY config count; drop the frame. */
	if (__predict_false(phy_info->cfg_phy_cnt > 20))
		return;

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))
		return; /* drop */

	device_timestamp = le32toh(phy_info->system_timestamp);

	/* Newer firmware reports per-antenna energy instead of AGC/RSSI. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_calc_rssi(sc, phy_info);
	}
	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */

	/* Replenish the ring slot; if that fails, drop this frame. */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
		return;

	chanidx = letoh32(phy_info->channel);
	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);

	ni = ieee80211_find_rxnode(ic, wh);
	if (ni == ic->ic_bss) {
		/*
		 * We may switch ic_bss's channel during scans.
		 * Record the current channel so we can restore it later.
		 */
		bss_chan = ni->ni_chan;
		IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
	}
	ni->ni_chan = &ic->ic_channels[chanidx];

	memset(&rxi, 0, sizeof(rxi));
	rxi.rxi_rssi = rssi;
	rxi.rxi_tstamp = device_timestamp;

#if NBPFILTER > 0
	/* Build a radiotap header and tap the frame if bpf listens. */
	if (sc->sc_drvbpf != NULL) {
		struct mbuf mb;
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
		uint16_t chan_flags;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[chanidx].ic_freq);
		chan_flags = ic->ic_channels[chanidx].ic_flags;
		if (ic->ic_curmode != IEEE80211_MODE_11N)
			chan_flags &= ~IEEE80211_CHAN_HT;
		tap->wr_chan_flags = htole16(chan_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		if (phy_info->phy_flags &
		    htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
			uint8_t mcs = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK |
			        IWM_RATE_HT_MCS_NSS_MSK));
			tap->wr_rate = (0x80 | mcs);
		} else {
			/* Map the device rate code to a radiotap rate. */
			uint8_t rate = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_LEGACY_RATE_MSK));
			switch (rate) {
			/* CCK rates. */
			case  10: tap->wr_rate =   2; break;
			case  20: tap->wr_rate =   4; break;
			case  55: tap->wr_rate =  11; break;
			case 110: tap->wr_rate =  22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate =  12; break;
			case 0xf: tap->wr_rate =  18; break;
			case 0x5: tap->wr_rate =  24; break;
			case 0x7: tap->wr_rate =  36; break;
			case 0x9: tap->wr_rate =  48; break;
			case 0xb: tap->wr_rate =  72; break;
			case 0x1: tap->wr_rate =  96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default:  tap->wr_rate =   0;
			}
		}

		mb.m_data = (caddr_t)tap;
		mb.m_len = sc->sc_rxtap_len;
		mb.m_next = m;
		mb.m_nextpkt = NULL;
		mb.m_type = 0;
		mb.m_flags = 0;
		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
	}
#endif
	ieee80211_input(IC2IFP(ic), m, ni, &rxi);
	/*
	 * ieee80211_input() might have changed our BSS.
	 * Restore ic_bss's channel if we are still in the same BSS.
	 */
	if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
		ni->ni_chan = bss_chan;
	ieee80211_release_node(ic, ni);
}
3576 
3577 void
3578 iwm_enable_ht_cck_fallback(struct iwm_softc *sc, struct iwm_node *in)
3579 {
3580 	struct ieee80211com *ic = &sc->sc_ic;
3581 	struct ieee80211_node *ni = &in->in_ni;
3582 	struct ieee80211_rateset *rs = &ni->ni_rates;
3583 	uint8_t rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
3584 	uint8_t min_rval = ieee80211_min_basic_rate(ic);
3585 	int i;
3586 
3587 	/* Are CCK frames forbidden in our BSS? */
3588 	if (IWM_RVAL_IS_OFDM(min_rval))
3589 		return;
3590 
3591 	in->ht_force_cck = 1;
3592 
3593 	ieee80211_mira_cancel_timeouts(&in->in_mn);
3594 	ieee80211_mira_node_init(&in->in_mn);
3595 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
3596 
3597 	/* Choose initial CCK Tx rate. */
3598 	ni->ni_txrate = 0;
3599 	for (i = 0; i < rs->rs_nrates; i++) {
3600 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
3601 		if (rval == min_rval) {
3602 			ni->ni_txrate = i;
3603 			break;
3604 		}
3605 	}
3606 }
3607 
/*
 * Account for the TX response of a single (non-aggregated) frame:
 * classify it as success or failure and feed the outcome to the active
 * rate-control algorithm — AMRR for legacy/forced-CCK nodes, MiRA for
 * HT nodes.  May trigger a fallback from HT to CCK rates.
 */
void
iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_node *in)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
	int txfail;

	KASSERT(tx_resp->frame_count == 1);

	txfail = (status != IWM_TX_STATUS_SUCCESS &&
	    status != IWM_TX_STATUS_DIRECT_DONE);

	/* Update rate control statistics. */
	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0 || in->ht_force_cck) {
		in->in_amn.amn_txcnt++;
		if (in->ht_force_cck) {
			/*
			 * We want to move back to OFDM quickly if possible.
			 * Only show actual Tx failures to AMRR, not retries.
			 */
			if (txfail)
				in->in_amn.amn_retrycnt++;
		} else if (tx_resp->failure_frame > 0)
			in->in_amn.amn_retrycnt++;
	} else if (ic->ic_fixed_mcs == -1) {
		/* HT node with automatic rate selection: update MiRA. */
		in->in_mn.frames += tx_resp->frame_count;
		in->in_mn.ampdu_size = le16toh(tx_resp->byte_cnt);
		in->in_mn.agglen = tx_resp->frame_count;
		if (tx_resp->failure_frame > 0)
			in->in_mn.retries += tx_resp->failure_frame;
		if (txfail)
			in->in_mn.txfail += tx_resp->frame_count;
		if (ic->ic_state == IEEE80211_S_RUN && !in->ht_force_cck) {
			int otxmcs = ni->ni_txmcs;

			ieee80211_mira_choose(&in->in_mn, ic, &in->in_ni);

			/* Fall back to CCK rates if MCS 0 is failing. */
			if (txfail && IEEE80211_IS_CHAN_2GHZ(ni->ni_chan) &&
			    otxmcs == 0 && ni->ni_txmcs == 0)
				iwm_enable_ht_cck_fallback(sc, in);
		}
	}

	if (txfail)
		ifp->if_oerrors++;
}
3659 
/*
 * Process a TX completion notification: update rate control, release
 * the frame's DMA map and mbuf, and restart the interface send queue
 * if it had been throttled due to full TX rings.
 */
void
iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	/* Ignore duplicate completions for an already-finished slot. */
	if (txd->done)
		return;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	sc->sc_tx_timer = 0;

	iwm_rx_tx_cmd_single(sc, pkt, in);

	/* Release the frame's DMA resources and mbuf. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	txd->m = NULL;
	txd->in = NULL;
	ieee80211_release_node(ic, &in->in_ni);

	/* Un-throttle the send queue once the ring drains below the mark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			(*ifp->if_start)(ifp);
		}
	}
}
3709 
3710 void
3711 iwm_rx_bmiss(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3712     struct iwm_rx_data *data)
3713 {
3714 	struct ieee80211com *ic = &sc->sc_ic;
3715 	struct iwm_missed_beacons_notif *mbn = (void *)pkt->data;
3716 	uint32_t missed;
3717 
3718 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
3719 	    (ic->ic_state != IEEE80211_S_RUN))
3720 		return;
3721 
3722 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3723 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
3724 
3725 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
3726 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
3727 		/*
3728 		 * Rather than go directly to scan state, try to send a
3729 		 * directed probe request first. If that fails then the
3730 		 * state machine will drop us into scanning after timing
3731 		 * out waiting for a probe response.
3732 		 */
3733 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
3734 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
3735 	}
3736 
3737 }
3738 
/*
 * Add, modify, or remove the binding between the node's MAC context and
 * its PHY context.  Returns EIO if the firmware acknowledges the command
 * with a non-zero status, EINVAL if the PHY context is already gone.
 */
int
iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
{
	struct iwm_binding_cmd cmd;
	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
	uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
	int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
	uint32_t status;

	if (action == IWM_FW_CTXT_ACTION_ADD && active)
		panic("binding already added");
	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
		panic("binding already removed");

	if (phyctxt == NULL) /* XXX race with iwm_stop() */
		return EINVAL;

	memset(&cmd, 0, sizeof(cmd));

	/* The binding is identified by its PHY context's id and color. */
	cmd.id_and_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
	cmd.action = htole32(action);
	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));

	/* Only the first MAC slot is used; mark the rest invalid. */
	cmd.macs[0] = htole32(mac_id);
	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);

	status = 0;
	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
	    sizeof(cmd), &cmd, &status);
	if (err == 0 && status != 0)
		err = EIO;

	return err;
}
3775 
3776 void
3777 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3778     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3779 {
3780 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
3781 
3782 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
3783 	    ctxt->color));
3784 	cmd->action = htole32(action);
3785 	cmd->apply_time = htole32(apply_time);
3786 }
3787 
3788 void
3789 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
3790     struct ieee80211_channel *chan, uint8_t chains_static,
3791     uint8_t chains_dynamic)
3792 {
3793 	struct ieee80211com *ic = &sc->sc_ic;
3794 	uint8_t active_cnt, idle_cnt;
3795 
3796 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3797 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
3798 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
3799 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
3800 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
3801 
3802 	/* Set rx the chains */
3803 	idle_cnt = chains_static;
3804 	active_cnt = chains_dynamic;
3805 
3806 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
3807 					IWM_PHY_RX_CHAIN_VALID_POS);
3808 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
3809 	cmd->rxchain_info |= htole32(active_cnt <<
3810 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
3811 
3812 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
3813 }
3814 
3815 int
3816 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3817     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
3818     uint32_t apply_time)
3819 {
3820 	struct iwm_phy_context_cmd cmd;
3821 
3822 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3823 
3824 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3825 	    chains_static, chains_dynamic);
3826 
3827 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
3828 	    sizeof(struct iwm_phy_context_cmd), &cmd);
3829 }
3830 
/*
 * Submit a host command to the firmware via the command TX ring.
 * Small commands are copied into the ring's pre-allocated command
 * buffer; larger ones get a dedicated DMA-mapped mbuf.  Synchronous
 * commands sleep until iwm_cmd_done() wakes them; if the caller set
 * IWM_CMD_WANT_RESP, the response packet is handed back in
 * hcmd->resp_pkt and must be released with iwm_free_resp().
 */
int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
	struct iwm_tfd *desc;
	struct iwm_tx_data *txdata;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int err = 0, i, paylen, off, s;
	int idx, code, async, group_id;
	size_t hdrlen, datasz;
	uint8_t *data;
	/* Snapshot the generation count to detect a device reset. */
	int generation = sc->sc_generation;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	idx = ring->cur;

	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* If this command waits for a response, allocate response buffer. */
	hcmd->resp_pkt = NULL;
	if (hcmd->flags & IWM_CMD_WANT_RESP) {
		uint8_t *resp_buf;
		KASSERT(!async);
		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwm_rx_packet));
		KASSERT(hcmd->resp_pkt_len <= IWM_CMD_RESP_MAX);
		if (sc->sc_cmd_resp_pkt[idx] != NULL)
			return ENOSPC;
		/*
		 * NOTE(review): error paths below that 'goto out' leave
		 * this buffer in sc_cmd_resp_pkt[idx]; since ring->cur is
		 * not advanced on those paths, a retry at the same slot
		 * returns ENOSPC above — confirm this is intended.
		 */
		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (resp_buf == NULL)
			return ENOMEM;
		sc->sc_cmd_resp_pkt[idx] = resp_buf;
		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
	} else {
		sc->sc_cmd_resp_pkt[idx] = NULL;
	}

	s = splnet();

	desc = &ring->desc[idx];
	txdata = &ring->data[idx];

	/* Grouped commands use the wide header format. */
	group_id = iwm_cmd_groupid(code);
	if (group_id != 0) {
		hdrlen = sizeof(cmd->hdr_wide);
		datasz = sizeof(cmd->data_wide);
	} else {
		hdrlen = sizeof(cmd->hdr);
		datasz = sizeof(cmd->data);
	}

	if (paylen > datasz) {
		/* Command is too large to fit in pre-allocated space. */
		size_t totlen = hdrlen + paylen;
		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
			printf("%s: firmware command too long (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			err = EINVAL;
			goto out;
		}
		m = MCLGETI(NULL, M_DONTWAIT, NULL, totlen);
		if (m == NULL) {
			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			err = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwm_device_cmd *);
		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			m_freem(m);
			goto out;
		}
		txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
		paddr = txdata->map->dm_segs[0].ds_addr;
	} else {
		cmd = &ring->cmd[idx];
		paddr = txdata->cmd_paddr;
	}

	/* Fill in the (wide or legacy) command header. */
	if (group_id != 0) {
		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
		cmd->hdr_wide.group_id = group_id;
		cmd->hdr_wide.qid = ring->qid;
		cmd->hdr_wide.idx = idx;
		cmd->hdr_wide.length = htole16(paylen);
		cmd->hdr_wide.version = iwm_cmd_version(code);
		data = cmd->data_wide;
	} else {
		cmd->hdr.code = code;
		cmd->hdr.flags = 0;
		cmd->hdr.qid = ring->qid;
		cmd->hdr.idx = idx;
		data = cmd->data;
	}

	/* Concatenate all payload fragments into the command buffer. */
	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* lo field is not aligned */
	addr_lo = htole32((uint32_t)paddr);
	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
	    | ((hdrlen + paylen) << 4));
	desc->num_tbs = 1;

	/* Flush the command (wherever it lives) and its TFD to the device. */
	if (paylen > datasz) {
		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

	/*
	 * Wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on 7000 family NICs.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		if (ring->queued == 0 && !iwm_nic_lock(sc)) {
			err = EBUSY;
			goto out;
		}
	}

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
#endif
	/* Kick command ring. */
	ring->queued++;
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Synchronous commands sleep until iwm_cmd_done() wakes us. */
	if (!async) {
		err = tsleep(desc, PCATCH, "iwmcmd", hz);
		if (err == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				err = ENXIO;
				goto out;
			}

			/* Response buffer will be freed in iwm_free_resp(). */
			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
			sc->sc_cmd_resp_pkt[idx] = NULL;
		} else if (generation == sc->sc_generation) {
			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
			    sc->sc_cmd_resp_len[idx]);
			sc->sc_cmd_resp_pkt[idx] = NULL;
		}
	}
 out:
	splx(s);

	return err;
}
4006 
4007 int
4008 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
4009     uint16_t len, const void *data)
4010 {
4011 	struct iwm_host_cmd cmd = {
4012 		.id = id,
4013 		.len = { len, },
4014 		.data = { data, },
4015 		.flags = flags,
4016 	};
4017 
4018 	return iwm_send_cmd(sc, &cmd);
4019 }
4020 
4021 int
4022 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
4023     uint32_t *status)
4024 {
4025 	struct iwm_rx_packet *pkt;
4026 	struct iwm_cmd_response *resp;
4027 	int err, resp_len;
4028 
4029 	KASSERT((cmd->flags & IWM_CMD_WANT_RESP) == 0);
4030 	cmd->flags |= IWM_CMD_WANT_RESP;
4031 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
4032 
4033 	err = iwm_send_cmd(sc, cmd);
4034 	if (err)
4035 		return err;
4036 
4037 	pkt = cmd->resp_pkt;
4038 	if (pkt == NULL || (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
4039 		return EIO;
4040 
4041 	resp_len = iwm_rx_packet_payload_len(pkt);
4042 	if (resp_len != sizeof(*resp)) {
4043 		iwm_free_resp(sc, cmd);
4044 		return EIO;
4045 	}
4046 
4047 	resp = (void *)pkt->data;
4048 	*status = le32toh(resp->status);
4049 	iwm_free_resp(sc, cmd);
4050 	return err;
4051 }
4052 
4053 int
4054 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
4055     const void *data, uint32_t *status)
4056 {
4057 	struct iwm_host_cmd cmd = {
4058 		.id = id,
4059 		.len = { len, },
4060 		.data = { data, },
4061 	};
4062 
4063 	return iwm_send_cmd_status(sc, &cmd, status);
4064 }
4065 
/*
 * Free the response buffer of a command which was sent with
 * IWM_CMD_WANT_RESP set (see iwm_send_cmd_status()).
 */
void
iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	KASSERT((hcmd->flags & (IWM_CMD_WANT_RESP)) == IWM_CMD_WANT_RESP);
	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
	hcmd->resp_pkt = NULL;
}
4073 
/*
 * Process a firmware response on the command queue: tear down the
 * DMA mapping of the command's mbuf (if any) and wake up the thread
 * sleeping in iwm_send_cmd().
 */
void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	data = &ring->data[pkt->hdr.idx];

	/* A non-NULL mbuf means the command payload was DMA-mapped. */
	if (data->m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* iwm_send_cmd() sleeps on the descriptor's address. */
	wakeup(&ring->desc[pkt->hdr.idx]);

	if (ring->queued == 0) {
		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
		    DEVNAME(sc), IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code)));
	} else if (--ring->queued == 0) {
		/*
		 * 7000 family NICs are locked while commands are in progress.
		 * All commands are now done so we may unlock the NIC again.
		 */
		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
			iwm_nic_unlock(sc);
	}
}
4107 
#if 0
/*
 * Update the TX scheduler's byte-count table entry for a queue slot.
 * Necessary only for block ack mode.
 *
 * Fix: the original referenced an undeclared identifier 'w' in both
 * bus_dmamap_sync() offset computations, so this dead code would not
 * compile if enabled.  'w' is now declared as a pointer to the entry.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
    uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t *w, w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w = &scd_bc_tbl[qid].tfd_offset[idx];
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	*w = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/*
	 * NOTE(review): the table apparently carries a duplicate entry
	 * for low indices; keep it in sync as well -- confirm against
	 * the firmware interface documentation.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		w[IWM_TFD_QUEUE_SIZE_MAX] = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)(w + IWM_TFD_QUEUE_SIZE_MAX) -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
4143 
4144 /*
4145  * Fill in various bit for management frames, and leave them
4146  * unfilled for data frames (firmware takes care of that).
4147  * Return the selected TX rate.
4148  */
4149 const struct iwm_rate *
4150 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4151     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4152 {
4153 	struct ieee80211com *ic = &sc->sc_ic;
4154 	struct ieee80211_node *ni = &in->in_ni;
4155 	struct ieee80211_rateset *rs = &ni->ni_rates;
4156 	const struct iwm_rate *rinfo;
4157 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4158 	int min_ridx = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
4159 	int ridx, rate_flags;
4160 
4161 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4162 	tx->data_retry_limit = IWM_LOW_RETRY_LIMIT;
4163 
4164 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4165 	    type != IEEE80211_FC0_TYPE_DATA) {
4166 		/* for non-data, use the lowest supported rate */
4167 		ridx = min_ridx;
4168 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4169 	} else if (ic->ic_fixed_mcs != -1) {
4170 		ridx = sc->sc_fixed_ridx;
4171 	} else if (ic->ic_fixed_rate != -1) {
4172 		ridx = sc->sc_fixed_ridx;
4173 	} else if ((ni->ni_flags & IEEE80211_NODE_HT) && !in->ht_force_cck) {
4174 		ridx = iwm_mcs2ridx[ni->ni_txmcs];
4175 	} else {
4176 		uint8_t rval;
4177 		rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
4178 		ridx = iwm_rval2ridx(rval);
4179 		if (ridx < min_ridx)
4180 			ridx = min_ridx;
4181 	}
4182 
4183 	rinfo = &iwm_rates[ridx];
4184 	if (iwm_is_mimo_ht_plcp(rinfo->ht_plcp))
4185 		rate_flags = IWM_RATE_MCS_ANT_AB_MSK;
4186 	else
4187 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
4188 	if (IWM_RIDX_IS_CCK(ridx))
4189 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
4190 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4191 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4192 		rate_flags |= IWM_RATE_MCS_HT_MSK;
4193 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4194 	} else
4195 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4196 
4197 	return rinfo;
4198 }
4199 
/* Size of the first transfer buffer: command header + start of TX command. */
#define TB0_SIZE 16
/*
 * Transmit one frame: build the TX command in the ring slot, DMA-map
 * the payload mbuf, and fill the TX descriptor (TFD) with all segments.
 * Returns 0 on success; the mbuf is consumed on both success and failure.
 */
int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type;
	int i, totlen, err, pad;
	int hdrlen2, rtsthres = ic->ic_rtsthreshold;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_get_hdrlen(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* XXX hdrlen2 is computed but never used below -- candidate for removal. */
	hdrlen2 = (ieee80211_has_qos(wh)) ?
	    sizeof (struct ieee80211_qosframe) :
	    sizeof (struct ieee80211_frame);

	tid = 0;

	/* Pick the TX ring slot for this access category. */
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Build the command header in the slot's command buffer. */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Select TX rate and fill rate-related fields of the TX command. */
	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

#if NBPFILTER > 0
	/* Feed a radiotap copy of the frame to attached BPF listeners. */
	if (sc->sc_drvbpf != NULL) {
		struct mbuf mb;
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
		uint16_t chan_flags;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		chan_flags = ni->ni_chan->ic_flags;
		if (ic->ic_curmode != IEEE80211_MODE_11N)
			chan_flags &= ~IEEE80211_CHAN_HT;
		tap->wt_chan_flags = htole16(chan_flags);
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
		    type == IEEE80211_FC0_TYPE_DATA &&
		    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
			tap->wt_rate = (0x80 | rinfo->ht_plcp);
		} else
			tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		mb.m_data = (caddr_t)tap;
		mb.m_len = sc->sc_txtap_len;
		mb.m_next = m;
		mb.m_nextpkt = NULL;
		mb.m_type = 0;
		mb.m_flags = 0;
		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
	}
#endif

	/* Encrypt in software; the frame may be reallocated. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
                k = ieee80211_get_txkey(ic, wh, ni);
		if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
			return ENOBUFS;
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* HT peers use MiRA's dynamically computed RTS threshold. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		rtsthres = ieee80211_mira_get_rts_threshold(&in->in_mn, ic, ni,
		    totlen + IEEE80211_CRC_LEN);

	/* Request RTS/CTS protection for long frames or when configured. */
	if (type == IEEE80211_FC0_TYPE_DATA &&
	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
	    (totlen + IEEE80211_CRC_LEN > rtsthres ||
	    (ic->ic_flags & IEEE80211_F_USEPROT)))
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = IWM_AUX_STA_ID;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	/* Map the payload; retry once after linearizing on EFBIG. */
	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (err && err != EFBIG) {
		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
		m_freem(m);
		return err;
	}
	if (err) {
		/* Too many DMA segments, linearize mbuf. */
		if (m_defrag(m, M_DONTWAIT)) {
			m_freem(m);
			return ENOBUFS;
		}
		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
			    err);
			m_freem(m);
			return err;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	/* TB0/TB1 cover the command header, TX command, and 802.11 header. */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command, and descriptor before the doorbell write. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
4422 
4423 int
4424 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk)
4425 {
4426 	struct iwm_tx_path_flush_cmd flush_cmd = {
4427 		.queues_ctl = htole32(tfd_msk),
4428 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4429 	};
4430 	int err;
4431 
4432 	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, 0,
4433 	    sizeof(flush_cmd), &flush_cmd);
4434 	if (err)
4435                 printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
4436 	return err;
4437 }
4438 
/* Turn the device LED on via the CSR LED register. */
void
iwm_led_enable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
}
4444 
/* Turn the device LED off via the CSR LED register. */
void
iwm_led_disable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
}
4450 
/* Return non-zero if the CSR LED register currently reads "on". */
int
iwm_led_is_enabled(struct iwm_softc *sc)
{
	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
}
4456 
4457 void
4458 iwm_led_blink_timeout(void *arg)
4459 {
4460 	struct iwm_softc *sc = arg;
4461 
4462 	if (iwm_led_is_enabled(sc))
4463 		iwm_led_disable(sc);
4464 	else
4465 		iwm_led_enable(sc);
4466 
4467 	timeout_add_msec(&sc->sc_led_blink_to, 200);
4468 }
4469 
/* Start blinking: fire the blink timeout immediately. */
void
iwm_led_blink_start(struct iwm_softc *sc)
{
	timeout_add(&sc->sc_led_blink_to, 0);
}
4475 
/* Stop blinking: cancel the timeout and leave the LED off. */
void
iwm_led_blink_stop(struct iwm_softc *sc)
{
	timeout_del(&sc->sc_led_blink_to);
	iwm_led_disable(sc);
}
4482 
4483 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
4484 
4485 int
4486 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
4487     struct iwm_beacon_filter_cmd *cmd)
4488 {
4489 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4490 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
4491 }
4492 
/*
 * Fill connection-quality-monitor related fields of a beacon filter
 * command.  Currently only propagates the cached beacon-abort setting;
 * the 'in' argument is unused here.
 */
void
iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
4499 
/*
 * Enable or disable the firmware's beacon abort feature.
 * A no-op unless beacon filtering is currently enabled.
 */
int
iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(enable),
	};

	if (!sc->sc_bf.bf_enabled)
		return 0;

	/* Remember the setting so later CQM updates re-apply it. */
	sc->sc_bf.ba_enabled = enable;
	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	return iwm_beacon_filter_send_cmd(sc, &cmd);
}
4516 
4517 void
4518 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4519     struct iwm_mac_power_cmd *cmd)
4520 {
4521 	struct ieee80211_node *ni = &in->in_ni;
4522 	int dtim_period, dtim_msec, keep_alive;
4523 
4524 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4525 	    in->in_color));
4526 	if (ni->ni_dtimperiod)
4527 		dtim_period = ni->ni_dtimperiod;
4528 	else
4529 		dtim_period = 1;
4530 
4531 	/*
4532 	 * Regardless of power management state the driver must set
4533 	 * keep alive period. FW will use it for sending keep alive NDPs
4534 	 * immediately after association. Check that keep alive period
4535 	 * is at least 3 * DTIM.
4536 	 */
4537 	dtim_msec = dtim_period * ni->ni_intval;
4538 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4539 	keep_alive = roundup(keep_alive, 1000) / 1000;
4540 	cmd->keep_alive_seconds = htole16(keep_alive);
4541 
4542 	cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4543 }
4544 
4545 int
4546 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4547 {
4548 	int err;
4549 	int ba_enable;
4550 	struct iwm_mac_power_cmd cmd;
4551 
4552 	memset(&cmd, 0, sizeof(cmd));
4553 
4554 	iwm_power_build_cmd(sc, in, &cmd);
4555 
4556 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
4557 	    sizeof(cmd), &cmd);
4558 	if (err != 0)
4559 		return err;
4560 
4561 	ba_enable = !!(cmd.flags &
4562 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4563 	return iwm_update_beacon_abort(sc, in, ba_enable);
4564 }
4565 
4566 int
4567 iwm_power_update_device(struct iwm_softc *sc)
4568 {
4569 	struct iwm_device_power_cmd cmd = {
4570 		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4571 	};
4572 
4573 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4574 		return 0;
4575 
4576 	return iwm_send_cmd_pdu(sc,
4577 	    IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
4578 }
4579 
4580 int
4581 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4582 {
4583 	struct iwm_beacon_filter_cmd cmd = {
4584 		IWM_BF_CMD_CONFIG_DEFAULTS,
4585 		.bf_enable_beacon_filter = htole32(1),
4586 	};
4587 	int err;
4588 
4589 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4590 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
4591 
4592 	if (err == 0)
4593 		sc->sc_bf.bf_enabled = 1;
4594 
4595 	return err;
4596 }
4597 
4598 int
4599 iwm_disable_beacon_filter(struct iwm_softc *sc)
4600 {
4601 	struct iwm_beacon_filter_cmd cmd;
4602 	int err;
4603 
4604 	memset(&cmd, 0, sizeof(cmd));
4605 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
4606 		return 0;
4607 
4608 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
4609 	if (err == 0)
4610 		sc->sc_bf.bf_enabled = 0;
4611 
4612 	return err;
4613 }
4614 
/*
 * Add the AP station to the firmware's station table, or modify it
 * when 'update' is non-zero.  Returns EIO if the firmware rejects
 * the command.
 */
int
iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
{
	struct iwm_add_sta_cmd_v7 add_sta_cmd;
	int err;
	uint32_t status;
	struct ieee80211com *ic = &sc->sc_ic;

	if (!update && (sc->sc_flags & IWM_FLAG_STA_ACTIVE))
		panic("STA already added");

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	add_sta_cmd.sta_id = IWM_STATION_ID;
	add_sta_cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	if (!update) {
		/* On first add, claim one TX queue per access category. */
		int ac;
		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
			add_sta_cmd.tfd_queue_msk |=
			    htole32(1 << iwm_ac_to_tx_fifo[ac]);
		}
		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
	}
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
	add_sta_cmd.tid_disable_tx = htole16(0xffff);
	if (update)
		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);

	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		/* Map the peer's A-MPDU density to the firmware encoding. */
		add_sta_cmd.station_flags_msk
		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);

		add_sta_cmd.station_flags
		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
		case IEEE80211_AMPDU_PARAM_SS_2:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_4:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_8:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_16:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
			break;
		default:
			break;
		}
	}

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
	    &add_sta_cmd, &status);
	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
4683 
/*
 * Add the auxiliary station used for frames not associated with our
 * AP (e.g. scan probe requests), after enabling its TX queue.
 */
int
iwm_add_aux_sta(struct iwm_softc *sc)
{
	struct iwm_add_sta_cmd_v7 cmd;
	int err;
	uint32_t status;

	err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
	if (err)
		return err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = IWM_AUX_STA_ID;
	cmd.mac_id_n_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
	cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
	/* Disable aggregation on all TIDs. */
	cmd.tid_disable_tx = htole16(0xffff);

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
	    &status);
	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
4710 
4711 int
4712 iwm_rm_sta_cmd(struct iwm_softc *sc, struct iwm_node *in)
4713 {
4714 	struct iwm_rm_sta_cmd rm_sta_cmd;
4715 	int err;
4716 
4717 	if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
4718 		panic("sta already removed");
4719 
4720 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
4721 	rm_sta_cmd.sta_id = IWM_STATION_ID;
4722 
4723 	err = iwm_send_cmd_pdu(sc, IWM_REMOVE_STA, 0, sizeof(rm_sta_cmd),
4724 	    &rm_sta_cmd);
4725 
4726 	return err;
4727 }
4728 
4729 uint16_t
4730 iwm_scan_rx_chain(struct iwm_softc *sc)
4731 {
4732 	uint16_t rx_chain;
4733 	uint8_t rx_ant;
4734 
4735 	rx_ant = iwm_fw_valid_rx_ant(sc);
4736 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
4737 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
4738 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
4739 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
4740 	return htole16(rx_chain);
4741 }
4742 
/*
 * Choose the rate and antenna for scan TX commands.  The TX antenna
 * is rotated across calls so consecutive scans do not always use the
 * same antenna.
 */
uint32_t
iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
{
	uint32_t tx_ant;
	int i, ind;

	/* Advance to the next antenna the firmware reports as valid. */
	for (i = 0, ind = sc->sc_scan_last_antenna;
	    i < IWM_RATE_MCS_ANT_NUM; i++) {
		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
			sc->sc_scan_last_antenna = ind;
			break;
		}
	}
	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;

	/* 1 Mbit/s CCK on 2GHz unless CCK is disallowed, else 6 Mbit/s OFDM. */
	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
				   tx_ant);
	else
		return htole32(IWM_RATE_6M_PLCP | tx_ant);
}
4765 
/*
 * Populate the channel list of an LMAC scan command from net80211's
 * configured channels.  Returns the number of channels written,
 * capped at the firmware's scan channel limit.
 */
uint8_t
iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_lmac *chan, int n_ssids, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		/* Skip channel slots which are not configured. */
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
		chan->iter_count = htole16(1);
		chan->iter_interval = 0;
		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
		/* Directed foreground scans probe for SSID 0. */
		if (n_ssids != 0 && !bgscan)
			chan->flags |= htole32(1 << 1); /* select SSID 0 */
		chan++;
		nchan++;
	}

	return nchan;
}
4793 
/*
 * Populate the channel list of a UMAC scan command from net80211's
 * configured channels.  Returns the number of channels written,
 * capped at the firmware's scan channel limit.
 */
uint8_t
iwm_umac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		/* Skip channel slots which are not configured. */
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
		chan->iter_count = 1;
		chan->iter_interval = htole16(0);
		/*
		 * NOTE(review): flags is only written for directed
		 * foreground scans; presumably the caller zero-fills
		 * the command buffer -- verify at the call site.
		 */
		if (n_ssids != 0 && !bgscan)
			chan->flags = htole32(1 << 0); /* select SSID 0 */
		chan++;
		nchan++;
	}

	return nchan;
}
4820 
/*
 * Build the probe request template used by firmware-driven scans.
 * The frame is assembled into preq->buf, and the offset/length pairs
 * in *preq tell the firmware where the MAC header, per-band IEs, and
 * common IEs live.  Returns ENOBUFS if the template buffer is too
 * small for the configured SSID/rates/IEs.
 */
int
iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
	struct ieee80211_rateset *rs;
	size_t remain = sizeof(preq->buf);
	uint8_t *frm, *pos;

	memset(preq, 0, sizeof(*preq));

	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
		return ENOBUFS;

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);

	/* Tell the firmware where the MAC header is. */
	preq->mac_header.offset = 0;
	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
	remain -= frm - (uint8_t *)wh;

	/* Fill in 2GHz IEs and tell firmware where they are. */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		/* Rates plus extended rates: two IE headers needed. */
		if (remain < 4 + rs->rs_nrates)
			return ENOBUFS;
	} else if (remain < 2 + rs->rs_nrates)
		return ENOBUFS;
	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	preq->band_data[0].len = htole16(frm - pos);
	remain -= frm - pos;

	/* Add a DS parameter set IE if the firmware wants one. */
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
		if (remain < 3)
			return ENOBUFS;
		*frm++ = IEEE80211_ELEMID_DSPARMS;
		*frm++ = 1;
		*frm++ = 0;	/* channel filled in elsewhere */
		remain -= 3;
	}

	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
		/* Fill in 5GHz IEs. */
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
			if (remain < 4 + rs->rs_nrates)
				return ENOBUFS;
		} else if (remain < 2 + rs->rs_nrates)
			return ENOBUFS;
		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
		pos = frm;
		frm = ieee80211_add_rates(frm, rs);
		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
			frm = ieee80211_add_xrates(frm, rs);
		preq->band_data[1].len = htole16(frm - pos);
		remain -= frm - pos;
	}

	/* Send 11n IEs on both 2GHz and 5GHz bands. */
	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	if (ic->ic_flags & IEEE80211_F_HTON) {
		if (remain < 28)
			return ENOBUFS;
		frm = ieee80211_add_htcaps(frm, ic);
		/* XXX add WME info? */
	}
	preq->common_data.len = htole16(frm - pos);

	return 0;
}
4913 
/*
 * Start a firmware-driven scan using the LMAC scan API (older
 * firmware).  Background scans are sent asynchronously; foreground
 * scans block until the command completes.
 */
int
iwm_lmac_scan(struct iwm_softc *sc, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_lmac *req;
	size_t req_len;
	int err, async = bgscan;

	/* Command = fixed header + per-channel configs + probe template. */
	req_len = sizeof(struct iwm_scan_req_lmac) +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	/* Background scans may run in interrupt context; don't sleep then. */
	req = malloc(req_len, M_DEVBUF,
	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;
	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	if (bgscan) {
		/* Limit off-channel time so we keep serving the AP. */
		req->max_out_time = htole32(120);
		req->suspend_time = htole32(120);
	} else {
		req->max_out_time = htole32(0);
		req->suspend_time = htole32(0);
	}
	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->rx_chain_select = iwm_scan_rx_chain(sc);
	req->iter_num = htole32(1);
	req->delay = 0;

	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
	if (ic->ic_des_esslen == 0)
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
	else
		req->scan_flags |=
		    htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);

	/* Scan 2GHz always, 5GHz only when the NVM enables that band. */
	req->flags = htole32(IWM_PHY_BAND_24);
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		req->flags |= htole32(IWM_PHY_BAND_5);
	req->filter_flags =
	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);

	/* Tx flags 2 GHz. */
	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[0].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;

	/* Tx flags 5 GHz. */
	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[1].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		req->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	}

	req->n_channels = iwm_lmac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_lmac *)req->data,
	    ic->ic_des_esslen != 0, bgscan);

	/* The probe request template follows the channel configs. */
	err = iwm_fill_probe_req(sc,
			    (struct iwm_scan_probe_req *)(req->data +
			    (sizeof(struct iwm_scan_channel_cfg_lmac) *
			    sc->sc_capa_n_scan_channels)));
	if (err) {
		free(req, M_DEVBUF, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	req->schedule[0].iterations = 1;
	req->schedule[0].full_scan_mul = 1;

	/* Disable EBS. */
	req->channel_opt[0].non_ebs_ratio = 1;
	req->channel_opt[1].non_ebs_ratio = 1;

	err = iwm_send_cmd(sc, &hcmd);
	free(req, M_DEVBUF, req_len);
	return err;
}
5024 
/*
 * Send the one-time UMAC scan configuration command to the firmware.
 * This programs antenna chains, supported legacy rates, dwell times,
 * our MAC address, and the list of channel numbers the firmware is
 * allowed to scan.  Must be sent before issuing UMAC scan requests.
 */
int
iwm_config_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_scan_config *scan_config;
	int err, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
		.flags = 0,
	};
	/* All legacy (non-HT) rates the firmware may use while scanning. */
	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
	    IWM_SCAN_CONFIG_RATE_54M);

	/*
	 * The command ends with a channel list; one array entry per
	 * scan channel the firmware supports.
	 */
	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
	if (scan_config == NULL)
		return ENOMEM;

	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell_active = 10;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 44;
	scan_config->dwell_extended = 90;
	scan_config->out_of_channel_time = htole32(0);
	scan_config->suspend_time = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
	scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
	    IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
	    IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;

	/*
	 * Walk our channel table (entry 0 is unused) and collect the
	 * IEEE channel number of every channel we actually support;
	 * entries with no flags set are not configured.
	 */
	for (c = &ic->ic_channels[1], nchan = 0;
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels; c++) {
		if (c->ic_flags == 0)
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	err = iwm_send_cmd_pdu(sc, &hcmd);
	free(scan_config, M_DEVBUF, cmd_size);
	return err;
}
5099 
/*
 * Start a scan using the firmware's UMAC scan API.
 * The request buffer consists of a fixed header, a per-channel
 * configuration array, and a variable "tail" containing directed-scan
 * SSIDs, the probe request template, and the scan schedule.
 * Background scans (bgscan != 0) are sent asynchronously and use
 * non-zero out-of-channel/suspend times so we stay associated.
 */
int
iwm_umac_scan(struct iwm_softc *sc, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags =0,
	};
	struct iwm_scan_req_umac *req;
	struct iwm_scan_req_umac_tail *tail;
	size_t req_len;
	int err, async = bgscan;

	/* Header + per-channel configs + tail (SSIDs, probe req, schedule). */
	req_len = sizeof(struct iwm_scan_req_umac) +
	    (sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels) +
	    sizeof(struct iwm_scan_req_umac_tail);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	/* Background scans run from task context and must not sleep. */
	req = malloc(req_len, M_DEVBUF,
	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;
	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	if (bgscan) {
		req->max_out_time = htole32(120);
		req->suspend_time = htole32(120);
	} else {
		req->max_out_time = htole32(0);
		req->suspend_time = htole32(0);
	}

	req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);

	req->n_channels = iwm_umac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_umac *)req->data,
	    ic->ic_des_esslen != 0, bgscan);

	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
	    IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);

	/* The tail follows the (maximally sized) channel config array. */
	tail = (void *)&req->data +
		sizeof(struct iwm_scan_channel_cfg_umac) *
			sc->sc_capa_n_scan_channels;

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		tail->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
	} else
		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);

	err = iwm_fill_probe_req(sc, &tail->preq);
	if (err) {
		free(req, M_DEVBUF, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	tail->schedule[0].interval = 0;
	tail->schedule[0].iter_count = 1;

	err = iwm_send_cmd(sc, &hcmd);
	free(req, M_DEVBUF, req_len);
	return err;
}
5188 
5189 uint8_t
5190 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5191 {
5192 	int i;
5193 	uint8_t rval;
5194 
5195 	for (i = 0; i < rs->rs_nrates; i++) {
5196 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5197 		if (rval == iwm_rates[ridx].rate)
5198 			return rs->rs_rates[i];
5199 	}
5200 
5201 	return 0;
5202 }
5203 
5204 int
5205 iwm_rval2ridx(int rval)
5206 {
5207 	int ridx;
5208 
5209 	for (ridx = 0; ridx < nitems(iwm_rates); ridx++) {
5210 		if (iwm_rates[ridx].plcp == IWM_RATE_INVM_PLCP)
5211 			continue;
5212 		if (rval == iwm_rates[ridx].rate)
5213 			break;
5214 	}
5215 
5216        return ridx;
5217 }
5218 
/*
 * Compute the CCK and OFDM "ACK rate" bitmaps used in MAC context
 * commands.  The bitmaps start out from the basic rates found in the
 * node's rate set and are then extended with all mandatory lower
 * rates, as required by 802.11-2007 section 9.6 (see comment below).
 * Results are returned in host byte order via *cck_rates/*ofdm_rates.
 */
void
iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates only exist on 2 GHz channels. */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	/* Collect basic OFDM rates; bit 0 corresponds to 6 Mbps. */
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
5304 
/*
 * Fill the fields of a MAC context command which are common to all
 * actions (add/modify/remove): identifiers, addresses, ACK rates,
 * preamble/slot flags, per-AC EDCA parameters, and HT/ERP protection
 * flags.  STA-specific association data is filled in separately by
 * iwm_mac_ctxt_cmd_fill_sta().
 */
void
iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	/* We only operate as a station on a single TSF. */
	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(IWM_TSF_ID_A);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);

	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/* Map net80211's EDCA parameters onto the firmware's Tx fifos. */
	for (i = 0; i < EDCA_NUM_AC; i++) {
		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
		int txf = iwm_ac_to_tx_fifo[i];

		cmd->ac[txf].cw_min = htole16(IWM_EXP2(ac->ac_ecwmin));
		cmd->ac[txf].cw_max = htole16(IWM_EXP2(ac->ac_ecwmax));
		cmd->ac[txf].aifsn = ac->ac_aifsn;
		cmd->ac[txf].fifos_mask = (1 << txf);
		/* TXOP limit is configured in units of 32 usec. */
		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
	}
	if (ni->ni_flags & IEEE80211_NODE_QOS)
		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);

	/* Translate the AP's advertised HT protection mode. */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		enum ieee80211_htprot htprot =
		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
		switch (htprot) {
		case IEEE80211_HTPROT_NONE:
			break;
		case IEEE80211_HTPROT_NONMEMBER:
		case IEEE80211_HTPROT_NONHT_MIXED:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
				cmd->protection_flags |=
				    htole32(IWM_MAC_PROT_FLG_SELF_CTS_EN);
			break;
		case IEEE80211_HTPROT_20MHZ:
			if (ic->ic_htcaps & IEEE80211_HTCAP_CBW20_40) {
				/* XXX ... and if our channel is 40 MHz ... */
				cmd->protection_flags |=
				    htole32(IWM_MAC_PROT_FLG_HT_PROT |
				    IWM_MAC_PROT_FLG_FAT_PROT);
				if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
					cmd->protection_flags |= htole32(
					    IWM_MAC_PROT_FLG_SELF_CTS_EN);
			}
			break;
		default:
			break;
		}

		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
	}
	/* ERP (11g) protection against 11b stations. */
	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
#undef IWM_EXP2
}
5386 
5387 void
5388 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
5389     struct iwm_mac_data_sta *sta, int assoc)
5390 {
5391 	struct ieee80211_node *ni = &in->in_ni;
5392 	uint32_t dtim_off;
5393 	uint64_t tsf;
5394 
5395 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
5396 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
5397 	tsf = letoh64(tsf);
5398 
5399 	sta->is_assoc = htole32(assoc);
5400 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5401 	sta->dtim_tsf = htole64(tsf + dtim_off);
5402 	sta->bi = htole32(ni->ni_intval);
5403 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
5404 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
5405 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
5406 	sta->listen_interval = htole32(10);
5407 	sta->assoc_id = htole32(ni->ni_associd);
5408 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5409 }
5410 
5411 int
5412 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
5413     int assoc)
5414 {
5415 	struct ieee80211_node *ni = &in->in_ni;
5416 	struct iwm_mac_ctx_cmd cmd;
5417 	int active = (sc->sc_flags & IWM_FLAG_MAC_ACTIVE);
5418 
5419 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
5420 		panic("MAC already added");
5421 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
5422 		panic("MAC already removed");
5423 
5424 	memset(&cmd, 0, sizeof(cmd));
5425 
5426 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
5427 
5428 	/* Allow beacons to pass through as long as we are not associated or we
5429 	 * do not have dtim period information */
5430 	if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
5431 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
5432 	else
5433 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5434 
5435 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5436 }
5437 
/*
 * Program the firmware's time-quota scheduler.  The scheduling session
 * consists of IWM_MAX_QUOTA fragments which are divided evenly among
 * all active bindings; with the current single-station setup at most
 * one binding (the PHY context of 'in') ever receives quota.
 * If 'running' is zero the binding is configured with no active
 * interfaces and thus a quota of zero.
 */
int
iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
{
	struct iwm_time_quota_cmd cmd;
	int i, idx, num_active_macs, quota, quota_rem;
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (in && in->in_phyctxt) {
		id = in->in_phyctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS);
		colors[id] = in->in_phyctxt->color;
		if (running)
			n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota
	 */
	num_active_macs = 0;
	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
		num_active_macs += n_ifs[i];
	}

	quota = 0;
	quota_rem = 0;
	if (num_active_macs) {
		quota = IWM_MAX_QUOTA / num_active_macs;
		quota_rem = IWM_MAX_QUOTA % num_active_macs;
	}

	/* Pack entries for bindings with a known color; skip the rest. */
	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
		if (colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));

		if (n_ifs[i] <= 0) {
			cmd.quotas[idx].quota = htole32(0);
			cmd.quotas[idx].max_duration = htole32(0);
		} else {
			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
			cmd.quotas[idx].max_duration = htole32(0);
		}
		idx++;
	}

	/* Give the remainder of the session to the first binding */
	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);

	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
	    sizeof(cmd), &cmd);
}
5499 
5500 void
5501 iwm_add_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
5502 {
5503 	int s = splnet();
5504 
5505 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
5506 		splx(s);
5507 		return;
5508 	}
5509 
5510 	refcnt_take(&sc->task_refs);
5511 	if (!task_add(taskq, task))
5512 		refcnt_rele_wake(&sc->task_refs);
5513 	splx(s);
5514 }
5515 
5516 void
5517 iwm_del_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
5518 {
5519 	if (task_del(taskq, task))
5520 		refcnt_rele(&sc->task_refs);
5521 }
5522 
/*
 * Begin a foreground scan, aborting any background scan first.
 * On success the interface transitions into SCAN state; unless a
 * background scan was in progress, the link is marked down and all
 * cached nodes are freed so the scan starts from a clean slate.
 */
int
iwm_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int err;

	/* Only one scan can be active in firmware at a time. */
	if (sc->sc_flags & IWM_FLAG_BGSCAN) {
		err = iwm_scan_abort(sc);
		if (err) {
			printf("%s: could not abort background scan\n",
			    DEVNAME(sc));
			return err;
		}
	}

	/* Use whichever scan API this firmware supports. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
		err = iwm_umac_scan(sc, 0);
	else
		err = iwm_lmac_scan(sc, 0);
	if (err) {
		printf("%s: could not initiate scan\n", DEVNAME(sc));
		return err;
	}

	sc->sc_flags |= IWM_FLAG_SCANNING;
	if (ifp->if_flags & IFF_DEBUG)
		printf("%s: %s -> %s\n", ifp->if_xname,
		    ieee80211_state_name[ic->ic_state],
		    ieee80211_state_name[IEEE80211_S_SCAN]);
	if ((sc->sc_flags & IWM_FLAG_BGSCAN) == 0) {
		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
		ieee80211_free_allnodes(ic, 1);
	}
	ic->ic_state = IEEE80211_S_SCAN;
	iwm_led_blink_start(sc);
	wakeup(&ic->ic_state); /* wake iwm_init() */

	return 0;
}
5563 
5564 int
5565 iwm_bgscan(struct ieee80211com *ic)
5566 {
5567 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
5568 	int err;
5569 
5570 	if (sc->sc_flags & IWM_FLAG_SCANNING)
5571 		return 0;
5572 
5573 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
5574 		err = iwm_umac_scan(sc, 1);
5575 	else
5576 		err = iwm_lmac_scan(sc, 1);
5577 	if (err) {
5578 		printf("%s: could not initiate scan\n", DEVNAME(sc));
5579 		return err;
5580 	}
5581 
5582 	sc->sc_flags |= IWM_FLAG_BGSCAN;
5583 	return 0;
5584 }
5585 
5586 int
5587 iwm_umac_scan_abort(struct iwm_softc *sc)
5588 {
5589 	struct iwm_umac_scan_abort cmd = { 0 };
5590 
5591 	return iwm_send_cmd_pdu(sc,
5592 	    IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC),
5593 	    0, sizeof(cmd), &cmd);
5594 }
5595 
5596 int
5597 iwm_lmac_scan_abort(struct iwm_softc *sc)
5598 {
5599 	struct iwm_host_cmd cmd = {
5600 		.id = IWM_SCAN_OFFLOAD_ABORT_CMD,
5601 	};
5602 	int err, status;
5603 
5604 	err = iwm_send_cmd_status(sc, &cmd, &status);
5605 	if (err)
5606 		return err;
5607 
5608 	if (status != IWM_CAN_ABORT_STATUS) {
5609 		/*
5610 		 * The scan abort will return 1 for success or
5611 		 * 2 for "failure".  A failure condition can be
5612 		 * due to simply not being in an active scan which
5613 		 * can occur if we send the scan abort before the
5614 		 * microcode has notified us that a scan is completed.
5615 		 */
5616 		return EBUSY;
5617 	}
5618 
5619 	return 0;
5620 }
5621 
5622 int
5623 iwm_scan_abort(struct iwm_softc *sc)
5624 {
5625 	int err;
5626 
5627 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
5628 		err = iwm_umac_scan_abort(sc);
5629 	else
5630 		err = iwm_lmac_scan_abort(sc);
5631 
5632 	if (err == 0)
5633 		sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
5634 	return err;
5635 }
5636 
/*
 * Prepare firmware state for authentication: switch the PHY context
 * to the AP's channel, add a MAC context and a binding, install the
 * AP's station entry, and finally "protect" the session with a time
 * event so the firmware stays on channel throughout association.
 * On error, partially established state is rolled back unless a
 * device reset (generation change) happened in the meantime.
 */
int
iwm_auth(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	uint32_t duration;
	int generation = sc->sc_generation, err;

	splassert(IPL_NET);

	/* Tune PHY context 0 to the AP's channel (1 Tx / 1 Rx chain). */
	sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
	    IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: could not update PHY context (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
	in->in_phyctxt = &sc->sc_phyctxt[0];

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		printf("%s: could not add MAC context (error %d)\n",
		    DEVNAME(sc), err);
		return err;
 	}
	sc->sc_flags |= IWM_FLAG_MAC_ACTIVE;

	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
	if (err) {
		printf("%s: could not add binding (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_mac_ctxt;
	}
	sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE;

	err = iwm_add_sta_cmd(sc, in, 0);
	if (err) {
		printf("%s: could not add sta (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_binding;
	}
	sc->sc_flags |= IWM_FLAG_STA_ACTIVE;

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 2;
	else
		duration = IEEE80211_DUR_TU;
	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);

	return 0;

rm_binding:
	/* Skip rollback if the device was reset while we slept. */
	if (generation == sc->sc_generation) {
		iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
	}
rm_mac_ctxt:
	if (generation == sc->sc_generation) {
		iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
	}
	return err;
}
5705 
/*
 * Tear down the firmware state set up by iwm_auth(): cancel session
 * protection, remove the AP's station entry, flush pending frames,
 * and remove the binding and MAC context.  Each step is skipped when
 * the corresponding state flag shows it was never established.
 */
int
iwm_deauth(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	int ac, tfd_msk, err;

	splassert(IPL_NET);

	iwm_unprotect_session(sc, in);

	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
		err = iwm_rm_sta_cmd(sc, in);
		if (err) {
			printf("%s: could not remove STA (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
	}

	/*
	 * Build a mask of all Tx fifos used for EDCA traffic.
	 * NOTE(review): htole32() is applied per-bit here while
	 * tfd_msk is otherwise treated as a host-order int — harmless
	 * on little-endian hosts; verify intent for big-endian.
	 */
	tfd_msk = 0;
	for (ac = 0; ac < EDCA_NUM_AC; ac++)
		tfd_msk |= htole32(1 << iwm_ac_to_tx_fifo[ac]);
	err = iwm_flush_tx_path(sc, tfd_msk);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE) {
		err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
		if (err) {
			printf("%s: could not remove binding (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
	}

	if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE) {
		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
		if (err) {
			printf("%s: could not remove MAC context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
	}

	return 0;
}
5759 
5760 int
5761 iwm_assoc(struct iwm_softc *sc)
5762 {
5763 	struct ieee80211com *ic = &sc->sc_ic;
5764 	struct iwm_node *in = (void *)ic->ic_bss;
5765 	int update_sta = (sc->sc_flags & IWM_FLAG_STA_ACTIVE);
5766 	int err;
5767 
5768 	splassert(IPL_NET);
5769 
5770 	err = iwm_add_sta_cmd(sc, in, update_sta);
5771 	if (err) {
5772 		printf("%s: could not %s STA (error %d)\n",
5773 		    DEVNAME(sc), update_sta ? "update" : "add", err);
5774 		return err;
5775 	}
5776 
5777 	return 0;
5778 }
5779 
5780 int
5781 iwm_disassoc(struct iwm_softc *sc)
5782 {
5783 	struct ieee80211com *ic = &sc->sc_ic;
5784 	struct iwm_node *in = (void *)ic->ic_bss;
5785 	int err;
5786 
5787 	splassert(IPL_NET);
5788 
5789 	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
5790 		err = iwm_rm_sta_cmd(sc, in);
5791 		if (err) {
5792 			printf("%s: could not remove STA (error %d)\n",
5793 			    DEVNAME(sc), err);
5794 			return err;
5795 		}
5796 		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
5797 	}
5798 
5799 	return 0;
5800 }
5801 
/*
 * Complete the transition into RUN state after association:
 * enable MIMO Rx if the AP supports HT, update the MAC context with
 * our new association ID, and configure smart-fifo, multicast
 * filtering, power management and time quotas.  Finally initialize
 * rate adaptation and start the periodic calibration timeout.
 */
int
iwm_run(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	int err;

	splassert(IPL_NET);

	/* Configure Rx chains for MIMO. */
	if ((in->in_ni.ni_flags & IEEE80211_NODE_HT) &&
	    !sc->sc_nvm.sku_cap_mimo_disable) {
		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0],
		    2, 2, IWM_FW_CTXT_ACTION_MODIFY, 0);
		if (err) {
			printf("%s: failed to update PHY\n",
			    DEVNAME(sc));
			return err;
		}
	}

	/* We have now been assigned an associd by the AP. */
	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
	if (err) {
		printf("%s: could not set sf full on (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwm_allow_mcast(sc);
	if (err) {
		printf("%s: could not allow mcast (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwm_power_update_device(sc);
	if (err) {
		printf("%s: could not send power command (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
#ifdef notyet
	/*
	 * Disabled for now. Default beacon filter settings
	 * prevent net80211 from getting ERP and HT protection
	 * updates from beacons.
	 */
	err = iwm_enable_beacon_filter(sc, in);
	if (err) {
		printf("%s: could not enable beacon filter\n",
		    DEVNAME(sc));
		return err;
	}
#endif
	err = iwm_power_mac_update_mode(sc, in);
	if (err) {
		printf("%s: could not update MAC power (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwm_update_quotas(sc, in, 1);
	if (err) {
		printf("%s: could not update quotas (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Initialize rate adaptation (AMRR for legacy, MiRA for HT). */
	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
	ieee80211_mira_node_init(&in->in_mn);

	/* Start at lowest available bit-rate, AMRR will raise. */
	in->in_ni.ni_txrate = 0;
	in->in_ni.ni_txmcs = 0;

	timeout_add_msec(&sc->sc_calib_to, 500);
	iwm_led_enable(sc);

	return 0;
}
5889 
/*
 * Undo the RUN-state setup done by iwm_run() when leaving RUN state:
 * disable smart-fifo and beacon filtering, drop the time quota,
 * clear the association in the MAC context, and return the PHY
 * context to a single Tx/Rx chain in case MIMO was enabled.
 */
int
iwm_run_stop(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	int err;

	splassert(IPL_NET);

	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
	if (err)
		return err;

	iwm_disable_beacon_filter(sc);

	err = iwm_update_quotas(sc, in, 0);
	if (err) {
		printf("%s: could not update quotas (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Mark us as no longer associated (assoc argument is 0). */
	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	/* Reset Tx chains in case MIMO was enabled. */
	if ((in->in_ni.ni_flags & IEEE80211_NODE_HT) &&
	    !sc->sc_nvm.sku_cap_mimo_disable) {
		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
		    IWM_FW_CTXT_ACTION_MODIFY, 0);
		if (err) {
			printf("%s: failed to update PHY\n", DEVNAME(sc));
			return err;
		}
	}

	return 0;
}
5931 
5932 struct ieee80211_node *
5933 iwm_node_alloc(struct ieee80211com *ic)
5934 {
5935 	return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
5936 }
5937 
/*
 * Periodic (500ms) rate-adaptation tick.  Runs AMRR rate selection
 * while we are operating at legacy rates (or while the "force CCK"
 * workaround is active), and lifts that workaround again once AMRR
 * has settled on an OFDM rate.
 */
void
iwm_calib_timeout(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	struct ieee80211_node *ni = &in->in_ni;
	int s;

	s = splnet();
	/* Only adapt when no fixed rate/MCS is configured by the user. */
	if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
	    ((ni->ni_flags & IEEE80211_NODE_HT) == 0 || in->ht_force_cck) &&
	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
		/* Once AMRR picks an OFDM rate, stop forcing CCK. */
		if (in->ht_force_cck) {
			struct ieee80211_rateset *rs = &ni->ni_rates;
			uint8_t rv;
			rv = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
			if (IWM_RVAL_IS_OFDM(rv))
				in->ht_force_cck = 0;
		}
	}

	splx(s);

	/* Re-arm ourselves; iwm_run()/iwm_stop() control the timeout. */
	timeout_add_msec(&sc->sc_calib_to, 500);
}
5965 
/*
 * ifmedia change callback: translate a user-selected fixed rate or
 * MCS into the driver's HW rate index and restart the interface if
 * it is currently running so the new setting takes effect.
 */
int
iwm_media_change(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int err;

	err = ieee80211_media_change(ifp);
	if (err != ENETRESET)
		return err;

	if (ic->ic_fixed_mcs != -1)
		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
	else if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		/*
		 * NOTE(review): if no entry matches, ridx ends up as
		 * IWM_RIDX_MAX + 1 here — presumably unreachable since
		 * the media layer validated the rate; verify.
		 */
		sc->sc_fixed_ridx = ridx;
	}

	/* Restart the interface so the new fixed rate takes effect. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwm_stop(ifp);
		err = iwm_init(ifp);
	}
	return err;
}
5997 
/*
 * Process-context handler for net80211 state transitions requested via
 * iwm_newstate().  Tears down firmware state step by step when moving
 * to a lower state, then brings up the requested state.  Holds a
 * task_refs reference which iwm_stop() waits on during shutdown.
 */
void
iwm_newstate_task(void *psc)
{
	struct iwm_softc *sc = (struct iwm_softc *)psc;
	struct ieee80211com *ic = &sc->sc_ic;
	enum ieee80211_state nstate = sc->ns_nstate;
	enum ieee80211_state ostate = ic->ic_state;
	int arg = sc->ns_arg;
	int err = 0, s = splnet();

	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
		/* iwm_stop() is waiting for us. */
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;
	}

	if (ostate == IEEE80211_S_SCAN) {
		if (nstate == ostate) {
			/* SCAN -> SCAN: don't restart while fw still scans. */
			if (sc->sc_flags & IWM_FLAG_SCANNING) {
				refcnt_rele_wake(&sc->task_refs);
				splx(s);
				return;
			}
			/* Firmware is no longer scanning. Do another scan. */
			goto next_scan;
		} else
			iwm_led_blink_stop(sc);
	}

	/*
	 * Moving to a lower (or equal) state: undo firmware state from
	 * the current state down to the target.  Each case deliberately
	 * falls through to the teardown of the next lower state.
	 */
	if (nstate <= ostate) {
		switch (ostate) {
		case IEEE80211_S_RUN:
			err = iwm_run_stop(sc);
			if (err)
				goto out;
			/* FALLTHROUGH */
		case IEEE80211_S_ASSOC:
			if (nstate <= IEEE80211_S_ASSOC) {
				err = iwm_disassoc(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_AUTH:
			if (nstate <= IEEE80211_S_AUTH) {
				err = iwm_deauth(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_SCAN:
		case IEEE80211_S_INIT:
			break;
		}

		/* Die now if iwm_stop() was called while we were sleeping. */
		if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
			refcnt_rele_wake(&sc->task_refs);
			splx(s);
			return;
		}
	}

	/* Bring up the requested state. */
	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
next_scan:
		err = iwm_scan(sc);
		if (err)
			break;
		/* State change completes once the scan ends. */
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;

	case IEEE80211_S_AUTH:
		err = iwm_auth(sc);
		break;

	case IEEE80211_S_ASSOC:
		err = iwm_assoc(sc);
		break;

	case IEEE80211_S_RUN:
		err = iwm_run(sc);
		break;
	}

out:
	/* On error, schedule a full re-init; otherwise commit the state. */
	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
		if (err)
			task_add(systq, &sc->init_task);
		else
			sc->sc_newstate(ic, nstate, arg);
	}
	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
6098 
/*
 * net80211 state change hook.  Cancels RUN-state timeouts/tasks and
 * defers the actual transition to iwm_newstate_task(), which runs in
 * process context (firmware commands may sleep).
 */
int
iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_softc *sc = ifp->if_softc;
	struct iwm_node *in = (void *)ic->ic_bss;

	/* Leaving RUN: stop calibration/MiRA timers and pending tasks. */
	if (ic->ic_state == IEEE80211_S_RUN) {
		timeout_del(&sc->sc_calib_to);
		ieee80211_mira_cancel_timeouts(&in->in_mn);
		iwm_del_task(sc, systq, &sc->ba_task);
		iwm_del_task(sc, systq, &sc->htprot_task);
	}

	/* Stash the requested transition for iwm_newstate_task(). */
	sc->ns_nstate = nstate;
	sc->ns_arg = arg;

	iwm_add_task(sc, sc->sc_nswq, &sc->newstate_task);

	return 0;
}
6120 
6121 void
6122 iwm_endscan(struct iwm_softc *sc)
6123 {
6124 	struct ieee80211com *ic = &sc->sc_ic;
6125 
6126 	if ((sc->sc_flags & (IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN)) == 0)
6127 		return;
6128 
6129 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
6130 	ieee80211_end_scan(&ic->ic_if);
6131 }
6132 
6133 /*
6134  * Aging and idle timeouts for the different possible scenarios
6135  * in default configuration
6136  */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast traffic: { aging, idle } */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast traffic */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast traffic */
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* block-ack traffic */
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* tx re-attempts */
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
6160 
6161 /*
6162  * Aging and idle timeouts for the different possible scenarios
6163  * in single BSS MAC configuration.
6164  */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast traffic: { aging, idle } */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast traffic */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast traffic */
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{	/* block-ack traffic */
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{	/* tx re-attempts */
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
6188 
6189 void
6190 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
6191     struct ieee80211_node *ni)
6192 {
6193 	int i, j, watermark;
6194 
6195 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
6196 
6197 	/*
6198 	 * If we are in association flow - check antenna configuration
6199 	 * capabilities of the AP station, and choose the watermark accordingly.
6200 	 */
6201 	if (ni) {
6202 		if (ni->ni_flags & IEEE80211_NODE_HT) {
6203 			if (ni->ni_rxmcs[1] != 0)
6204 				watermark = IWM_SF_W_MARK_MIMO2;
6205 			else
6206 				watermark = IWM_SF_W_MARK_SISO;
6207 		} else {
6208 			watermark = IWM_SF_W_MARK_LEGACY;
6209 		}
6210 	/* default watermark value for unassociated mode. */
6211 	} else {
6212 		watermark = IWM_SF_W_MARK_MIMO2;
6213 	}
6214 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
6215 
6216 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
6217 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
6218 			sf_cmd->long_delay_timeouts[i][j] =
6219 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
6220 		}
6221 	}
6222 
6223 	if (ni) {
6224 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
6225 		       sizeof(iwm_sf_full_timeout));
6226 	} else {
6227 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
6228 		       sizeof(iwm_sf_full_timeout_def));
6229 	}
6230 
6231 }
6232 
6233 int
6234 iwm_sf_config(struct iwm_softc *sc, int new_state)
6235 {
6236 	struct ieee80211com *ic = &sc->sc_ic;
6237 	struct iwm_sf_cfg_cmd sf_cmd = {
6238 		.state = htole32(IWM_SF_FULL_ON),
6239 	};
6240 	int err = 0;
6241 
6242 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6243 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
6244 
6245 	switch (new_state) {
6246 	case IWM_SF_UNINIT:
6247 	case IWM_SF_INIT_OFF:
6248 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
6249 		break;
6250 	case IWM_SF_FULL_ON:
6251 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6252 		break;
6253 	default:
6254 		return EINVAL;
6255 	}
6256 
6257 	err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
6258 				   sizeof(sf_cmd), &sf_cmd);
6259 	return err;
6260 }
6261 
6262 int
6263 iwm_send_bt_init_conf(struct iwm_softc *sc)
6264 {
6265 	struct iwm_bt_coex_cmd bt_cmd;
6266 
6267 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
6268 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
6269 
6270 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
6271 	    &bt_cmd);
6272 }
6273 
/*
 * Send an MCC (mobile country code) update for location-aware
 * regulatory (LAR) support.  'alpha2' is a two-letter country code
 * (e.g. "ZZ" for the world-wide default).
 */
int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = IWM_CMD_WANT_RESP,
		.data = { &mcc_cmd },
	};
	int err;
	int resp_v2 = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Pack the two ASCII letters big-endian into a 16-bit field. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* Command and response sizes differ between LAR API v1 and v2. */
	if (resp_v2) {
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
		hcmd.resp_pkt_len = sizeof(struct iwm_rx_packet) +
		    sizeof(struct iwm_mcc_update_resp);
	} else {
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
		hcmd.resp_pkt_len = sizeof(struct iwm_rx_packet) +
		    sizeof(struct iwm_mcc_update_resp_v1);
	}

	err = iwm_send_cmd(sc, &hcmd);
	if (err)
		return err;

	/* The response contents are not used; only success matters. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
6313 
6314 void
6315 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
6316 {
6317 	struct iwm_host_cmd cmd = {
6318 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
6319 		.len = { sizeof(uint32_t), },
6320 		.data = { &backoff, },
6321 	};
6322 
6323 	iwm_send_cmd(sc, &cmd);
6324 }
6325 
6326 int
6327 iwm_init_hw(struct iwm_softc *sc)
6328 {
6329 	struct ieee80211com *ic = &sc->sc_ic;
6330 	int err, i, ac;
6331 
6332 	err = iwm_preinit(sc);
6333 	if (err)
6334 		return err;
6335 
6336 	err = iwm_start_hw(sc);
6337 	if (err) {
6338 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
6339 		return err;
6340 	}
6341 
6342 	err = iwm_run_init_mvm_ucode(sc, 0);
6343 	if (err)
6344 		return err;
6345 
6346 	/* Should stop and start HW since INIT image just loaded. */
6347 	iwm_stop_device(sc);
6348 	err = iwm_start_hw(sc);
6349 	if (err) {
6350 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
6351 		return err;
6352 	}
6353 
6354 	/* Restart, this time with the regular firmware */
6355 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
6356 	if (err) {
6357 		printf("%s: could not load firmware\n", DEVNAME(sc));
6358 		goto err;
6359 	}
6360 
6361 	if (!iwm_nic_lock(sc))
6362 		return EBUSY;
6363 
6364 	err = iwm_send_bt_init_conf(sc);
6365 	if (err) {
6366 		printf("%s: could not init bt coex (error %d)\n",
6367 		    DEVNAME(sc), err);
6368 		goto err;
6369 	}
6370 
6371 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
6372 	if (err) {
6373 		printf("%s: could not init tx ant config (error %d)\n",
6374 		    DEVNAME(sc), err);
6375 		goto err;
6376 	}
6377 
6378 	err = iwm_send_phy_db_data(sc);
6379 	if (err) {
6380 		printf("%s: could not init phy db (error %d)\n",
6381 		    DEVNAME(sc), err);
6382 		goto err;
6383 	}
6384 
6385 	err = iwm_send_phy_cfg_cmd(sc);
6386 	if (err) {
6387 		printf("%s: could not send phy config (error %d)\n",
6388 		    DEVNAME(sc), err);
6389 		goto err;
6390 	}
6391 
6392 	/* Add auxiliary station for scanning */
6393 	err = iwm_add_aux_sta(sc);
6394 	if (err) {
6395 		printf("%s: could not add aux station (error %d)\n",
6396 		    DEVNAME(sc), err);
6397 		goto err;
6398 	}
6399 
6400 	for (i = 0; i < 1; i++) {
6401 		/*
6402 		 * The channel used here isn't relevant as it's
6403 		 * going to be overwritten in the other flows.
6404 		 * For now use the first channel we have.
6405 		 */
6406 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6407 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6408 		    IWM_FW_CTXT_ACTION_ADD, 0);
6409 		if (err) {
6410 			printf("%s: could not add phy context %d (error %d)\n",
6411 			    DEVNAME(sc), i, err);
6412 			goto err;
6413 		}
6414 	}
6415 
6416 	/* Initialize tx backoffs to the minimum. */
6417 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6418 		iwm_tt_tx_backoff(sc, 0);
6419 
6420 	err = iwm_power_update_device(sc);
6421 	if (err) {
6422 		printf("%s: could not send power command (error %d)\n",
6423 		    DEVNAME(sc), err);
6424 		goto err;
6425 	}
6426 
6427 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
6428 		err = iwm_send_update_mcc_cmd(sc, "ZZ");
6429 		if (err) {
6430 			printf("%s: could not init LAR (error %d)\n",
6431 			    DEVNAME(sc), err);
6432 			goto err;
6433 		}
6434 	}
6435 
6436 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
6437 		err = iwm_config_umac_scan(sc);
6438 		if (err) {
6439 			printf("%s: could not configure scan (error %d)\n",
6440 			    DEVNAME(sc), err);
6441 			goto err;
6442 		}
6443 	}
6444 
6445 	for (ac = 0; ac < EDCA_NUM_AC; ac++) {
6446 		err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
6447 		    iwm_ac_to_tx_fifo[ac]);
6448 		if (err) {
6449 			printf("%s: could not enable Tx queue %d (error %d)\n",
6450 			    DEVNAME(sc), ac, err);
6451 			goto err;
6452 		}
6453 	}
6454 
6455 	err = iwm_disable_beacon_filter(sc);
6456 	if (err) {
6457 		printf("%s: could not disable beacon filter (error %d)\n",
6458 		    DEVNAME(sc), err);
6459 		goto err;
6460 	}
6461 
6462 err:
6463 	iwm_nic_unlock(sc);
6464 	return err;
6465 }
6466 
6467 /* Allow multicast from our BSSID. */
6468 int
6469 iwm_allow_mcast(struct iwm_softc *sc)
6470 {
6471 	struct ieee80211com *ic = &sc->sc_ic;
6472 	struct ieee80211_node *ni = ic->ic_bss;
6473 	struct iwm_mcast_filter_cmd *cmd;
6474 	size_t size;
6475 	int err;
6476 
6477 	size = roundup(sizeof(*cmd), 4);
6478 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
6479 	if (cmd == NULL)
6480 		return ENOMEM;
6481 	cmd->filter_own = 1;
6482 	cmd->port_id = 0;
6483 	cmd->count = 0;
6484 	cmd->pass_all = 1;
6485 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6486 
6487 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
6488 	    0, size, cmd);
6489 	free(cmd, M_DEVBUF, size);
6490 	return err;
6491 }
6492 
/*
 * Bring the interface up.  Initializes hardware/firmware, marks the
 * interface running and blocks until the initial scan state has been
 * entered (or the attempt is aborted by an interleaved iwm_stop()).
 */
int
iwm_init(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	int err, generation;

	rw_assert_wrlock(&sc->ioctl_rwl);

	/* A generation bump (also done by iwm_stop()) aborts this init. */
	generation = ++sc->sc_generation;

	KASSERT(sc->task_refs.refs == 0);
	refcnt_init(&sc->task_refs);

	err = iwm_init_hw(sc);
	if (err) {
		/* Only tear down if no one else bumped the generation. */
		if (generation == sc->sc_generation)
			iwm_stop(ifp);
		return err;
	}

	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_flags |= IFF_RUNNING;

	ieee80211_begin_scan(ifp);

	/*
	 * ieee80211_begin_scan() ends up scheduling iwm_newstate_task().
	 * Wait until the transition to SCAN state has completed.
	 */
	do {
		err = tsleep(&ic->ic_state, PCATCH, "iwminit", hz);
		if (generation != sc->sc_generation)
			return ENXIO;
		if (err)
			return err;
	} while (ic->ic_state != IEEE80211_S_SCAN);

	return 0;
}
6533 
/*
 * Transmit start routine.  Drains the management queue (always) and
 * the interface send queue (only in RUN state), handing frames to
 * iwm_tx() until the hardware tx rings fill up.
 */
void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac = EDCA_AC_BE; /* XXX */

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* need to send management frames even if we're not RUNning */
		m = mq_dequeue(&ic->ic_mgtq);
		if (m) {
			/* Management frames carry their node in the cookie. */
			ni = m->m_pkthdr.ph_cookie;
			goto sendit;
		}

		if (ic->ic_state != IEEE80211_S_RUN ||
		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
			break;

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (!m)
			break;
		/* Make sure the Ethernet header sits in the first mbuf. */
		if (m->m_len < sizeof (*eh) &&
		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
			ifp->if_oerrors++;
			continue;
		}

 sendit:
#if NBPFILTER > 0
		if (ic->ic_rawbpf != NULL)
			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
#endif
		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_release_node(ic, ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the tx watchdog (see iwm_watchdog()). */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}

	return;
}
6601 
/*
 * Bring the interface down: block new tasks, drain running ones,
 * shut down the device and reset all driver soft state.
 */
void
iwm_stop(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	int i, s = splnet();

	rw_assert_wrlock(&sc->ioctl_rwl);

	sc->sc_flags |= IWM_FLAG_SHUTDOWN; /* Disallow new tasks. */

	/* Cancel scheduled tasks and let any stale tasks finish up. */
	task_del(systq, &sc->init_task);
	iwm_del_task(sc, sc->sc_nswq, &sc->newstate_task);
	iwm_del_task(sc, systq, &sc->ba_task);
	iwm_del_task(sc, systq, &sc->htprot_task);
	/* Wait for all task references to drain (see iwm_newstate_task). */
	KASSERT(sc->task_refs.refs >= 1);
	refcnt_finalize(&sc->task_refs, "iwmstop");

	iwm_stop_device(sc);

	/* Reset soft state. */

	/* Invalidate any iwm_init() waiting on the old generation. */
	sc->sc_generation++;
	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
		sc->sc_cmd_resp_pkt[i] = NULL;
		sc->sc_cmd_resp_len[i] = 0;
	}
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	in->in_phyctxt = NULL;
	if (ic->ic_state == IEEE80211_S_RUN)
		ieee80211_mira_cancel_timeouts(&in->in_mn); /* XXX refcount? */

	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
	sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
	sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
	sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
	sc->sc_flags &= ~IWM_FLAG_HW_ERR;
	sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;

	/* Bypass the task queue; the device is already down. */
	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);

	timeout_del(&sc->sc_calib_to); /* XXX refcount? */
	iwm_led_blink_stop(sc);
	ifp->if_timer = sc->sc_tx_timer = 0;

	splx(s);
}
6655 
6656 void
6657 iwm_watchdog(struct ifnet *ifp)
6658 {
6659 	struct iwm_softc *sc = ifp->if_softc;
6660 
6661 	ifp->if_timer = 0;
6662 	if (sc->sc_tx_timer > 0) {
6663 		if (--sc->sc_tx_timer == 0) {
6664 			printf("%s: device timeout\n", DEVNAME(sc));
6665 #ifdef IWM_DEBUG
6666 			iwm_nic_error(sc);
6667 #endif
6668 			if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
6669 				task_add(systq, &sc->init_task);
6670 			ifp->if_oerrors++;
6671 			return;
6672 		}
6673 		ifp->if_timer = 1;
6674 	}
6675 
6676 	ieee80211_watchdog(ifp);
6677 }
6678 
/*
 * Interface ioctl handler.  Serialized by ioctl_rwl because iwm_init()
 * sleeps while holding it; a generation change during rw_enter() means
 * the device was stopped/restarted underneath us.
 */
int
iwm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct iwm_softc *sc = ifp->if_softc;
	int s, err = 0, generation = sc->sc_generation;

	/*
	 * Prevent processes from entering this function while another
	 * process is tsleep'ing in it.
	 */
	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
	if (err == 0 && generation != sc->sc_generation) {
		rw_exit(&sc->ioctl_rwl);
		return ENXIO;
	}
	if (err)
		return err;
	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		/* Start or stop the interface to match the UP flag. */
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				err = iwm_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				iwm_stop(ifp);
		}
		break;

	default:
		err = ieee80211_ioctl(ifp, cmd, data);
	}

	/* net80211 requests a restart by returning ENETRESET. */
	if (err == ENETRESET) {
		err = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			iwm_stop(ifp);
			err = iwm_init(ifp);
		}
	}

	splx(s);
	rw_exit(&sc->ioctl_rwl);

	return err;
}
6731 
6732 #ifdef IWM_DEBUG
6733 /*
6734  * Note: This structure is read from the device with IO accesses,
6735  * and the reading already does the endian conversion. As it is
6736  * read with uint32_t-sized accesses, any members with a different size
6737  * need to be ordered correctly though!
6738  */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date and time of the firmware
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
6786 
6787 /*
6788  * UMAC error struct - relevant starting from family 8000 chip.
6789  * Note: This structure is read from the device with IO accesses,
6790  * and the reading already does the endian conversion. As it is
6791  * read with u32-sized accesses, any members with a different size
6792  * need to be ordered correctly though!
6793  */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC firmware version major */
	uint32_t umac_minor;	/* UMAC firmware version minor */
	uint32_t frame_pointer;	/* core register 27 */
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
6811 
6812 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
6813 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
6814 
/*
 * Dump the UMAC error event table to the console.  Relevant starting
 * from family 8000 firmware (see struct iwm_umac_error_event_table).
 */
void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	/* Device memory address of the table, reported by the firmware. */
	base = sc->sc_uc.uc_umac_error_event_table;

	if (base < 0x800000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	/* iwm_read_mem() takes a length in 32-bit words. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
			sc->sc_flags, table.valid);
	}

	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
		iwm_desc_lookup(table.error_id));
	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
	    table.frame_pointer);
	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
	    table.stack_pointer);
	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
	    table.nic_isr_pref);
}
6859 
/*
 * Firmware error IDs and their symbolic names.  The final entry
 * ("ADVANCED_SYSASSERT") doubles as the fallback in iwm_desc_lookup().
 */
struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
6881 
6882 const char *
6883 iwm_desc_lookup(uint32_t num)
6884 {
6885 	int i;
6886 
6887 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
6888 		if (advanced_lookup[i].num == num)
6889 			return advanced_lookup[i].name;
6890 
6891 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
6892 	return advanced_lookup[i].name;
6893 }
6894 
6895 /*
6896  * Support for dumping the error log seemed like a good idea ...
6897  * but it's mostly hex junk and the only sensible thing is the
6898  * hw/ucode revision (which we know anyway).  Since it's here,
6899  * I'll just leave it in, just in case e.g. the Intel guys want to
6900  * help us decipher some "ADVANCED_SYSASSERT" later.
6901  */
void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	printf("%s: dumping device error log\n", DEVNAME(sc));
	/* Device memory address of the table, reported by the firmware. */
	base = sc->sc_uc.uc_error_event_table;
	if (base < 0x800000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	/* iwm_read_mem() takes a length in 32-bit words. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	if (!table.valid) {
		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
		    sc->sc_flags, table.valid);
	}

	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
	    iwm_desc_lookup(table.error_id));
	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
	    table.trm_hw_status0);
	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
	    table.trm_hw_status1);
	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
	    table.fw_rev_type);
	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
	    table.major);
	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
	    table.minor);
	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);

	/* Family 8000+ firmware also provides a UMAC error table. */
	if (sc->sc_uc.uc_umac_error_event_table)
		iwm_nic_umac_error(sc);
}
6976 #endif
6977 
/*
 * Sync the DMA map for the response payload following the rx packet
 * header, then point _var_ at it.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * Like SYNC_RESP_STRUCT but for a payload of _len_ bytes.  The
 * original macro passed sizeof(len), which captured a caller-scope
 * variable 'len' and synced only sizeof-of-its-type bytes instead of
 * the payload length; sync the full _len_ bytes.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance to the next rx ring slot, wrapping at IWM_RX_RING_COUNT. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
6993 
/*
 * Drain the Rx ring: process every notification the firmware has
 * posted since our last visit, dispatching on the (group, command)
 * code found in each packet header, then tell the hardware how far
 * we got.  Called from iwm_intr().
 */
void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	/* closed_rb_num is the firmware's current write index into the ring. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		int qid, idx, code;

		/* Make the packet header visible before reading it. */
		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION:
			iwm_rx_bmiss(sc, pkt, data);
			break;

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		case IWM_ALIVE: {
			struct iwm_alive_resp_v1 *resp1;
			struct iwm_alive_resp_v2 *resp2;
			struct iwm_alive_resp_v3 *resp3;

			/*
			 * The "alive" response layout varies with firmware
			 * version; the payload length tells us which of the
			 * three known layouts we received.
			 */
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
				SYNC_RESP_STRUCT(resp1, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp1->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp1->log_event_table_ptr);
				sc->sched_base = le32toh(resp1->scd_base_ptr);
				if (resp1->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
				SYNC_RESP_STRUCT(resp2, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp2->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp2->log_event_table_ptr);
				sc->sched_base = le32toh(resp2->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp2->error_info_addr);
				if (resp2->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
				SYNC_RESP_STRUCT(resp3, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp3->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp3->log_event_table_ptr);
				sc->sched_base = le32toh(resp3->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp3->error_info_addr);
				if (resp3->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			/* Wake whoever is sleeping on &sc->sc_uc. */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);
			iwm_phy_db_set_section(sc, phy_db_notif);
			sc->sc_init_complete |= IWM_CALIB_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;
		}

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break;
		}

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Record the firmware's country code as a string. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
		}
		/*
		 * FALLTHROUGH -- NOTE(review): a break; here looks intended,
		 * but the fallthrough is harmless since the next case only
		 * breaks.  Confirm before adding one.
		 */

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
			break;

		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_SCAN_OFFLOAD_ABORT_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
		case IWM_TIME_EVENT_CMD: {
			size_t pkt_len;

			/*
			 * If a caller asked for a copy of this command's
			 * response it allocated sc_cmd_resp_pkt[idx];
			 * otherwise there is nothing to do here.
			 */
			if (sc->sc_cmd_resp_pkt[idx] == NULL)
				break;

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);

			pkt_len = sizeof(pkt->len_n_flags) +
			    iwm_rx_packet_len(pkt);

			/* Discard failed or implausibly-sized responses. */
			if ((pkt->hdr.flags & IWM_CMD_FAILED_MSK) ||
			    pkt_len < sizeof(*pkt) ||
			    pkt_len > sc->sc_cmd_resp_len[idx]) {
				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
				    sc->sc_cmd_resp_len[idx]);
				sc->sc_cmd_resp_pkt[idx] = NULL;
				break;
			}

			/* Sync the payload and hand the caller a copy. */
			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
			break;
		}

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete |= IWM_INIT_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwm_endscan(sc);
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwm_endscan(sc);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwm_endscan(sc);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			printf("%s: firmware error 0x%x, cmd 0x%x\n",
				DEVNAME(sc), le32toh(resp->error_type),
				resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			uint32_t action;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Only the time event we scheduled is of interest. */
			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
				break;
			action = le32toh(notif->action);
			if (action & IWM_TE_V2_NOTIF_HOST_EVENT_END)
				sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
			break;
		}

		case IWM_WIDE_ID(IWM_SYSTEM_GROUP,
		    IWM_FSEQ_VER_MISMATCH_NOTIFICATION):
		    break;

		/*
		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
		 * messages. Just ignore them for now.
		 */
		case IWM_DEBUG_LOG_MSG:
			break;

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			SYNC_RESP_STRUCT(rsp, pkt);

			break;
		}

		default:
			printf("%s: unhandled firmware response 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    DEVNAME(sc), pkt->hdr.code, pkt->len_n_flags, qid,
			    idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWM_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
7287 
/*
 * PCI interrupt handler.  Reads the interrupt cause either from the
 * in-memory interrupt cause table (ICT) or from the INT/FH_INT_STATUS
 * registers, deals with fatal conditions (rfkill, firmware or hardware
 * error) and hands Rx work to iwm_notif_intr().  Returns nonzero if
 * the interrupt was ours.
 */
int
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	/* Mask interrupts; iwm_restore_interrupts() undoes this at out_ena. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/* ICT entries are little-endian in DMA memory. */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Consume the entry so it is not seen twice. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* All-ones / 0xa5a5a5ax means the device is gone or asleep. */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* not ours; unmask and bail */
	}

	/* Acknowledge the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		iwm_check_rfkill(sc);
		task_add(systq, &sc->init_task);
		rv = 1;
		goto out_ena;
	}

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF(("  802.11 state %s\n",
		    ieee80211_state_name[sc->sc_ic.ic_state]));
#endif

		printf("%s: fatal firmware error\n", DEVNAME(sc));
		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
			task_add(systq, &sc->init_task);
		rv = 1;
		goto out;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
			sc->sc_flags |= IWM_FLAG_HW_ERR;
			task_add(systq, &sc->init_task);
		}
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;

		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
	    isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
		    !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	return rv;
}
7425 
typedef void *iwm_match_t;

/* PCI IDs of supported adapters; iwm_attach() maps each to a firmware image. */
static const struct pci_matchid iwm_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3168_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8265_1 },
};
7442 
7443 int
7444 iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
7445 {
7446 	return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
7447 	    nitems(iwm_devices));
7448 }
7449 
/*
 * First-time initialization which requires firmware loaded from the
 * filesystem: run the "init" ucode once to obtain NVM contents, then
 * hand channel, rate and MAC information to net80211.  May be called
 * again later; the "attached" static ensures the heavy work happens
 * only after the first successful firmware load.
 * Returns 0 on success or an errno-style error.
 */
int
iwm_preinit(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int err;
	static int attached;

	err = iwm_prepare_card_hw(sc);
	if (err) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return err;
	}

	if (attached) {
		/* Update MAC in case the upper layers changed it. */
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
		    ((struct arpcom *)ifp)->ac_enaddr);
		return 0;
	}

	err = iwm_start_hw(sc);
	if (err) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return err;
	}

	/* Run the init firmware once, then power the device back down. */
	err = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (err)
		return err;

	/* Print version info and MAC address on first successful fw load. */
	attached = 1;
	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
	    DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));

	if (sc->sc_nvm.sku_cap_11n_enable)
		iwm_setup_ht_rates(sc);

	/* not all hardware can do 5GHz band */
	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));

	/* Configure channel information obtained from firmware. */
	ieee80211_channel_init(ifp);

	/* Configure MAC address. */
	err = if_setlladdr(ifp, ic->ic_myaddr);
	if (err)
		printf("%s: could not set MAC address (error %d)\n",
		    DEVNAME(sc), err);

	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);

	return 0;
}
7509 
7510 void
7511 iwm_attach_hook(struct device *self)
7512 {
7513 	struct iwm_softc *sc = (void *)self;
7514 
7515 	KASSERT(!cold);
7516 
7517 	iwm_preinit(sc);
7518 }
7519 
/*
 * Autoconf attach: map PCI resources, establish the interrupt handler,
 * identify the chip variant (selecting a firmware image), allocate all
 * DMA memory and rings, and register the interface with net80211.
 * Actual firmware loading is deferred to iwm_attach_hook() because it
 * needs the root filesystem.
 */
void
iwm_attach(struct device *parent, struct device *self, void *aux)
{
	struct iwm_softc *sc = (void *)self;
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	pcireg_t reg, memtype;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	const char *intrstr;
	int err;
	int txq_i, i;

	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	rw_init(&sc->ioctl_rwl, "iwmioctl");

	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
	if (err == 0) {
		printf("%s: PCIe capability structure not found!\n",
		    DEVNAME(sc));
		return;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* Enable bus-mastering and hardware bug workaround. */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	/* if !MSI */
	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
	}
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);

	/* Map the device's register window (BAR 0). */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
	if (err) {
		printf("%s: can't map mem space\n", DEVNAME(sc));
		return;
	}

	/* Prefer MSI; fall back to legacy INTx. */
	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
		printf("%s: can't map interrupt\n", DEVNAME(sc));
		return;
	}

	intrstr = pci_intr_string(sc->sc_pct, ih);
	sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwm_intr, sc,
	    DEVNAME(sc));

	if (sc->sc_ih == NULL) {
		printf("\n");
		printf("%s: can't establish interrupt", DEVNAME(sc));
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(", %s\n", intrstr);

	/* Select firmware image and per-family parameters by PCI product. */
	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_INTEL_WL_3160_1:
	case PCI_PRODUCT_INTEL_WL_3160_2:
		sc->sc_fwname = "iwm-3160-16";
		sc->host_interrupt_operation_mode = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WL_3165_1:
	case PCI_PRODUCT_INTEL_WL_3165_2:
		sc->sc_fwname = "iwm-7265-16";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WL_3168_1:
		sc->sc_fwname = "iwm-3168-22";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WL_7260_1:
	case PCI_PRODUCT_INTEL_WL_7260_2:
		sc->sc_fwname = "iwm-7260-16";
		sc->host_interrupt_operation_mode = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WL_7265_1:
	case PCI_PRODUCT_INTEL_WL_7265_2:
		sc->sc_fwname = "iwm-7265-16";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WL_8260_1:
	case PCI_PRODUCT_INTEL_WL_8260_2:
		sc->sc_fwname = "iwm-8000C-16";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
		break;
	case PCI_PRODUCT_INTEL_WL_8265_1:
		sc->sc_fwname = "iwm-8265-22";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
		break;
	default:
		printf("%s: unknown adapter type\n", DEVNAME(sc));
		return;
	}

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return;
	}

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!err) {
			printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
			return;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			printf("%s: Failed to lock the nic\n", DEVNAME(sc));
			return;
		}
	}

	/*
	 * Allocate DMA memory for firmware transfers.
	 * Must be aligned on a 16-byte boundary.
	 */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
	if (err) {
		printf("%s: could not allocate memory for firmware\n",
		    DEVNAME(sc));
		return;
	}

	/* Allocate "Keep Warm" page, used internally by the card. */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
	if (err) {
		printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
		goto fail1;
	}

	/* Allocate interrupt cause table (ICT).*/
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
	if (err) {
		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
		goto fail2;
	}

	/* TX scheduler rings must be aligned on a 1KB boundary. */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
	if (err) {
		printf("%s: could not allocate TX scheduler rings\n",
		    DEVNAME(sc));
		goto fail3;
	}

	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
		if (err) {
			printf("%s: could not allocate TX ring %d\n",
			    DEVNAME(sc), txq_i);
			goto fail4;
		}
	}

	err = iwm_alloc_rx_ring(sc, &sc->rxq);
	if (err) {
		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
		goto fail4;
	}

	/* Single-threaded task queue for newstate/ba/htprot tasks. */
	sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
	if (sc->sc_nswq == NULL)
		goto fail4;

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_RSN |		/* WPA/RSN */
	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
	ic->ic_htcaps |=
	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
	ic->ic_htxcaps = 0;
	ic->ic_txbfcaps = 0;
	ic->ic_aselcaps = 0;
	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);

	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
	}

	sc->sc_amrr.amrr_min_success_threshold =  1;
	sc->sc_amrr.amrr_max_success_threshold = 15;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
	ifp->if_watchdog = iwm_watchdog;
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	if_attach(ifp);
	ieee80211_ifattach(ifp);
	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);

#if NBPFILTER > 0
	iwm_radiotap_attach(sc);
#endif
	timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
	timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
	task_set(&sc->init_task, iwm_init_task, sc);
	task_set(&sc->newstate_task, iwm_newstate_task, sc);
	task_set(&sc->ba_task, iwm_ba_task, sc);
	task_set(&sc->htprot_task, iwm_htprot_task, sc);

	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_bgscan_start = iwm_bgscan;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	ic->ic_update_htprot = iwm_update_htprot;
	ic->ic_ampdu_rx_start = iwm_ampdu_rx_start;
	ic->ic_ampdu_rx_stop = iwm_ampdu_rx_stop;
#ifdef notyet
	ic->ic_ampdu_tx_start = iwm_ampdu_tx_start;
	ic->ic_ampdu_tx_stop = iwm_ampdu_tx_stop;
#endif
	/*
	 * We cannot read the MAC address without loading the
	 * firmware from disk. Postpone until mountroot is done.
	 */
	config_mountroot(self, iwm_attach_hook);

	return;

	/* Unwind allocations in reverse order on failure. */
fail4:	while (--txq_i >= 0)
		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
	iwm_free_rx_ring(sc, &sc->rxq);
	iwm_dma_contig_free(&sc->sched_dma);
fail3:	if (sc->ict_dma.vaddr != NULL)
		iwm_dma_contig_free(&sc->ict_dma);

fail2:	iwm_dma_contig_free(&sc->kw_dma);
fail1:	iwm_dma_contig_free(&sc->fw_dma);
	return;
}
7840 
#if NBPFILTER > 0
/*
 * Attach the monitor-mode bpf tap and pre-initialize the constant
 * fields of the radiotap headers prepended to captured frames.
 */
void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
}
#endif
7857 
/*
 * Reset task, scheduled (e.g. from the interrupt handler) on rfkill
 * toggles and firmware/hardware errors: stop the interface and, when
 * no fatal condition is pending, bring it back up.  Serialized against
 * ioctls via ioctl_rwl; a change of sc_generation aborts the task.
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s = splnet();
	int generation = sc->sc_generation;
	int fatal = (sc->sc_flags & (IWM_FLAG_HW_ERR | IWM_FLAG_RFKILL));

	rw_enter_write(&sc->ioctl_rwl);
	if (generation != sc->sc_generation) {
		rw_exit(&sc->ioctl_rwl);
		splx(s);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING)
		iwm_stop(ifp);
	else
		sc->sc_flags &= ~IWM_FLAG_HW_ERR;

	/* Restart only if the interface is up but not running. */
	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	rw_exit(&sc->ioctl_rwl);
	splx(s);
}
7885 
/*
 * Restore PCI configuration state after suspend and re-check the
 * rfkill switch, then put the device back into a known state.
 * Returns 0 on success (via iwm_prepare_card_hw()).
 */
int
iwm_resume(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return iwm_prepare_card_hw(sc);
}
7900 
/*
 * Power-management hook: quiesce the interface before suspend and
 * reinitialize it on resume/wakeup.  Errors are reported but not
 * returned; autoconf expects 0.
 */
int
iwm_activate(struct device *self, int act)
{
	struct iwm_softc *sc = (struct iwm_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int err = 0;

	switch (act) {
	case DVACT_QUIESCE:
		if (ifp->if_flags & IFF_RUNNING) {
			rw_enter_write(&sc->ioctl_rwl);
			iwm_stop(ifp);
			rw_exit(&sc->ioctl_rwl);
		}
		break;
	case DVACT_RESUME:
		err = iwm_resume(sc);
		if (err)
			printf("%s: could not initialize hardware\n",
			    DEVNAME(sc));
		break;
	case DVACT_WAKEUP:
		/* Hardware should be up at this point. */
		if (iwm_set_hw_ready(sc))
			task_add(systq, &sc->init_task);
		break;
	}

	return 0;
}
7931 
/* Autoconf glue: driver class and attachment entry points for iwm(4). */
struct cfdriver iwm_cd = {
	NULL, "iwm", DV_IFNET
};

struct cfattach iwm_ca = {
	sizeof(struct iwm_softc), iwm_match, iwm_attach,
	NULL, iwm_activate
};
7940