xref: /openbsd-src/sys/dev/pci/if_iwm.c (revision f1dd7b858388b4a23f4f67a4957ec5ff656ebbe8)
1 /*	$OpenBSD: if_iwm.c,v 1.325 2021/05/16 15:10:20 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5  *   Author: Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2014 Fixup Software Ltd.
7  * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ***********************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
34  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35  * Copyright(c) 2016 Intel Deutschland GmbH
36  *
37  * This program is free software; you can redistribute it and/or modify
38  * it under the terms of version 2 of the GNU General Public License as
39  * published by the Free Software Foundation.
40  *
41  * This program is distributed in the hope that it will be useful, but
42  * WITHOUT ANY WARRANTY; without even the implied warranty of
43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
44  * General Public License for more details.
45  *
46  * You should have received a copy of the GNU General Public License
47  * along with this program; if not, write to the Free Software
48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49  * USA
50  *
51  * The full GNU General Public License is included in this distribution
52  * in the file called COPYING.
53  *
54  * Contact Information:
55  *  Intel Linux Wireless <ilw@linux.intel.com>
56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57  *
58  *
59  * BSD LICENSE
60  *
61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
63  * Copyright(c) 2016 Intel Deutschland GmbH
64  * All rights reserved.
65  *
66  * Redistribution and use in source and binary forms, with or without
67  * modification, are permitted provided that the following conditions
68  * are met:
69  *
70  *  * Redistributions of source code must retain the above copyright
71  *    notice, this list of conditions and the following disclaimer.
72  *  * Redistributions in binary form must reproduce the above copyright
73  *    notice, this list of conditions and the following disclaimer in
74  *    the documentation and/or other materials provided with the
75  *    distribution.
76  *  * Neither the name Intel Corporation nor the names of its
77  *    contributors may be used to endorse or promote products derived
78  *    from this software without specific prior written permission.
79  *
80  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
81  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
82  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
83  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
84  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
86  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
87  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
88  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
89  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
90  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91  */
92 
93 /*-
94  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
95  *
96  * Permission to use, copy, modify, and distribute this software for any
97  * purpose with or without fee is hereby granted, provided that the above
98  * copyright notice and this permission notice appear in all copies.
99  *
100  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
101  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
102  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
103  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
104  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
105  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
106  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
107  */
108 
109 #include "bpfilter.h"
110 
111 #include <sys/param.h>
112 #include <sys/conf.h>
113 #include <sys/kernel.h>
114 #include <sys/malloc.h>
115 #include <sys/mbuf.h>
116 #include <sys/mutex.h>
117 #include <sys/proc.h>
118 #include <sys/rwlock.h>
119 #include <sys/socket.h>
120 #include <sys/sockio.h>
121 #include <sys/systm.h>
122 #include <sys/endian.h>
123 
124 #include <sys/refcnt.h>
125 #include <sys/task.h>
126 #include <machine/bus.h>
127 #include <machine/intr.h>
128 
129 #include <dev/pci/pcireg.h>
130 #include <dev/pci/pcivar.h>
131 #include <dev/pci/pcidevs.h>
132 
133 #if NBPFILTER > 0
134 #include <net/bpf.h>
135 #endif
136 #include <net/if.h>
137 #include <net/if_dl.h>
138 #include <net/if_media.h>
139 
140 #include <netinet/in.h>
141 #include <netinet/if_ether.h>
142 
143 #include <net80211/ieee80211_var.h>
144 #include <net80211/ieee80211_amrr.h>
145 #include <net80211/ieee80211_ra.h>
146 #include <net80211/ieee80211_radiotap.h>
147 #include <net80211/ieee80211_priv.h> /* for SEQ_LT */
148 #undef DPRINTF /* defined in ieee80211_priv.h */
149 
150 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
151 
152 #define IC2IFP(_ic_) (&(_ic_)->ic_if)
153 
154 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
155 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
156 
157 #ifdef IWM_DEBUG
158 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
159 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
160 int iwm_debug = 1;
161 #else
162 #define DPRINTF(x)	do { ; } while (0)
163 #define DPRINTFN(n, x)	do { ; } while (0)
164 #endif
165 
166 #include <dev/pci/if_iwmreg.h>
167 #include <dev/pci/if_iwmvar.h>
168 
169 const uint8_t iwm_nvm_channels[] = {
170 	/* 2.4 GHz */
171 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
172 	/* 5 GHz */
173 	36, 40, 44, 48, 52, 56, 60, 64,
174 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
175 	149, 153, 157, 161, 165
176 };
177 
178 const uint8_t iwm_nvm_channels_8000[] = {
179 	/* 2.4 GHz */
180 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
181 	/* 5 GHz */
182 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
183 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
184 	149, 153, 157, 161, 165, 169, 173, 177, 181
185 };
186 
187 #define IWM_NUM_2GHZ_CHANNELS	14
188 
189 const struct iwm_rate {
190 	uint16_t rate;
191 	uint8_t plcp;
192 	uint8_t ht_plcp;
193 } iwm_rates[] = {
194 		/* Legacy */		/* HT */
195 	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
196 	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
197 	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
198 	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
199 	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
200 	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
201 	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
202 	{  26,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_8_PLCP },
203 	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
204 	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
205 	{  52,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_9_PLCP },
206 	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
207 	{  78,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_10_PLCP },
208 	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
209 	{ 104,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_11_PLCP },
210 	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
211 	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
212 	{ 156,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_12_PLCP },
213 	{ 208,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_13_PLCP },
214 	{ 234,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_14_PLCP },
215 	{ 260,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_15_PLCP },
216 };
217 #define IWM_RIDX_CCK	0
218 #define IWM_RIDX_OFDM	4
219 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
220 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
221 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
222 #define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
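
/*
 * Rate values in iwm_rates[] are in units of 500 kb/s, following the
 * net80211 rate set encoding: e.g. rate 12 is the 6 Mb/s OFDM rate and
 * rate 22 is the 11 Mb/s CCK rate, hence the exception in
 * IWM_RVAL_IS_OFDM() above.
 */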
223 
224 /* Convert an MCS index into an iwm_rates[] index. */
225 const int iwm_mcs2ridx[] = {
226 	IWM_RATE_MCS_0_INDEX,
227 	IWM_RATE_MCS_1_INDEX,
228 	IWM_RATE_MCS_2_INDEX,
229 	IWM_RATE_MCS_3_INDEX,
230 	IWM_RATE_MCS_4_INDEX,
231 	IWM_RATE_MCS_5_INDEX,
232 	IWM_RATE_MCS_6_INDEX,
233 	IWM_RATE_MCS_7_INDEX,
234 	IWM_RATE_MCS_8_INDEX,
235 	IWM_RATE_MCS_9_INDEX,
236 	IWM_RATE_MCS_10_INDEX,
237 	IWM_RATE_MCS_11_INDEX,
238 	IWM_RATE_MCS_12_INDEX,
239 	IWM_RATE_MCS_13_INDEX,
240 	IWM_RATE_MCS_14_INDEX,
241 	IWM_RATE_MCS_15_INDEX,
242 };
243 
244 struct iwm_nvm_section {
245 	uint16_t length;
246 	uint8_t *data;
247 };
248 
249 int	iwm_is_mimo_ht_plcp(uint8_t);
250 int	iwm_is_mimo_mcs(int);
251 int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
252 int	iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
253 	    uint8_t *, size_t);
254 int	iwm_set_default_calib(struct iwm_softc *, const void *);
255 void	iwm_fw_info_free(struct iwm_fw_info *);
256 int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
257 uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
258 void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
259 int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
260 int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
261 int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
262 int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
263 int	iwm_nic_lock(struct iwm_softc *);
264 void	iwm_nic_assert_locked(struct iwm_softc *);
265 void	iwm_nic_unlock(struct iwm_softc *);
266 void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
267 	    uint32_t);
268 void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
269 void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
270 int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
271 	    bus_size_t);
272 void	iwm_dma_contig_free(struct iwm_dma_info *);
273 int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 void	iwm_disable_rx_dma(struct iwm_softc *);
275 void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
276 void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
277 int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
278 void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
279 void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
280 void	iwm_enable_rfkill_int(struct iwm_softc *);
281 int	iwm_check_rfkill(struct iwm_softc *);
282 void	iwm_enable_interrupts(struct iwm_softc *);
283 void	iwm_enable_fwload_interrupt(struct iwm_softc *);
284 void	iwm_restore_interrupts(struct iwm_softc *);
285 void	iwm_disable_interrupts(struct iwm_softc *);
286 void	iwm_ict_reset(struct iwm_softc *);
287 int	iwm_set_hw_ready(struct iwm_softc *);
288 int	iwm_prepare_card_hw(struct iwm_softc *);
289 void	iwm_apm_config(struct iwm_softc *);
290 int	iwm_apm_init(struct iwm_softc *);
291 void	iwm_apm_stop(struct iwm_softc *);
292 int	iwm_allow_mcast(struct iwm_softc *);
293 void	iwm_init_msix_hw(struct iwm_softc *);
294 void	iwm_conf_msix_hw(struct iwm_softc *, int);
295 int	iwm_start_hw(struct iwm_softc *);
296 void	iwm_stop_device(struct iwm_softc *);
297 void	iwm_nic_config(struct iwm_softc *);
298 int	iwm_nic_rx_init(struct iwm_softc *);
299 int	iwm_nic_rx_legacy_init(struct iwm_softc *);
300 int	iwm_nic_rx_mq_init(struct iwm_softc *);
301 int	iwm_nic_tx_init(struct iwm_softc *);
302 int	iwm_nic_init(struct iwm_softc *);
303 int	iwm_enable_ac_txq(struct iwm_softc *, int, int);
304 int	iwm_enable_txq(struct iwm_softc *, int, int, int, int, uint8_t,
305 	    uint16_t);
306 int	iwm_post_alive(struct iwm_softc *);
307 struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
308 	    uint16_t);
309 int	iwm_phy_db_set_section(struct iwm_softc *,
310 	    struct iwm_calib_res_notif_phy_db *);
311 int	iwm_is_valid_channel(uint16_t);
312 uint8_t	iwm_ch_id_to_ch_index(uint16_t);
313 uint16_t iwm_channel_id_to_papd(uint16_t);
314 uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
315 int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
316 	    uint16_t *, uint16_t);
317 int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
318 int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
319 	    uint8_t);
320 int	iwm_send_phy_db_data(struct iwm_softc *);
321 void	iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
322 	    uint32_t);
323 void	iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
324 int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
325 	    uint8_t *, uint16_t *);
326 int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
327 	    uint16_t *, size_t);
328 void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
329 	    const uint8_t *nvm_channels, int nchan);
330 int	iwm_mimo_enabled(struct iwm_softc *);
331 void	iwm_setup_ht_rates(struct iwm_softc *);
332 void	iwm_mac_ctxt_task(void *);
333 void	iwm_updateprot(struct ieee80211com *);
334 void	iwm_updateslot(struct ieee80211com *);
335 void	iwm_updateedca(struct ieee80211com *);
336 void	iwm_init_reorder_buffer(struct iwm_reorder_buffer *, uint16_t,
337 	    uint16_t);
338 void	iwm_clear_reorder_buffer(struct iwm_softc *, struct iwm_rxba_data *);
339 int	iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
340 	    uint8_t);
341 void	iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
342 	    uint8_t);
343 void	iwm_rx_ba_session_expired(void *);
344 void	iwm_reorder_timer_expired(void *);
345 int	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
346 	    uint16_t, uint16_t, int, int);
347 int	iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
348 	    uint8_t);
349 void	iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
350 	    uint8_t);
351 void	iwm_ba_task(void *);
352 
353 int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
354 	    const uint16_t *, const uint16_t *,
355 	    const uint16_t *, const uint16_t *,
356 	    const uint16_t *, int);
357 void	iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
358 	    const uint16_t *, const uint16_t *);
359 int	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
360 int	iwm_nvm_init(struct iwm_softc *);
361 int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
362 	    uint32_t);
363 int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
364 	    uint32_t);
365 int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
366 int	iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
367 	    int , int *);
368 int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
369 int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
370 int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
371 int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
372 int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
373 int	iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
374 int	iwm_send_dqa_cmd(struct iwm_softc *);
375 int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
376 int	iwm_config_ltr(struct iwm_softc *);
377 int	iwm_rx_addbuf(struct iwm_softc *, int, int);
378 int	iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
379 int	iwm_rxmq_get_signal_strength(struct iwm_softc *, struct iwm_rx_mpdu_desc *);
380 void	iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
381 	    struct iwm_rx_data *);
382 int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
383 int	iwm_rx_hwdecrypt(struct iwm_softc *, struct mbuf *, uint32_t,
384 	    struct ieee80211_rxinfo *);
385 int	iwm_ccmp_decap(struct iwm_softc *, struct mbuf *,
386 	    struct ieee80211_node *, struct ieee80211_rxinfo *);
387 void	iwm_rx_frame(struct iwm_softc *, struct mbuf *, int, uint32_t, int, int,
388 	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
389 void	iwm_ht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
390 	    int, uint8_t, int);
391 void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
392 	    struct iwm_node *, int, int);
393 void	iwm_txd_done(struct iwm_softc *, struct iwm_tx_data *);
394 void	iwm_txq_advance(struct iwm_softc *, struct iwm_tx_ring *, int);
395 void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
396 	    struct iwm_rx_data *);
397 void	iwm_clear_oactive(struct iwm_softc *, struct iwm_tx_ring *);
398 void	iwm_ampdu_rate_control(struct iwm_softc *, struct ieee80211_node *,
399 	    struct iwm_tx_ring *, int, uint16_t, uint16_t);
400 void	iwm_rx_compressed_ba(struct iwm_softc *, struct iwm_rx_packet *,
401 	    struct iwm_rx_data *);
402 void	iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
403 	    struct iwm_rx_data *);
404 int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
405 void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
406 	    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
407 void	iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
408 	    struct ieee80211_channel *, uint8_t, uint8_t);
409 int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
410 	    uint8_t, uint32_t, uint32_t);
411 int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
412 int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t,
413 	    const void *);
414 int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
415 	    uint32_t *);
416 int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
417 	    const void *, uint32_t *);
418 void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
419 void	iwm_cmd_done(struct iwm_softc *, int, int, int);
420 void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
421 void	iwm_reset_sched(struct iwm_softc *, int, int, uint8_t);
422 const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
423 	    struct ieee80211_frame *, struct iwm_tx_cmd *);
424 int	iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
425 int	iwm_flush_tx_path(struct iwm_softc *, int);
426 int	iwm_wait_tx_queues_empty(struct iwm_softc *);
427 void	iwm_led_enable(struct iwm_softc *);
428 void	iwm_led_disable(struct iwm_softc *);
429 int	iwm_led_is_enabled(struct iwm_softc *);
430 void	iwm_led_blink_timeout(void *);
431 void	iwm_led_blink_start(struct iwm_softc *);
432 void	iwm_led_blink_stop(struct iwm_softc *);
433 int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
434 	    struct iwm_beacon_filter_cmd *);
435 void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
436 	    struct iwm_beacon_filter_cmd *);
437 int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
438 void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
439 	    struct iwm_mac_power_cmd *);
440 int	iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
441 int	iwm_power_update_device(struct iwm_softc *);
442 int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
443 int	iwm_disable_beacon_filter(struct iwm_softc *);
444 int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
445 int	iwm_add_aux_sta(struct iwm_softc *);
446 int	iwm_drain_sta(struct iwm_softc *, struct iwm_node *, int);
447 int	iwm_flush_sta(struct iwm_softc *, struct iwm_node *);
448 int	iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *);
449 uint16_t iwm_scan_rx_chain(struct iwm_softc *);
450 uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
451 uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
452 	    struct iwm_scan_channel_cfg_lmac *, int, int);
453 int	iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
454 int	iwm_lmac_scan(struct iwm_softc *, int);
455 int	iwm_config_umac_scan(struct iwm_softc *);
456 int	iwm_umac_scan(struct iwm_softc *, int);
457 uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
458 int	iwm_rval2ridx(int);
459 void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
460 void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
461 	    struct iwm_mac_ctx_cmd *, uint32_t);
462 void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
463 	    struct iwm_mac_data_sta *, int);
464 int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int);
465 int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int);
466 void	iwm_add_task(struct iwm_softc *, struct taskq *, struct task *);
467 void	iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
468 int	iwm_scan(struct iwm_softc *);
469 int	iwm_bgscan(struct ieee80211com *);
470 int	iwm_umac_scan_abort(struct iwm_softc *);
471 int	iwm_lmac_scan_abort(struct iwm_softc *);
472 int	iwm_scan_abort(struct iwm_softc *);
473 int	iwm_auth(struct iwm_softc *);
474 int	iwm_deauth(struct iwm_softc *);
475 int	iwm_assoc(struct iwm_softc *);
476 int	iwm_disassoc(struct iwm_softc *);
477 int	iwm_run(struct iwm_softc *);
478 int	iwm_run_stop(struct iwm_softc *);
479 struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
480 int	iwm_set_key_v1(struct ieee80211com *, struct ieee80211_node *,
481 	    struct ieee80211_key *);
482 int	iwm_set_key(struct ieee80211com *, struct ieee80211_node *,
483 	    struct ieee80211_key *);
484 void	iwm_delete_key_v1(struct ieee80211com *,
485 	    struct ieee80211_node *, struct ieee80211_key *);
486 void	iwm_delete_key(struct ieee80211com *,
487 	    struct ieee80211_node *, struct ieee80211_key *);
488 void	iwm_calib_timeout(void *);
489 void	iwm_setrates(struct iwm_node *, int);
490 int	iwm_media_change(struct ifnet *);
491 void	iwm_newstate_task(void *);
492 int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
493 void	iwm_endscan(struct iwm_softc *);
494 void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
495 	    struct ieee80211_node *);
496 int	iwm_sf_config(struct iwm_softc *, int);
497 int	iwm_send_bt_init_conf(struct iwm_softc *);
498 int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
499 void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
500 void	iwm_free_fw_paging(struct iwm_softc *);
501 int	iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
502 int	iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
503 int	iwm_init_hw(struct iwm_softc *);
504 int	iwm_init(struct ifnet *);
505 void	iwm_start(struct ifnet *);
506 void	iwm_stop(struct ifnet *);
507 void	iwm_watchdog(struct ifnet *);
508 int	iwm_ioctl(struct ifnet *, u_long, caddr_t);
509 #if 1
510 const char *iwm_desc_lookup(uint32_t);
511 void	iwm_nic_error(struct iwm_softc *);
512 void	iwm_dump_driver_status(struct iwm_softc *);
513 void	iwm_nic_umac_error(struct iwm_softc *);
514 #endif
515 void	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, void *, size_t,
516 	    struct mbuf_list *);
517 void	iwm_flip_address(uint8_t *);
518 int	iwm_detect_duplicate(struct iwm_softc *, struct mbuf *,
519 	    struct iwm_rx_mpdu_desc *, struct ieee80211_rxinfo *);
520 int	iwm_is_sn_less(uint16_t, uint16_t, uint16_t);
521 void	iwm_release_frames(struct iwm_softc *, struct ieee80211_node *,
522 	    struct iwm_rxba_data *, struct iwm_reorder_buffer *, uint16_t,
523 	    struct mbuf_list *);
524 int	iwm_oldsn_workaround(struct iwm_softc *, struct ieee80211_node *,
525 	    int, struct iwm_reorder_buffer *, uint32_t, uint32_t);
526 int	iwm_rx_reorder(struct iwm_softc *, struct mbuf *, int,
527 	    struct iwm_rx_mpdu_desc *, int, int, uint32_t,
528 	    struct ieee80211_rxinfo *, struct mbuf_list *);
529 void	iwm_rx_mpdu_mq(struct iwm_softc *, struct mbuf *, void *, size_t,
530 	    struct mbuf_list *);
531 int	iwm_rx_pkt_valid(struct iwm_rx_packet *);
532 void	iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *,
533 	    struct mbuf_list *);
534 void	iwm_notif_intr(struct iwm_softc *);
535 int	iwm_intr(void *);
536 int	iwm_intr_msix(void *);
537 int	iwm_match(struct device *, void *, void *);
538 int	iwm_preinit(struct iwm_softc *);
539 void	iwm_attach_hook(struct device *);
540 void	iwm_attach(struct device *, struct device *, void *);
541 void	iwm_init_task(void *);
542 int	iwm_activate(struct device *, int);
543 int	iwm_resume(struct iwm_softc *);
544 
545 #if NBPFILTER > 0
546 void	iwm_radiotap_attach(struct iwm_softc *);
547 #endif
548 
549 int
550 iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
551 {
552 	return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
553 	    (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
554 }
555 
556 int
557 iwm_is_mimo_mcs(int mcs)
558 {
559 	int ridx = iwm_mcs2ridx[mcs];
560 	return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
562 }
563 
564 int
565 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
566 {
567 	struct iwm_fw_cscheme_list *l = (void *)data;
568 
569 	if (dlen < sizeof(*l) ||
570 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
571 		return EINVAL;
572 
573 	/* we don't actually store anything for now, always use s/w crypto */
574 
575 	return 0;
576 }
577 
578 int
579 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
580     uint8_t *data, size_t dlen)
581 {
582 	struct iwm_fw_sects *fws;
583 	struct iwm_fw_onesect *fwone;
584 
585 	if (type >= IWM_UCODE_TYPE_MAX)
586 		return EINVAL;
587 	if (dlen < sizeof(uint32_t))
588 		return EINVAL;
589 
590 	fws = &sc->sc_fw.fw_sects[type];
591 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
592 		return EINVAL;
593 
594 	fwone = &fws->fw_sect[fws->fw_count];
595 
596 	/* The first 32 bits are the device load offset. */
597 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
598 
599 	/* The rest is the section data. */
600 	fwone->fws_data = data + sizeof(uint32_t);
601 	fwone->fws_len = dlen - sizeof(uint32_t);
602 
603 	fws->fw_count++;
604 	fws->fw_totlen += fwone->fws_len;
605 
606 	return 0;
607 }
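
/*
 * Layout of a firmware section TLV payload as parsed above (sketch):
 *
 *	offset 0              4                        dlen
 *	+---------------------+--------------------------+
 *	| device load offset  | section image data       |
 *	| (uint32_t, LE)      | (dlen - 4 bytes)         |
 *	+---------------------+--------------------------+
 */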
608 
609 #define IWM_DEFAULT_SCAN_CHANNELS	40
610 /* Newer firmware might support more channels. Raise this value if needed. */
611 #define IWM_MAX_SCAN_CHANNELS		52 /* as of 8265-34 firmware image */
612 
613 struct iwm_tlv_calib_data {
614 	uint32_t ucode_type;
615 	struct iwm_tlv_calib_ctrl calib;
616 } __packed;
617 
618 int
619 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
620 {
621 	const struct iwm_tlv_calib_data *def_calib = data;
622 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
623 
624 	if (ucode_type >= IWM_UCODE_TYPE_MAX)
625 		return EINVAL;
626 
627 	sc->sc_default_calib[ucode_type].flow_trigger =
628 	    def_calib->calib.flow_trigger;
629 	sc->sc_default_calib[ucode_type].event_trigger =
630 	    def_calib->calib.event_trigger;
631 
632 	return 0;
633 }
634 
635 void
636 iwm_fw_info_free(struct iwm_fw_info *fw)
637 {
638 	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
639 	fw->fw_rawdata = NULL;
640 	fw->fw_rawsize = 0;
641 	/* don't touch fw->fw_status */
642 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
643 }
644 
645 int
646 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
647 {
648 	struct iwm_fw_info *fw = &sc->sc_fw;
649 	struct iwm_tlv_ucode_header *uhdr;
650 	struct iwm_ucode_tlv tlv;
651 	uint32_t tlv_type;
652 	uint8_t *data;
653 	uint32_t usniffer_img;
654 	uint32_t paging_mem_size;
655 	int err;
656 	size_t len;
657 
658 	if (fw->fw_status == IWM_FW_STATUS_DONE &&
659 	    ucode_type != IWM_UCODE_TYPE_INIT)
660 		return 0;
661 
662 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
663 		tsleep_nsec(&sc->sc_fw, 0, "iwmfwp", INFSLP);
664 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
665 
666 	if (fw->fw_rawdata != NULL)
667 		iwm_fw_info_free(fw);
668 
669 	err = loadfirmware(sc->sc_fwname,
670 	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
671 	if (err) {
672 		printf("%s: could not read firmware %s (error %d)\n",
673 		    DEVNAME(sc), sc->sc_fwname, err);
674 		goto out;
675 	}
676 
677 	sc->sc_capaflags = 0;
678 	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
679 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
680 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
681 
682 	uhdr = (void *)fw->fw_rawdata;
683 	if (*(uint32_t *)fw->fw_rawdata != 0
684 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
685 		printf("%s: invalid firmware %s\n",
686 		    DEVNAME(sc), sc->sc_fwname);
687 		err = EINVAL;
688 		goto out;
689 	}
690 
691 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
692 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
693 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
694 	    IWM_UCODE_API(le32toh(uhdr->ver)));
695 	data = uhdr->data;
696 	len = fw->fw_rawsize - sizeof(*uhdr);
697 
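	/*
	 * Walk the TLV stream which follows the ucode header.  Each entry
	 * consists of a { type, length } header (two little-endian
	 * uint32_t values) followed by the payload, padded to a 4-byte
	 * boundary:
	 *
	 *	[ type | length | data ... pad ][ type | length | ... ]
	 */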
698 	while (len >= sizeof(tlv)) {
699 		size_t tlv_len;
700 		void *tlv_data;
701 
702 		memcpy(&tlv, data, sizeof(tlv));
703 		tlv_len = le32toh(tlv.length);
704 		tlv_type = le32toh(tlv.type);
705 
706 		len -= sizeof(tlv);
707 		data += sizeof(tlv);
708 		tlv_data = data;
709 
710 		if (len < tlv_len) {
711 			printf("%s: firmware too short: %zu bytes\n",
712 			    DEVNAME(sc), len);
713 			err = EINVAL;
714 			goto parse_out;
715 		}
716 
717 		switch (tlv_type) {
718 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
719 			if (tlv_len < sizeof(uint32_t)) {
720 				err = EINVAL;
721 				goto parse_out;
722 			}
723 			sc->sc_capa_max_probe_len
724 			    = le32toh(*(uint32_t *)tlv_data);
725 			if (sc->sc_capa_max_probe_len >
726 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
727 				err = EINVAL;
728 				goto parse_out;
729 			}
730 			break;
731 		case IWM_UCODE_TLV_PAN:
732 			if (tlv_len) {
733 				err = EINVAL;
734 				goto parse_out;
735 			}
736 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
737 			break;
738 		case IWM_UCODE_TLV_FLAGS:
739 			if (tlv_len < sizeof(uint32_t)) {
740 				err = EINVAL;
741 				goto parse_out;
742 			}
743 			/*
744 			 * Apparently there can be many flags, but the Linux
745 			 * driver parses only the first one, and so do we.
746 			 *
747 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
748 			 * Intentional or a bug?  Observations from the
749 			 * current firmware file:
750 			 *  1) TLV_PAN is parsed first
751 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
752 			 * ==> this resets TLV_PAN to itself, a no-op.
753 			 */
754 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
755 			break;
756 		case IWM_UCODE_TLV_CSCHEME:
757 			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
758 			if (err)
759 				goto parse_out;
760 			break;
761 		case IWM_UCODE_TLV_NUM_OF_CPU: {
762 			uint32_t num_cpu;
763 			if (tlv_len != sizeof(uint32_t)) {
764 				err = EINVAL;
765 				goto parse_out;
766 			}
767 			num_cpu = le32toh(*(uint32_t *)tlv_data);
768 			if (num_cpu < 1 || num_cpu > 2) {
769 				err = EINVAL;
770 				goto parse_out;
771 			}
772 			break;
773 		}
774 		case IWM_UCODE_TLV_SEC_RT:
775 			err = iwm_firmware_store_section(sc,
776 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
777 			if (err)
778 				goto parse_out;
779 			break;
780 		case IWM_UCODE_TLV_SEC_INIT:
781 			err = iwm_firmware_store_section(sc,
782 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
783 			if (err)
784 				goto parse_out;
785 			break;
786 		case IWM_UCODE_TLV_SEC_WOWLAN:
787 			err = iwm_firmware_store_section(sc,
788 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
789 			if (err)
790 				goto parse_out;
791 			break;
792 		case IWM_UCODE_TLV_DEF_CALIB:
793 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
794 				err = EINVAL;
795 				goto parse_out;
796 			}
797 			err = iwm_set_default_calib(sc, tlv_data);
798 			if (err)
799 				goto parse_out;
800 			break;
801 		case IWM_UCODE_TLV_PHY_SKU:
802 			if (tlv_len != sizeof(uint32_t)) {
803 				err = EINVAL;
804 				goto parse_out;
805 			}
806 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
807 			break;
808 
809 		case IWM_UCODE_TLV_API_CHANGES_SET: {
810 			struct iwm_ucode_api *api;
811 			int idx, i;
812 			if (tlv_len != sizeof(*api)) {
813 				err = EINVAL;
814 				goto parse_out;
815 			}
816 			api = (struct iwm_ucode_api *)tlv_data;
817 			idx = le32toh(api->api_index);
818 			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
819 				err = EINVAL;
820 				goto parse_out;
821 			}
822 			for (i = 0; i < 32; i++) {
823 				if ((le32toh(api->api_flags) & (1 << i)) == 0)
824 					continue;
825 				setbit(sc->sc_ucode_api, i + (32 * idx));
826 			}
827 			break;
828 		}
829 
830 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
831 			struct iwm_ucode_capa *capa;
832 			int idx, i;
833 			if (tlv_len != sizeof(*capa)) {
834 				err = EINVAL;
835 				goto parse_out;
836 			}
837 			capa = (struct iwm_ucode_capa *)tlv_data;
838 			idx = le32toh(capa->api_index);
839 			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
840 				goto parse_out;
841 			}
842 			for (i = 0; i < 32; i++) {
843 				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
844 					continue;
845 				setbit(sc->sc_enabled_capa, i + (32 * idx));
846 			}
847 			break;
848 		}
849 
850 		case 48: /* undocumented TLV */
851 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
852 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
853 			/* ignore, not used by the current driver */
854 			break;
855 
856 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
857 			err = iwm_firmware_store_section(sc,
858 			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
859 			    tlv_len);
860 			if (err)
861 				goto parse_out;
862 			break;
863 
864 		case IWM_UCODE_TLV_PAGING:
865 			if (tlv_len != sizeof(uint32_t)) {
866 				err = EINVAL;
867 				goto parse_out;
868 			}
869 			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
870 
871 			DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",
872 			    DEVNAME(sc), paging_mem_size));
873 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
874 				printf("%s: Driver only supports up to %u"
875 				    " bytes for paging image (%u requested)\n",
876 				    DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
877 				    paging_mem_size);
878 				err = EINVAL;
879 				goto out;
880 			}
881 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
882 				printf("%s: Paging: image isn't a multiple of %u\n",
883 				    DEVNAME(sc), IWM_FW_PAGING_SIZE);
884 				err = EINVAL;
885 				goto out;
886 			}
887 
888 			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
889 			    paging_mem_size;
890 			usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER;
891 			fw->fw_sects[usniffer_img].paging_mem_size =
892 			    paging_mem_size;
893 			break;
894 
895 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
896 			if (tlv_len != sizeof(uint32_t)) {
897 				err = EINVAL;
898 				goto parse_out;
899 			}
900 			sc->sc_capa_n_scan_channels =
901 			  le32toh(*(uint32_t *)tlv_data);
902 			if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS) {
903 				err = ERANGE;
904 				goto parse_out;
905 			}
906 			break;
907 
908 		case IWM_UCODE_TLV_FW_VERSION:
909 			if (tlv_len != sizeof(uint32_t) * 3) {
910 				err = EINVAL;
911 				goto parse_out;
912 			}
913 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
914 			    "%u.%u.%u",
915 			    le32toh(((uint32_t *)tlv_data)[0]),
916 			    le32toh(((uint32_t *)tlv_data)[1]),
917 			    le32toh(((uint32_t *)tlv_data)[2]));
918 			break;
919 
920 		case IWM_UCODE_TLV_FW_DBG_DEST:
921 		case IWM_UCODE_TLV_FW_DBG_CONF:
922 			break;
923 
924 		case IWM_UCODE_TLV_FW_MEM_SEG:
925 			break;
926 
927 		default:
928 			err = EINVAL;
929 			goto parse_out;
930 		}
931 
932 		len -= roundup(tlv_len, 4);
933 		data += roundup(tlv_len, 4);
934 	}
935 
936 	KASSERT(err == 0);
937 
938  parse_out:
939 	if (err) {
940 		printf("%s: firmware parse error %d, "
941 		    "section type %d\n", DEVNAME(sc), err, tlv_type);
942 	}
943 
944  out:
945 	if (err) {
946 		fw->fw_status = IWM_FW_STATUS_NONE;
947 		if (fw->fw_rawdata != NULL)
948 			iwm_fw_info_free(fw);
949 	} else
950 		fw->fw_status = IWM_FW_STATUS_DONE;
951 	wakeup(&sc->sc_fw);
952 
953 	return err;
954 }
955 
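/*
 * Indirect access to "periphery" (PRPH) registers: the target address is
 * written to HBUS_TARG_PRPH_{R,W}ADDR, with the high bits (3 << 24)
 * requesting a full 4-byte access, and data is then transferred through
 * HBUS_TARG_PRPH_{R,W}DAT.  Callers must hold the NIC lock.
 */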
956 uint32_t
957 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
958 {
959 	iwm_nic_assert_locked(sc);
960 	IWM_WRITE(sc,
961 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
962 	IWM_BARRIER_READ_WRITE(sc);
963 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
964 }
965 
966 void
967 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
968 {
969 	iwm_nic_assert_locked(sc);
970 	IWM_WRITE(sc,
971 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
972 	IWM_BARRIER_WRITE(sc);
973 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
974 }
975 
976 void
977 iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
978 {
979 	iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
980 	iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32);
981 }
982 
983 int
984 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
985 {
986 	int offs, err = 0;
987 	uint32_t *vals = buf;
988 
989 	if (iwm_nic_lock(sc)) {
990 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
991 		for (offs = 0; offs < dwords; offs++)
992 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
993 		iwm_nic_unlock(sc);
994 	} else {
995 		err = EBUSY;
996 	}
997 	return err;
998 }
999 
1000 int
1001 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
1002 {
1003 	int offs;
1004 	const uint32_t *vals = buf;
1005 
1006 	if (iwm_nic_lock(sc)) {
1007 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
1008 		/* WADDR auto-increments */
1009 		for (offs = 0; offs < dwords; offs++) {
1010 			uint32_t val = vals ? vals[offs] : 0;
1011 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
1012 		}
1013 		iwm_nic_unlock(sc);
1014 	} else {
1015 		return EBUSY;
1016 	}
1017 	return 0;
1018 }
1019 
1020 int
1021 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1022 {
1023 	return iwm_write_mem(sc, addr, &val, 1);
1024 }
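
/*
 * Usage sketch for the SRAM accessors above (illustrative only; `addr'
 * stands in for some device-internal SRAM address):
 *
 *	uint32_t word;
 *	if (iwm_read_mem(sc, addr, &word, 1) == 0)
 *		iwm_write_mem32(sc, addr, word | 0x1);
 */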
1025 
1026 int
1027 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
1028     int timo)
1029 {
1030 	for (;;) {
1031 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
1032 			return 1;
1033 		}
1034 		if (timo < 10) {
1035 			return 0;
1036 		}
1037 		timo -= 10;
1038 		DELAY(10);
1039 	}
1040 }
1041 
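/*
 * Take the device out of power-save so that periphery registers and
 * SRAM may be accessed ("MAC access").  The lock counts recursively;
 * each successful iwm_nic_lock() call must be paired with a matching
 * iwm_nic_unlock().  A typical caller looks like iwm_read_mem() below:
 *
 *	if (iwm_nic_lock(sc)) {
 *		... access device-internal resources ...
 *		iwm_nic_unlock(sc);
 *	} else
 *		err = EBUSY;
 */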
1042 int
1043 iwm_nic_lock(struct iwm_softc *sc)
1044 {
1045 	if (sc->sc_nic_locks > 0) {
1046 		iwm_nic_assert_locked(sc);
1047 		sc->sc_nic_locks++;
1048 		return 1; /* already locked */
1049 	}
1050 
1051 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1052 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1053 
1054 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
1055 		DELAY(2);
1056 
1057 	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1058 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1059 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1060 	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1061 		sc->sc_nic_locks++;
1062 		return 1;
1063 	}
1064 
1065 	printf("%s: acquiring device failed\n", DEVNAME(sc));
1066 	return 0;
1067 }
1068 
1069 void
1070 iwm_nic_assert_locked(struct iwm_softc *sc)
1071 {
1072 	uint32_t reg = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1073 	if ((reg & IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) == 0)
1074 		panic("%s: mac clock not ready", DEVNAME(sc));
1075 	if (reg & IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)
1076 		panic("%s: mac gone to sleep", DEVNAME(sc));
1077 	if (sc->sc_nic_locks <= 0)
1078 		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1079 }
1080 
1081 void
1082 iwm_nic_unlock(struct iwm_softc *sc)
1083 {
1084 	if (sc->sc_nic_locks > 0) {
1085 		if (--sc->sc_nic_locks == 0)
1086 			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1087 			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1088 	} else
1089 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1090 }
1091 
1092 void
1093 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
1094     uint32_t mask)
1095 {
1096 	uint32_t val;
1097 
1098 	/* XXX: no error path? */
1099 	if (iwm_nic_lock(sc)) {
1100 		val = iwm_read_prph(sc, reg) & mask;
1101 		val |= bits;
1102 		iwm_write_prph(sc, reg, val);
1103 		iwm_nic_unlock(sc);
1104 	}
1105 }
1106 
1107 void
1108 iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1109 {
1110 	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
1111 }
1112 
1113 void
1114 iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1115 {
1116 	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
1117 }
1118 
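/*
 * Allocate a physically contiguous DMA buffer of `size' bytes with the
 * requested alignment and map it into kernel virtual address space:
 * the usual bus_dma(9) sequence of bus_dmamap_create(),
 * bus_dmamem_alloc(), bus_dmamem_map() and bus_dmamap_load().  On
 * failure, any partially set up state is torn down again by
 * iwm_dma_contig_free().
 */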
1119 int
1120 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
1121     bus_size_t size, bus_size_t alignment)
1122 {
1123 	int nsegs, err;
1124 	caddr_t va;
1125 
1126 	dma->tag = tag;
1127 	dma->size = size;
1128 
1129 	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1130 	    &dma->map);
1131 	if (err)
1132 		goto fail;
1133 
1134 	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1135 	    BUS_DMA_NOWAIT);
1136 	if (err)
1137 		goto fail;
1138 
1139 	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1140 	    BUS_DMA_NOWAIT);
1141 	if (err)
1142 		goto fail;
1143 	dma->vaddr = va;
1144 
1145 	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1146 	    BUS_DMA_NOWAIT);
1147 	if (err)
1148 		goto fail;
1149 
1150 	memset(dma->vaddr, 0, size);
1151 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1152 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1153 
1154 	return 0;
1155 
1156 fail:	iwm_dma_contig_free(dma);
1157 	return err;
1158 }
1159 
1160 void
1161 iwm_dma_contig_free(struct iwm_dma_info *dma)
1162 {
1163 	if (dma->map != NULL) {
1164 		if (dma->vaddr != NULL) {
1165 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1166 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1167 			bus_dmamap_unload(dma->tag, dma->map);
1168 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1169 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1170 			dma->vaddr = NULL;
1171 		}
1172 		bus_dmamap_destroy(dma->tag, dma->map);
1173 		dma->map = NULL;
1174 	}
1175 }
1176 
1177 int
1178 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1179 {
1180 	bus_size_t size;
1181 	size_t descsz;
1182 	int count, i, err;
1183 
1184 	ring->cur = 0;
1185 
1186 	if (sc->sc_mqrx_supported) {
1187 		count = IWM_RX_MQ_RING_COUNT;
1188 		descsz = sizeof(uint64_t);
1189 	} else {
1190 		count = IWM_RX_RING_COUNT;
1191 		descsz = sizeof(uint32_t);
1192 	}
1193 
1194 	/* Allocate RX descriptors (256-byte aligned). */
1195 	size = count * descsz;
1196 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
1197 	if (err) {
1198 		printf("%s: could not allocate RX ring DMA memory\n",
1199 		    DEVNAME(sc));
1200 		goto fail;
1201 	}
1202 	ring->desc = ring->free_desc_dma.vaddr;
1203 
1204 	/* Allocate RX status area (16-byte aligned). */
1205 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1206 	    sizeof(*ring->stat), 16);
1207 	if (err) {
1208 		printf("%s: could not allocate RX status DMA memory\n",
1209 		    DEVNAME(sc));
1210 		goto fail;
1211 	}
1212 	ring->stat = ring->stat_dma.vaddr;
1213 
1214 	if (sc->sc_mqrx_supported) {
1215 		size = count * sizeof(uint32_t);
1216 		err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1217 		    size, 256);
1218 		if (err) {
1219 			printf("%s: could not allocate RX ring DMA memory\n",
1220 			    DEVNAME(sc));
1221 			goto fail;
1222 		}
1223 	}
1224 
1225 	for (i = 0; i < count; i++) {
1226 		struct iwm_rx_data *data = &ring->data[i];
1227 
1228 		memset(data, 0, sizeof(*data));
1229 		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
1230 		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1231 		    &data->map);
1232 		if (err) {
1233 			printf("%s: could not create RX buf DMA map\n",
1234 			    DEVNAME(sc));
1235 			goto fail;
1236 		}
1237 
1238 		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
1239 		if (err)
1240 			goto fail;
1241 	}
1242 	return 0;
1243 
1244 fail:	iwm_free_rx_ring(sc, ring);
1245 	return err;
1246 }
1247 
1248 void
1249 iwm_disable_rx_dma(struct iwm_softc *sc)
1250 {
1251 	int ntries;
1252 
1253 	if (iwm_nic_lock(sc)) {
1254 		if (sc->sc_mqrx_supported) {
1255 			iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
1256 			for (ntries = 0; ntries < 1000; ntries++) {
1257 				if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS) &
1258 				    IWM_RXF_DMA_IDLE)
1259 					break;
1260 				DELAY(10);
1261 			}
1262 		} else {
1263 			IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1264 			for (ntries = 0; ntries < 1000; ntries++) {
1265 				if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG)&
1266 				    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1267 					break;
1268 				DELAY(10);
1269 			}
1270 		}
1271 		iwm_nic_unlock(sc);
1272 	}
1273 }
1274 
1275 void
1276 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1277 {
1278 	ring->cur = 0;
1279 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1280 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1281 	memset(ring->stat, 0, sizeof(*ring->stat));
1282 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1283 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1284 
1286 
1287 void
1288 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1289 {
1290 	int count, i;
1291 
1292 	iwm_dma_contig_free(&ring->free_desc_dma);
1293 	iwm_dma_contig_free(&ring->stat_dma);
1294 	iwm_dma_contig_free(&ring->used_desc_dma);
1295 
1296 	if (sc->sc_mqrx_supported)
1297 		count = IWM_RX_MQ_RING_COUNT;
1298 	else
1299 		count = IWM_RX_RING_COUNT;
1300 
1301 	for (i = 0; i < count; i++) {
1302 		struct iwm_rx_data *data = &ring->data[i];
1303 
1304 		if (data->m != NULL) {
1305 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1306 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1307 			bus_dmamap_unload(sc->sc_dmat, data->map);
1308 			m_freem(data->m);
1309 			data->m = NULL;
1310 		}
1311 		if (data->map != NULL)
1312 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1313 	}
1314 }
1315 
1316 int
1317 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1318 {
1319 	bus_addr_t paddr;
1320 	bus_size_t size;
1321 	int i, err;
1322 
1323 	ring->qid = qid;
1324 	ring->queued = 0;
1325 	ring->cur = 0;
1326 	ring->tail = 0;
1327 
1328 	/* Allocate TX descriptors (256-byte aligned). */
1329 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1330 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1331 	if (err) {
1332 		printf("%s: could not allocate TX ring DMA memory\n",
1333 		    DEVNAME(sc));
1334 		goto fail;
1335 	}
1336 	ring->desc = ring->desc_dma.vaddr;
1337 
1338 	/*
1339 	 * There is no need to allocate DMA buffers for unused rings.
1340 	 * 7k/8k/9k hardware supports up to 31 Tx rings which is more
1341 	 * than we currently need.
1342 	 *
1343 	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
1344 	 * The command queue is sc->txq[0], and the 4 mgmt/data frame queues
1345 	 * are sc->txq[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e. sc->txq[5:8],
1346 	 * in order to provide one queue per EDCA category.
1347 	 * Tx aggregation requires additional queues, one queue per TID for
1348 	 * which aggregation is enabled. We map TID 0-7 to sc->txq[10:17].
1349 	 *
1350 	 * In non-DQA mode, we use rings 0 through 9 (0-3 are EDCA, 9 is cmd),
1351 	 * and Tx aggregation is not supported.
1352 	 *
1353 	 * Unfortunately, we cannot tell if DQA will be used until the
1354 	 * firmware gets loaded later, so just allocate sufficient rings
1355 	 * in order to satisfy both cases.
1356 	 */
1357 	if (qid > IWM_LAST_AGG_TX_QUEUE)
1358 		return 0;
1359 
1360 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1361 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1362 	if (err) {
1363 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1364 		goto fail;
1365 	}
1366 	ring->cmd = ring->cmd_dma.vaddr;
1367 
1368 	paddr = ring->cmd_dma.paddr;
1369 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1370 		struct iwm_tx_data *data = &ring->data[i];
1371 		size_t mapsize;
1372 
1373 		data->cmd_paddr = paddr;
1374 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1375 		    + offsetof(struct iwm_tx_cmd, scratch);
1376 		paddr += sizeof(struct iwm_device_cmd);
1377 
1378 		/* FW commands may require more mapped space than packets. */
1379 		if (qid == IWM_CMD_QUEUE || qid == IWM_DQA_CMD_QUEUE)
1380 			mapsize = (sizeof(struct iwm_cmd_header) +
1381 			    IWM_MAX_CMD_PAYLOAD_SIZE);
1382 		else
1383 			mapsize = MCLBYTES;
1384 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
1385 		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1386 		    &data->map);
1387 		if (err) {
1388 			printf("%s: could not create TX buf DMA map\n",
1389 			    DEVNAME(sc));
1390 			goto fail;
1391 		}
1392 	}
1393 	KASSERT(paddr == ring->cmd_dma.paddr + size);
1394 	return 0;
1395 
1396 fail:	iwm_free_tx_ring(sc, ring);
1397 	return err;
1398 }
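
/*
 * Illustrative sketch (not compiled into the driver): how an EDCA access
 * category or a TID would map to one of the Tx queues described in the
 * comment in iwm_alloc_tx_ring() above.  The value 10 for the first
 * aggregation queue is taken from that comment.
 */
#if 0
static inline int
iwm_dqa_txq_for_ac(int ac)
{
	/* One DQA mgmt/data queue per EDCA category: sc->txq[5:8]. */
	return IWM_DQA_MIN_MGMT_QUEUE + ac;
}

static inline int
iwm_dqa_txq_for_tid(int tid)
{
	/* One Tx aggregation queue per TID: sc->txq[10:17]. */
	return 10 + tid;
}
#endif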
1399 
1400 void
1401 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1402 {
1403 	int i;
1404 
1405 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1406 		struct iwm_tx_data *data = &ring->data[i];
1407 
1408 		if (data->m != NULL) {
1409 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1410 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1411 			bus_dmamap_unload(sc->sc_dmat, data->map);
1412 			m_freem(data->m);
1413 			data->m = NULL;
1414 		}
1415 	}
1416 	/* Clear TX descriptors. */
1417 	memset(ring->desc, 0, ring->desc_dma.size);
1418 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1419 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1420 	sc->qfullmsk &= ~(1 << ring->qid);
1421 	sc->qenablemsk &= ~(1 << ring->qid);
1422 	/* 7000 family NICs are locked while commands are in progress. */
1423 	if (ring->qid == sc->cmdqid && ring->queued > 0) {
1424 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1425 			iwm_nic_unlock(sc);
1426 	}
1427 	ring->queued = 0;
1428 	ring->cur = 0;
1429 	ring->tail = 0;
1430 }
1431 
1432 void
1433 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1434 {
1435 	int i;
1436 
1437 	iwm_dma_contig_free(&ring->desc_dma);
1438 	iwm_dma_contig_free(&ring->cmd_dma);
1439 
1440 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1441 		struct iwm_tx_data *data = &ring->data[i];
1442 
1443 		if (data->m != NULL) {
1444 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1445 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1446 			bus_dmamap_unload(sc->sc_dmat, data->map);
1447 			m_freem(data->m);
1448 			data->m = NULL;
1449 		}
1450 		if (data->map != NULL)
1451 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1452 	}
1453 }
1454 
1455 void
1456 iwm_enable_rfkill_int(struct iwm_softc *sc)
1457 {
1458 	if (!sc->sc_msix) {
1459 		sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
1460 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1461 	} else {
1462 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1463 		    sc->sc_fh_init_mask);
1464 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1465 		    ~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1466 		sc->sc_hw_mask = IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1467 	}
1468 
1469 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000)
1470 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1471 		    IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1472 }
1473 
1474 int
1475 iwm_check_rfkill(struct iwm_softc *sc)
1476 {
1477 	uint32_t v;
1478 	int s;
1479 	int rv;
1480 
1481 	s = splnet();
1482 
1483 	/*
1484 	 * "documentation" is not really helpful here:
1485 	 *  27:	HW_RF_KILL_SW
1486 	 *	Indicates state of (platform's) hardware RF-Kill switch
1487 	 *
1488 	 * But apparently when it's off, it's on ...
1489 	 */
1490 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1491 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1492 	if (rv) {
1493 		sc->sc_flags |= IWM_FLAG_RFKILL;
1494 	} else {
1495 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1496 	}
1497 
1498 	splx(s);
1499 	return rv;
1500 }
1501 
1502 void
1503 iwm_enable_interrupts(struct iwm_softc *sc)
1504 {
1505 	if (!sc->sc_msix) {
1506 		sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1507 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1508 	} else {
1509 		/*
1510 		 * sc_fh_mask and sc_hw_mask keep all the unmasked causes.
1511 		 * Unlike MSI, an MSI-X cause is enabled while its bit is unset.
1512 		 */
1513 		sc->sc_hw_mask = sc->sc_hw_init_mask;
1514 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1515 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1516 		    ~sc->sc_fh_mask);
1517 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1518 		    ~sc->sc_hw_mask);
1519 	}
1520 }
1521 
1522 void
1523 iwm_enable_fwload_interrupt(struct iwm_softc *sc)
1524 {
1525 	if (!sc->sc_msix) {
1526 		sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
1527 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1528 	} else {
1529 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1530 		    sc->sc_hw_init_mask);
1531 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1532 		    ~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
1533 		sc->sc_fh_mask = IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM;
1534 	}
1535 }
1536 
1537 void
1538 iwm_restore_interrupts(struct iwm_softc *sc)
1539 {
1540 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1541 }
1542 
1543 void
1544 iwm_disable_interrupts(struct iwm_softc *sc)
1545 {
1546 	int s = splnet();
1547 
1548 	if (!sc->sc_msix) {
1549 		IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1550 
1551 		/* acknowledge all interrupts */
1552 		IWM_WRITE(sc, IWM_CSR_INT, ~0);
1553 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1554 	} else {
1555 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1556 		    sc->sc_fh_init_mask);
1557 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1558 		    sc->sc_hw_init_mask);
1559 	}
1560 
1561 	splx(s);
1562 }
1563 
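/*
 * The ICT (interrupt cause table) is a DMA table, 4KB aligned, into
 * which the device writes pending interrupt causes so the handler can
 * read them from memory instead of from IWM_CSR_INT; iwm_intr() walks
 * the table starting at sc->ict_cur while IWM_FLAG_USE_ICT is set.
 */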
1564 void
1565 iwm_ict_reset(struct iwm_softc *sc)
1566 {
1567 	iwm_disable_interrupts(sc);
1568 
1569 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1570 	sc->ict_cur = 0;
1571 
1572 	/* Set physical address of ICT (4KB aligned). */
1573 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1574 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1575 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1576 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1577 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1578 
1579 	/* Switch to ICT interrupt mode in driver. */
1580 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1581 
1582 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1583 	iwm_enable_interrupts(sc);
1584 }
1585 
1586 #define IWM_HW_READY_TIMEOUT 50
1587 int
1588 iwm_set_hw_ready(struct iwm_softc *sc)
1589 {
1590 	int ready;
1591 
1592 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1593 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1594 
1595 	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1596 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1597 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1598 	    IWM_HW_READY_TIMEOUT);
1599 	if (ready)
1600 		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1601 		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1602 
1603 	return ready;
1604 }
1605 #undef IWM_HW_READY_TIMEOUT
1606 
1607 int
1608 iwm_prepare_card_hw(struct iwm_softc *sc)
1609 {
1610 	int t = 0;
1611 
1612 	if (iwm_set_hw_ready(sc))
1613 		return 0;
1614 
1615 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1616 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1617 	DELAY(1000);
1618 
1620 	/* If HW is not ready, prepare the conditions to check again */
1621 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1622 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1623 
1624 	do {
1625 		if (iwm_set_hw_ready(sc))
1626 			return 0;
1627 		DELAY(200);
1628 		t += 200;
1629 	} while (t < 150000);
1630 
1631 	return ETIMEDOUT;
1632 }
1633 
1634 void
1635 iwm_apm_config(struct iwm_softc *sc)
1636 {
1637 	pcireg_t lctl, cap;
1638 
1639 	/*
1640 	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
1641 	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1642 	 * If so (likely), disable L0S, so device moves directly L0->L1;
1643 	 *    costs negligible amount of power savings.
1644 	 * If not (unlikely), enable L0S, so there is at least some
1645 	 *    power savings, even without L1.
1646 	 */
1647 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1648 	    sc->sc_cap_off + PCI_PCIE_LCSR);
1649 	if (lctl & PCI_PCIE_LCSR_ASPM_L1) {
1650 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1651 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1652 	} else {
1653 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1654 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1655 	}
1656 
1657 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1658 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
1659 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
1660 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
1661 	    DEVNAME(sc),
1662 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
1663 	    sc->sc_ltr_enabled ? "En" : "Dis"));
1664 }
1665 
1666 /*
1667  * Start up the NIC's basic functionality after it has been reset,
1668  * e.g. after platform boot or shutdown.
1669  * NOTE: This does not load uCode nor start the embedded processor.
1670  */
1671 int
1672 iwm_apm_init(struct iwm_softc *sc)
1673 {
1674 	int err = 0;
1675 
1676 	/* Disable L0S exit timer (platform NMI workaround) */
1677 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
1678 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1679 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1680 
1681 	/*
1682 	 * Disable L0s without affecting L1;
1683 	 *  don't wait for ICH L0s (ICH bug W/A)
1684 	 */
1685 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1686 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1687 
1688 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1689 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1690 
1691 	/*
1692 	 * Enable HAP INTA (interrupt from management bus) to
1693 	 * wake device's PCI Express link L1a -> L0s
1694 	 */
1695 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1696 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1697 
1698 	iwm_apm_config(sc);
1699 
1700 #if 0 /* not for 7k/8k */
1701 	/* Configure analog phase-lock-loop before activating to D0A */
1702 	if (trans->cfg->base_params->pll_cfg_val)
1703 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1704 		    trans->cfg->base_params->pll_cfg_val);
1705 #endif
1706 
1707 	/*
1708 	 * Set "initialization complete" bit to move adapter from
1709 	 * D0U* --> D0A* (powered-up active) state.
1710 	 */
1711 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1712 
1713 	/*
1714 	 * Wait for clock stabilization; once stabilized, access to
1715 	 * device-internal resources is supported, e.g. iwm_write_prph()
1716 	 * and accesses to uCode SRAM.
1717 	 */
1718 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1719 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1720 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1721 		printf("%s: timeout waiting for clock stabilization\n",
1722 		    DEVNAME(sc));
1723 		err = ETIMEDOUT;
1724 		goto out;
1725 	}
1726 
1727 	if (sc->host_interrupt_operation_mode) {
1728 		/*
1729 		 * This is a bit of an abuse: the workaround is needed for
1730 		 * 7260/3160 only, so we check host_interrupt_operation_mode
1731 		 * even though what follows is not related to it.
1732 		 *
1733 		 * Enable the oscillator to count wake up time for L1 exit. This
1734 		 * consumes slightly more power (100uA) - but allows to be sure
1735 		 * that we wake up from L1 on time.
1736 		 *
1737 		 * This looks weird: read twice the same register, discard the
1738 		 * value, set a bit, and yet again, read that same register
1739 		 * just to discard the value. But that's the way the hardware
1740 		 * seems to like it.
1741 		 */
1742 		if (iwm_nic_lock(sc)) {
1743 			iwm_read_prph(sc, IWM_OSC_CLK);
1744 			iwm_read_prph(sc, IWM_OSC_CLK);
1745 			iwm_nic_unlock(sc);
1746 		}
1747 		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1748 		if (iwm_nic_lock(sc)) {
1749 			iwm_read_prph(sc, IWM_OSC_CLK);
1750 			iwm_read_prph(sc, IWM_OSC_CLK);
1751 			iwm_nic_unlock(sc);
1752 		}
1753 	}
1754 
1755 	/*
1756 	 * Enable DMA clock and wait for it to stabilize.
1757 	 *
1758 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1759 	 * do not disable clocks.  This preserves any hardware bits already
1760 	 * set by default in "CLK_CTRL_REG" after reset.
1761 	 */
1762 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1763 		if (iwm_nic_lock(sc)) {
1764 			iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1765 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1766 			iwm_nic_unlock(sc);
1767 		}
1768 		DELAY(20);
1769 
1770 		/* Disable L1-Active */
1771 		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1772 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1773 
1774 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1775 		if (iwm_nic_lock(sc)) {
1776 			iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1777 			    IWM_APMG_RTC_INT_STT_RFKILL);
1778 			iwm_nic_unlock(sc);
1779 		}
1780 	}
1781  out:
1782 	if (err)
1783 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
1784 	return err;
1785 }
1786 
1787 void
1788 iwm_apm_stop(struct iwm_softc *sc)
1789 {
1790 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1791 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1792 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1793 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE |
1794 	    IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
1795 	DELAY(1000);
1796 	IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1797 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1798 	DELAY(5000);
1799 
1800 	/* stop device's busmaster DMA activity */
1801 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1802 
1803 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1804 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1805 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1806 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
1807 
1808 	/*
1809 	 * Clear "initialization complete" bit to move adapter from
1810 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1811 	 */
1812 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1813 	    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1814 }
1815 
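/*
 * Configure MSI-X and record the cause masks which the hardware
 * presents after reset; iwm_disable_interrupts() restores these.
 */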
1816 void
1817 iwm_init_msix_hw(struct iwm_softc *sc)
1818 {
1819 	iwm_conf_msix_hw(sc, 0);
1820 
1821 	if (!sc->sc_msix)
1822 		return;
1823 
1824 	sc->sc_fh_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_FH_INT_MASK_AD);
1825 	sc->sc_fh_mask = sc->sc_fh_init_mask;
1826 	sc->sc_hw_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_HW_INT_MASK_AD);
1827 	sc->sc_hw_mask = sc->sc_hw_init_mask;
1828 }
1829 
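/*
 * Route all interrupt causes to MSI-X vector 0 and unmask them.
 * Without MSI-X, fall back to MSI on chips which default to MSI-X.
 */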
1830 void
1831 iwm_conf_msix_hw(struct iwm_softc *sc, int stopped)
1832 {
1833 	int vector = 0;
1834 
1835 	if (!sc->sc_msix) {
1836 		/* Newer chips default to MSIX. */
1837 		if (sc->sc_mqrx_supported && !stopped && iwm_nic_lock(sc)) {
1838 			iwm_write_prph(sc, IWM_UREG_CHICK,
1839 			    IWM_UREG_CHICK_MSI_ENABLE);
1840 			iwm_nic_unlock(sc);
1841 		}
1842 		return;
1843 	}
1844 
1845 	if (!stopped && iwm_nic_lock(sc)) {
1846 		iwm_write_prph(sc, IWM_UREG_CHICK, IWM_UREG_CHICK_MSIX_ENABLE);
1847 		iwm_nic_unlock(sc);
1848 	}
1849 
1850 	/* Disable all interrupts */
1851 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD, ~0);
1852 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD, ~0);
1853 
1854 	/* Map fallback-queue (command/mgmt) to a single vector */
1855 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(0),
1856 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1857 	/* Map RSS queue (data) to the same vector */
1858 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(1),
1859 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1860 
1861 	/* Unmask (enable) the RX queue interrupt causes. */
1862 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1863 	    IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1);
1864 
1865 	/* Map non-RX causes to the same vector */
1866 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
1867 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1868 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
1869 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1870 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_S2D),
1871 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1872 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_FH_ERR),
1873 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1874 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_ALIVE),
1875 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1876 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_WAKEUP),
1877 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1878 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_IML),
1879 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1880 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_CT_KILL),
1881 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1882 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_RF_KILL),
1883 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1884 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_PERIODIC),
1885 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1886 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SW_ERR),
1887 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1888 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SCD),
1889 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1890 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_FH_TX),
1891 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1892 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HW_ERR),
1893 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1894 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HAP),
1895 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1896 
1897 	/* Unmask (enable) the non-RX interrupt causes. */
1898 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1899 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
1900 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
1901 	    IWM_MSIX_FH_INT_CAUSES_S2D |
1902 	    IWM_MSIX_FH_INT_CAUSES_FH_ERR);
1903 	IWM_CLRBITS(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1904 	    IWM_MSIX_HW_INT_CAUSES_REG_ALIVE |
1905 	    IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP |
1906 	    IWM_MSIX_HW_INT_CAUSES_REG_IML |
1907 	    IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL |
1908 	    IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL |
1909 	    IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC |
1910 	    IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR |
1911 	    IWM_MSIX_HW_INT_CAUSES_REG_SCD |
1912 	    IWM_MSIX_HW_INT_CAUSES_REG_FH_TX |
1913 	    IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR |
1914 	    IWM_MSIX_HW_INT_CAUSES_REG_HAP);
1915 }
1916 
1917 int
1918 iwm_start_hw(struct iwm_softc *sc)
1919 {
1920 	int err;
1921 
1922 	err = iwm_prepare_card_hw(sc);
1923 	if (err)
1924 		return err;
1925 
1926 	/* Reset the entire device */
1927 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1928 	DELAY(5000);
1929 
1930 	err = iwm_apm_init(sc);
1931 	if (err)
1932 		return err;
1933 
1934 	iwm_init_msix_hw(sc);
1935 
1936 	iwm_enable_rfkill_int(sc);
1937 	iwm_check_rfkill(sc);
1938 
1939 	return 0;
1940 }
1941 
1942 
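/*
 * Stop the device: disable interrupts, halt DMA channels and the TX
 * scheduler, reset all rings, stop the APM and reset the embedded
 * processor.
 */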
1943 void
1944 iwm_stop_device(struct iwm_softc *sc)
1945 {
1946 	int chnl, ntries;
1947 	int qid;
1948 
1949 	iwm_disable_interrupts(sc);
1950 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1951 
1952 	/* Stop all DMA channels. */
1953 	if (iwm_nic_lock(sc)) {
1954 		/* Deactivate TX scheduler. */
1955 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1956 
1957 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1958 			IWM_WRITE(sc,
1959 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1960 			for (ntries = 0; ntries < 200; ntries++) {
1961 				uint32_t r;
1962 
1963 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1964 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1965 				    chnl))
1966 					break;
1967 				DELAY(20);
1968 			}
1969 		}
1970 		iwm_nic_unlock(sc);
1971 	}
1972 	iwm_disable_rx_dma(sc);
1973 
1974 	iwm_reset_rx_ring(sc, &sc->rxq);
1975 
1976 	for (qid = 0; qid < nitems(sc->txq); qid++)
1977 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1978 
1979 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1980 		if (iwm_nic_lock(sc)) {
1981 			/* Power-down device's busmaster DMA clocks */
1982 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1983 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1984 			iwm_nic_unlock(sc);
1985 		}
1986 		DELAY(5);
1987 	}
1988 
1989 	/* Make sure we've released our request to stay awake (redundant but safe). */
1990 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1991 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1992 	if (sc->sc_nic_locks > 0)
1993 		printf("%s: %d active NIC locks forcefully cleared\n",
1994 		    DEVNAME(sc), sc->sc_nic_locks);
1995 	sc->sc_nic_locks = 0;
1996 
1997 	/* Stop the device, and put it in low power state */
1998 	iwm_apm_stop(sc);
1999 
2000 	/* Reset the on-board processor. */
2001 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2002 	DELAY(5000);
2003 
2004 	/*
2005 	 * Upon stop, the IVAR table gets erased, so msi-x won't
2006 	 * work. This causes a bug in RF-KILL flows, since the interrupt
2007 	 * that enables radio won't fire on the correct irq, and the
2008 	 * driver won't be able to handle the interrupt.
2009 	 * Configure the IVAR table again after reset.
2010 	 */
2011 	iwm_conf_msix_hw(sc, 1);
2012 
2013 	/*
2014 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2015 	 * Clear the interrupt again.
2016 	 */
2017 	iwm_disable_interrupts(sc);
2018 
2019 	/* Even though we stop the HW we still want the RF kill interrupt. */
2020 	iwm_enable_rfkill_int(sc);
2021 	iwm_check_rfkill(sc);
2022 
2023 	iwm_prepare_card_hw(sc);
2024 }
2025 
2026 void
2027 iwm_nic_config(struct iwm_softc *sc)
2028 {
2029 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2030 	uint32_t mask, val, reg_val = 0;
2031 
2032 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
2033 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
2034 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
2035 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
2036 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
2037 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
2038 
2039 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2040 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2041 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2042 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2043 
2044 	/* radio configuration */
2045 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2046 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2047 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2048 
2049 	mask = IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2050 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2051 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2052 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2053 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2054 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2055 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2056 
2057 	val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG);
2058 	val &= ~mask;
2059 	val |= reg_val;
2060 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val);
2061 
2062 	/*
2063 	 * W/A: the NIC gets stuck in a reset state after early PCIe power
2064 	 * off (PCIe power is lost before PERST# is asserted), causing the
2065 	 * ME FW to lose ownership and be unable to obtain it back.
2066 	 */
2067 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2068 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2069 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
2070 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
2071 }
2072 
2073 int
2074 iwm_nic_rx_init(struct iwm_softc *sc)
2075 {
2076 	if (sc->sc_mqrx_supported)
2077 		return iwm_nic_rx_mq_init(sc);
2078 	else
2079 		return iwm_nic_rx_legacy_init(sc);
2080 }
2081 
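/*
 * Initialize RX on devices with multi-queue RX hardware (RFH):
 * program the free/used descriptor rings and the status page for
 * queue 0, then enable DMA with 4KB receive buffers.
 */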
2082 int
2083 iwm_nic_rx_mq_init(struct iwm_softc *sc)
2084 {
2085 	int enabled;
2086 
2087 	if (!iwm_nic_lock(sc))
2088 		return EBUSY;
2089 
2090 	/* Stop RX DMA. */
2091 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
2092 	/* Disable RX used and free queue operation. */
2093 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
2094 
2095 	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
2096 	    sc->rxq.free_desc_dma.paddr);
2097 	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
2098 	    sc->rxq.used_desc_dma.paddr);
2099 	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
2100 	    sc->rxq.stat_dma.paddr);
2101 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
2102 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
2103 	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
2104 
2105 	/* We configure only queue 0 for now. */
2106 	enabled = ((1 << 0) << 16) | (1 << 0);
2107 
2108 	/* Enable RX DMA, 4KB buffer size. */
2109 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
2110 	    IWM_RFH_DMA_EN_ENABLE_VAL |
2111 	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
2112 	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
2113 	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
2114 	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
2115 
2116 	/* Enable RX DMA snooping. */
2117 	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
2118 	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
2119 	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
2120 	    (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
2121 	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
2122 
2123 	/* Enable the configured queue(s). */
2124 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
2125 
2126 	iwm_nic_unlock(sc);
2127 
2128 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2129 
2130 	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
2131 
2132 	return 0;
2133 }
2134 
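/*
 * Initialize RX on devices without multi-queue support, using the
 * legacy single-channel RX DMA engine.
 */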
2135 int
2136 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
2137 {
2138 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
2139 
2140 	iwm_disable_rx_dma(sc);
2141 
2142 	if (!iwm_nic_lock(sc))
2143 		return EBUSY;
2144 
2145 	/* reset and flush pointers */
2146 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
2147 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
2148 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
2149 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
2150 
2151 	/* Set physical address of RX ring (256-byte aligned). */
2152 	IWM_WRITE(sc,
2153 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8);
2154 
2155 	/* Set physical address of RX status (16-byte aligned). */
2156 	IWM_WRITE(sc,
2157 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
2158 
2159 	/* Enable RX. */
2160 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
2161 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
2162 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
2163 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
2164 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
2165 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
2166 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
2167 
2168 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2169 
2170 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
2171 	if (sc->host_interrupt_operation_mode)
2172 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
2173 
2174 	iwm_nic_unlock(sc);
2175 
2176 	/*
2177 	 * This value should initially be 0 (before preparing any RBs),
2178 	 * and should be 8 after preparing the first 8 RBs (for example).
2179 	 */
2180 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
2181 
2182 	return 0;
2183 }
2184 
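/*
 * Initialize TX: program the "keep warm" page and the base address
 * of every TX ring, and let the scheduler auto-activate queues.
 */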
2185 int
2186 iwm_nic_tx_init(struct iwm_softc *sc)
2187 {
2188 	int qid;
2189 
2190 	if (!iwm_nic_lock(sc))
2191 		return EBUSY;
2192 
2193 	/* Deactivate TX scheduler. */
2194 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2195 
2196 	/* Set physical address of "keep warm" page (16-byte aligned). */
2197 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
2198 
2199 	for (qid = 0; qid < nitems(sc->txq); qid++) {
2200 		struct iwm_tx_ring *txq = &sc->txq[qid];
2201 
2202 		/* Set physical address of TX ring (256-byte aligned). */
2203 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
2204 		    txq->desc_dma.paddr >> 8);
2205 	}
2206 
2207 	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
2208 	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
2209 	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
2210 
2211 	iwm_nic_unlock(sc);
2212 
2213 	return 0;
2214 }
2215 
2216 int
2217 iwm_nic_init(struct iwm_softc *sc)
2218 {
2219 	int err;
2220 
2221 	iwm_apm_init(sc);
2222 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2223 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2224 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
2225 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
2226 
2227 	iwm_nic_config(sc);
2228 
2229 	err = iwm_nic_rx_init(sc);
2230 	if (err)
2231 		return err;
2232 
2233 	err = iwm_nic_tx_init(sc);
2234 	if (err)
2235 		return err;
2236 
2237 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2238 
2239 	return 0;
2240 }
2241 
2242 /* Map a TID to an ieee80211_edca_ac category. */
2243 const uint8_t iwm_tid_to_ac[IWM_MAX_TID_COUNT] = {
2244 	EDCA_AC_BE,
2245 	EDCA_AC_BK,
2246 	EDCA_AC_BK,
2247 	EDCA_AC_BE,
2248 	EDCA_AC_VI,
2249 	EDCA_AC_VI,
2250 	EDCA_AC_VO,
2251 	EDCA_AC_VO,
2252 };
2253 
2254 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2255 const uint8_t iwm_ac_to_tx_fifo[] = {
2256 	IWM_TX_FIFO_BE,
2257 	IWM_TX_FIFO_BK,
2258 	IWM_TX_FIFO_VI,
2259 	IWM_TX_FIFO_VO,
2260 };
2261 
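/*
 * Enable a TX queue by programming the scheduler directly through
 * PRPH registers. Used for the command queue, which must be set up
 * before any firmware command can be sent.
 */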
2262 int
2263 iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo)
2264 {
2265 	iwm_nic_assert_locked(sc);
2266 
2267 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2268 
2269 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2270 	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
2271 	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2272 
2273 	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
2274 
2275 	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
2276 
2277 	iwm_write_mem32(sc,
2278 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
2279 
2280 	/* Set scheduler window size and frame limit. */
2281 	iwm_write_mem32(sc,
2282 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2283 	    sizeof(uint32_t),
2284 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2285 	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2286 	    ((IWM_FRAME_LIMIT
2287 		<< IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2288 	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2289 
2290 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2291 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2292 	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2293 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2294 	    IWM_SCD_QUEUE_STTS_REG_MSK);
2295 
2296 	if (qid == sc->cmdqid)
2297 		iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2298 		    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));
2299 
2300 	return 0;
2301 }
2302 
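/*
 * Enable a data TX queue via the firmware's SCD_QUEUE_CFG command,
 * working around a scheduler hardware bug on pre-9000 devices.
 */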
2303 int
2304 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo,
2305     int aggregate, uint8_t tid, uint16_t ssn)
2306 {
2307 	struct iwm_tx_ring *ring = &sc->txq[qid];
2308 	struct iwm_scd_txq_cfg_cmd cmd;
2309 	int err, idx, scd_bug;
2310 
2311 	iwm_nic_assert_locked(sc);
2312 
2313 	/*
2314 	 * If we need to move the SCD write pointer by steps of
2315 	 * 0x40, 0x80 or 0xc0, it gets stuck.
2316 	 * This is really ugly, but this is the easiest way out for
2317 	 * this sad hardware issue.
2318 	 * This bug has been fixed on devices 9000 and up.
2319 	 */
2320 	scd_bug = !sc->sc_mqrx_supported &&
2321 		!((ssn - ring->cur) & 0x3f) &&
2322 		(ssn != ring->cur);
2323 	if (scd_bug)
2324 		ssn = (ssn + 1) & 0xfff;
2325 
2326 	idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
2327 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | idx);
2328 	ring->cur = idx;
2329 	ring->tail = idx;
2330 
2331 	memset(&cmd, 0, sizeof(cmd));
2332 	cmd.tid = tid;
2333 	cmd.scd_queue = qid;
2334 	cmd.enable = 1;
2335 	cmd.sta_id = sta_id;
2336 	cmd.tx_fifo = fifo;
2337 	cmd.aggregate = aggregate;
2338 	cmd.ssn = htole16(ssn);
2339 	cmd.window = IWM_FRAME_LIMIT;
2340 
2341 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
2342 	    sizeof(cmd), &cmd);
2343 	if (err)
2344 		return err;
2345 
2346 	sc->qenablemsk |= (1 << qid);
2347 	return 0;
2348 }
2349 
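/*
 * Finish setup after the firmware has reported alive: reset the ICT
 * table, clear the scheduler's SRAM state, enable the command queue
 * and the FH TX DMA channels.
 */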
2350 int
2351 iwm_post_alive(struct iwm_softc *sc)
2352 {
2353 	int nwords;
2354 	int err, chnl;
2355 	uint32_t base;
2356 
2357 	if (!iwm_nic_lock(sc))
2358 		return EBUSY;
2359 
2360 	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2361 
2362 	iwm_ict_reset(sc);
2363 
2364 	iwm_nic_unlock(sc);
2365 
2366 	/* Clear TX scheduler state in SRAM. */
2367 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2368 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
2369 	    / sizeof(uint32_t);
2370 	err = iwm_write_mem(sc,
2371 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
2372 	    NULL, nwords);
2373 	if (err)
2374 		return err;
2375 
2376 	if (!iwm_nic_lock(sc))
2377 		return EBUSY;
2378 
2379 	/* Set physical address of TX scheduler rings (1KB aligned). */
2380 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2381 
2382 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2383 
2384 	/* enable command channel */
2385 	err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD);
2386 	if (err) {
2387 		iwm_nic_unlock(sc);
2388 		return err;
2389 	}
2390 
2391 	/* Activate TX scheduler. */
2392 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2393 
2394 	/* Enable DMA channels. */
2395 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2396 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2397 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2398 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2399 	}
2400 
2401 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2402 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2403 
2404 	iwm_nic_unlock(sc);
2405 
2406 	/* Enable L1-Active */
2407 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
2408 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2409 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2410 
2411 	return err;
2412 }
2413 
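/*
 * The PHY database stores calibration results reported by the INIT
 * firmware image so they can be sent to the runtime image later.
 */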
2414 struct iwm_phy_db_entry *
2415 iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
2416 {
2417 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2418 
2419 	if (type >= IWM_PHY_DB_MAX)
2420 		return NULL;
2421 
2422 	switch (type) {
2423 	case IWM_PHY_DB_CFG:
2424 		return &phy_db->cfg;
2425 	case IWM_PHY_DB_CALIB_NCH:
2426 		return &phy_db->calib_nch;
2427 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2428 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2429 			return NULL;
2430 		return &phy_db->calib_ch_group_papd[chg_id];
2431 	case IWM_PHY_DB_CALIB_CHG_TXP:
2432 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2433 			return NULL;
2434 		return &phy_db->calib_ch_group_txp[chg_id];
2435 	default:
2436 		return NULL;
2437 	}
2438 	return NULL;
2439 }
2440 
2441 int
2442 iwm_phy_db_set_section(struct iwm_softc *sc,
2443     struct iwm_calib_res_notif_phy_db *phy_db_notif)
2444 {
2445 	uint16_t type = le16toh(phy_db_notif->type);
2446 	uint16_t size  = le16toh(phy_db_notif->length);
2447 	struct iwm_phy_db_entry *entry;
2448 	uint16_t chg_id = 0;
2449 
2450 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2451 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
2452 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2453 
2454 	entry = iwm_phy_db_get_section(sc, type, chg_id);
2455 	if (!entry)
2456 		return EINVAL;
2457 
2458 	if (entry->data)
2459 		free(entry->data, M_DEVBUF, entry->size);
2460 	entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
2461 	if (!entry->data) {
2462 		entry->size = 0;
2463 		return ENOMEM;
2464 	}
2465 	memcpy(entry->data, phy_db_notif->data, size);
2466 	entry->size = size;
2467 
2468 	return 0;
2469 }
2470 
2471 int
2472 iwm_is_valid_channel(uint16_t ch_id)
2473 {
2474 	if (ch_id <= 14 ||
2475 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2476 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2477 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2478 		return 1;
2479 	return 0;
2480 }
2481 
2482 uint8_t
2483 iwm_ch_id_to_ch_index(uint16_t ch_id)
2484 {
2485 	if (!iwm_is_valid_channel(ch_id))
2486 		return 0xff;
2487 
2488 	if (ch_id <= 14)
2489 		return ch_id - 1;
2490 	if (ch_id <= 64)
2491 		return (ch_id + 20) / 4;
2492 	if (ch_id <= 140)
2493 		return (ch_id - 12) / 4;
2494 	return (ch_id - 13) / 4;
2495 }
2496 
2497 
2498 uint16_t
2499 iwm_channel_id_to_papd(uint16_t ch_id)
2500 {
2501 	if (!iwm_is_valid_channel(ch_id))
2502 		return 0xff;
2503 
2504 	if (1 <= ch_id && ch_id <= 14)
2505 		return 0;
2506 	if (36 <= ch_id && ch_id <= 64)
2507 		return 1;
2508 	if (100 <= ch_id && ch_id <= 140)
2509 		return 2;
2510 	return 3;
2511 }
2512 
2513 uint16_t
2514 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2515 {
2516 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2517 	struct iwm_phy_db_chg_txp *txp_chg;
2518 	int i;
2519 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2520 
2521 	if (ch_index == 0xff)
2522 		return 0xff;
2523 
2524 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2525 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2526 		if (!txp_chg)
2527 			return 0xff;
2528 		/*
2529 		 * Look for the first channel group whose max channel is
2530 		 * higher than the requested channel.
2531 		 */
2532 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2533 			return i;
2534 	}
2535 	return 0xff;
2536 }
2537 
2538 int
2539 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2540     uint16_t *size, uint16_t ch_id)
2541 {
2542 	struct iwm_phy_db_entry *entry;
2543 	uint16_t ch_group_id = 0;
2544 
2545 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2546 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2547 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2548 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2549 
2550 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2551 	if (!entry)
2552 		return EINVAL;
2553 
2554 	*data = entry->data;
2555 	*size = entry->size;
2556 
2557 	return 0;
2558 }
2559 
2560 int
2561 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2562     void *data)
2563 {
2564 	struct iwm_phy_db_cmd phy_db_cmd;
2565 	struct iwm_host_cmd cmd = {
2566 		.id = IWM_PHY_DB_CMD,
2567 		.flags = IWM_CMD_ASYNC,
2568 	};
2569 
2570 	phy_db_cmd.type = le16toh(type);
2571 	phy_db_cmd.length = le16toh(length);
2572 
2573 	cmd.data[0] = &phy_db_cmd;
2574 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2575 	cmd.data[1] = data;
2576 	cmd.len[1] = length;
2577 
2578 	return iwm_send_cmd(sc, &cmd);
2579 }
2580 
2581 int
2582 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2583     uint8_t max_ch_groups)
2584 {
2585 	uint16_t i;
2586 	int err;
2587 	struct iwm_phy_db_entry *entry;
2588 
2589 	for (i = 0; i < max_ch_groups; i++) {
2590 		entry = iwm_phy_db_get_section(sc, type, i);
2591 		if (!entry)
2592 			return EINVAL;
2593 
2594 		if (!entry->size)
2595 			continue;
2596 
2597 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2598 		if (err)
2599 			return err;
2600 
2601 		DELAY(1000);
2602 	}
2603 
2604 	return 0;
2605 }
2606 
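/*
 * Upload all stored PHY database sections to the firmware: config,
 * non-channel calibration, and the per-channel-group calibrations.
 */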
2607 int
2608 iwm_send_phy_db_data(struct iwm_softc *sc)
2609 {
2610 	uint8_t *data = NULL;
2611 	uint16_t size = 0;
2612 	int err;
2613 
2614 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2615 	if (err)
2616 		return err;
2617 
2618 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2619 	if (err)
2620 		return err;
2621 
2622 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2623 	    &data, &size, 0);
2624 	if (err)
2625 		return err;
2626 
2627 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2628 	if (err)
2629 		return err;
2630 
2631 	err = iwm_phy_db_send_all_channel_groups(sc,
2632 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2633 	if (err)
2634 		return err;
2635 
2636 	err = iwm_phy_db_send_all_channel_groups(sc,
2637 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2638 	if (err)
2639 		return err;
2640 
2641 	return 0;
2642 }
2643 
2644 /*
2645  * For the high priority TE use a time event type that has similar priority to
2646  * the FW's action scan priority.
2647  */
2648 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2649 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2650 
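/*
 * Send a time event command and record the unique ID assigned by the
 * firmware so the event can be removed later.
 */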
2651 int
2652 iwm_send_time_event_cmd(struct iwm_softc *sc,
2653     const struct iwm_time_event_cmd *cmd)
2654 {
2655 	struct iwm_rx_packet *pkt;
2656 	struct iwm_time_event_resp *resp;
2657 	struct iwm_host_cmd hcmd = {
2658 		.id = IWM_TIME_EVENT_CMD,
2659 		.flags = IWM_CMD_WANT_RESP,
2660 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2661 	};
2662 	uint32_t resp_len;
2663 	int err;
2664 
2665 	hcmd.data[0] = cmd;
2666 	hcmd.len[0] = sizeof(*cmd);
2667 	err = iwm_send_cmd(sc, &hcmd);
2668 	if (err)
2669 		return err;
2670 
2671 	pkt = hcmd.resp_pkt;
2672 	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
2673 		err = EIO;
2674 		goto out;
2675 	}
2676 
2677 	resp_len = iwm_rx_packet_payload_len(pkt);
2678 	if (resp_len != sizeof(*resp)) {
2679 		err = EIO;
2680 		goto out;
2681 	}
2682 
2683 	resp = (void *)pkt->data;
2684 	if (le32toh(resp->status) == 0)
2685 		sc->sc_time_event_uid = le32toh(resp->unique_id);
2686 	else
2687 		err = EIO;
2688 out:
2689 	iwm_free_resp(sc, &hcmd);
2690 	return err;
2691 }
2692 
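/*
 * Schedule a time event which keeps the firmware on-channel while
 * association is in progress.
 */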
2693 void
2694 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2695     uint32_t duration, uint32_t max_delay)
2696 {
2697 	struct iwm_time_event_cmd time_cmd;
2698 
2699 	/* Do nothing if a time event is already scheduled. */
2700 	if (sc->sc_flags & IWM_FLAG_TE_ACTIVE)
2701 		return;
2702 
2703 	memset(&time_cmd, 0, sizeof(time_cmd));
2704 
2705 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2706 	time_cmd.id_and_color =
2707 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2708 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2709 
2710 	time_cmd.apply_time = htole32(0);
2711 
2712 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2713 	time_cmd.max_delay = htole32(max_delay);
2714 	/* TODO: why do we need interval = beacon interval if it is not periodic? */
2715 	time_cmd.interval = htole32(1);
2716 	time_cmd.duration = htole32(duration);
2717 	time_cmd.repeat = 1;
2718 	time_cmd.policy
2719 	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2720 	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
2721 		IWM_T2_V2_START_IMMEDIATELY);
2722 
2723 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2724 		sc->sc_flags |= IWM_FLAG_TE_ACTIVE;
2725 
2726 	DELAY(100);
2727 }
2728 
2729 void
2730 iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
2731 {
2732 	struct iwm_time_event_cmd time_cmd;
2733 
2734 	/* Do nothing if the time event has already ended. */
2735 	if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
2736 		return;
2737 
2738 	memset(&time_cmd, 0, sizeof(time_cmd));
2739 
2740 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
2741 	time_cmd.id_and_color =
2742 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2743 	time_cmd.id = htole32(sc->sc_time_event_uid);
2744 
2745 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2746 		sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
2747 
2748 	DELAY(100);
2749 }
2750 
2751 /*
2752  * NVM read access and content parsing.  We do not support
2753  * external NVM or writing NVM.
2754  */
2755 
2756 /* list of NVM sections we are allowed/need to read */
2757 const int iwm_nvm_to_read[] = {
2758 	IWM_NVM_SECTION_TYPE_HW,
2759 	IWM_NVM_SECTION_TYPE_SW,
2760 	IWM_NVM_SECTION_TYPE_REGULATORY,
2761 	IWM_NVM_SECTION_TYPE_CALIBRATION,
2762 	IWM_NVM_SECTION_TYPE_PRODUCTION,
2763 	IWM_NVM_SECTION_TYPE_REGULATORY_SDP,
2764 	IWM_NVM_SECTION_TYPE_HW_8000,
2765 	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2766 	IWM_NVM_SECTION_TYPE_PHY_SKU,
2767 };
2768 
2769 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2770 
2771 #define IWM_NVM_WRITE_OPCODE 1
2772 #define IWM_NVM_READ_OPCODE 0
2773 
2774 int
2775 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2776     uint16_t length, uint8_t *data, uint16_t *len)
2777 {
2779 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2780 		.offset = htole16(offset),
2781 		.length = htole16(length),
2782 		.type = htole16(section),
2783 		.op_code = IWM_NVM_READ_OPCODE,
2784 	};
2785 	struct iwm_nvm_access_resp *nvm_resp;
2786 	struct iwm_rx_packet *pkt;
2787 	struct iwm_host_cmd cmd = {
2788 		.id = IWM_NVM_ACCESS_CMD,
2789 		.flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL),
2790 		.resp_pkt_len = IWM_CMD_RESP_MAX,
2791 		.data = { &nvm_access_cmd, },
2792 	};
2793 	int err, offset_read;
2794 	size_t bytes_read;
2795 	uint8_t *resp_data;
2796 
2797 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2798 
2799 	err = iwm_send_cmd(sc, &cmd);
2800 	if (err)
2801 		return err;
2802 
2803 	pkt = cmd.resp_pkt;
2804 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2805 		err = EIO;
2806 		goto exit;
2807 	}
2808 
2809 	/* Extract NVM response */
2810 	nvm_resp = (void *)pkt->data;
2811 	if (nvm_resp == NULL)
2812 		return EIO;
2813 
2814 	err = le16toh(nvm_resp->status);
2815 	bytes_read = le16toh(nvm_resp->length);
2816 	offset_read = le16toh(nvm_resp->offset);
2817 	resp_data = nvm_resp->data;
2818 	if (err) {
2819 		err = EINVAL;
2820 		goto exit;
2821 	}
2822 
2823 	if (offset_read != offset) {
2824 		err = EINVAL;
2825 		goto exit;
2826 	}
2827 
2828 	if (bytes_read > length) {
2829 		err = EINVAL;
2830 		goto exit;
2831 	}
2832 
2833 	memcpy(data + offset, resp_data, bytes_read);
2834 	*len = bytes_read;
2835 
2836  exit:
2837 	iwm_free_resp(sc, &cmd);
2838 	return err;
2839 }
2840 
2841 /*
2842  * Read an NVM section completely.
2843  * NICs prior to the 7000 family don't have a real NVM but just read
2844  * section 0, which is the EEPROM. Because EEPROM reads are not
2845  * length-limited by uCode, we need to manually check in this case
2846  * that we don't overflow and read more than the EEPROM size.
2847  */
2848 int
2849 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2850     uint16_t *len, size_t max_len)
2851 {
2852 	uint16_t chunklen, seglen;
2853 	int err = 0;
2854 
2855 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2856 	*len = 0;
2857 
2858 	/* Read NVM chunks until exhausted (reading less than requested) */
2859 	while (seglen == chunklen && *len < max_len) {
2860 		err = iwm_nvm_read_chunk(sc,
2861 		    section, *len, chunklen, data, &seglen);
2862 		if (err)
2863 			return err;
2864 
2865 		*len += seglen;
2866 	}
2867 
2868 	return err;
2869 }
2870 
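/*
 * Return the TX antenna mask advertised by the firmware, restricted
 * to antennas marked valid in NVM (if NVM provides such a mask).
 */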
2871 uint8_t
2872 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2873 {
2874 	uint8_t tx_ant;
2875 
2876 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2877 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2878 
2879 	if (sc->sc_nvm.valid_tx_ant)
2880 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2881 
2882 	return tx_ant;
2883 }
2884 
2885 uint8_t
2886 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2887 {
2888 	uint8_t rx_ant;
2889 
2890 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2891 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2892 
2893 	if (sc->sc_nvm.valid_rx_ant)
2894 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2895 
2896 	return rx_ant;
2897 }
2898 
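/*
 * Populate net80211's channel list according to the channel flags
 * found in NVM.
 */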
2899 void
2900 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
2901     const uint8_t *nvm_channels, int nchan)
2902 {
2903 	struct ieee80211com *ic = &sc->sc_ic;
2904 	struct iwm_nvm_data *data = &sc->sc_nvm;
2905 	int ch_idx;
2906 	struct ieee80211_channel *channel;
2907 	uint16_t ch_flags;
2908 	int is_5ghz;
2909 	int flags, hw_value;
2910 
2911 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2912 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2913 
2914 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2915 		    !data->sku_cap_band_52GHz_enable)
2916 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2917 
2918 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID))
2919 			continue;
2920 
2921 		hw_value = nvm_channels[ch_idx];
2922 		channel = &ic->ic_channels[hw_value];
2923 
2924 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2925 		if (!is_5ghz) {
2926 			flags = IEEE80211_CHAN_2GHZ;
2927 			channel->ic_flags
2928 			    = IEEE80211_CHAN_CCK
2929 			    | IEEE80211_CHAN_OFDM
2930 			    | IEEE80211_CHAN_DYN
2931 			    | IEEE80211_CHAN_2GHZ;
2932 		} else {
2933 			flags = IEEE80211_CHAN_5GHZ;
2934 			channel->ic_flags =
2935 			    IEEE80211_CHAN_A;
2936 		}
2937 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2938 
2939 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2940 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2941 
2942 		if (data->sku_cap_11n_enable)
2943 			channel->ic_flags |= IEEE80211_CHAN_HT;
2944 	}
2945 }
2946 
2947 int
2948 iwm_mimo_enabled(struct iwm_softc *sc)
2949 {
2950 	struct ieee80211com *ic = &sc->sc_ic;
2951 
2952 	return !sc->sc_nvm.sku_cap_mimo_disable &&
2953 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
2954 }
2955 
2956 void
2957 iwm_setup_ht_rates(struct iwm_softc *sc)
2958 {
2959 	struct ieee80211com *ic = &sc->sc_ic;
2960 	uint8_t rx_ant;
2961 
2962 	/* TX is supported with the same MCS as RX. */
2963 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2964 
2965 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
2966 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
2967 
2968 	if (!iwm_mimo_enabled(sc))
2969 		return;
2970 
2971 	rx_ant = iwm_fw_valid_rx_ant(sc);
2972 	if ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
2973 	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)
2974 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
2975 }
2976 
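/* Reset an RX block ack reorder buffer to an empty state at SSN. */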
2977 void
2978 iwm_init_reorder_buffer(struct iwm_reorder_buffer *reorder_buf,
2979     uint16_t ssn, uint16_t buf_size)
2980 {
2981 	reorder_buf->head_sn = ssn;
2982 	reorder_buf->num_stored = 0;
2983 	reorder_buf->buf_size = buf_size;
2984 	reorder_buf->last_amsdu = 0;
2985 	reorder_buf->last_sub_index = 0;
2986 	reorder_buf->removed = 0;
2987 	reorder_buf->valid = 0;
2988 	reorder_buf->consec_oldsn_drops = 0;
2989 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
2990 	reorder_buf->consec_oldsn_prev_drop = 0;
2991 }
2992 
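/*
 * Tear down a reorder buffer when its block ack session goes away:
 * purge any buffered frames and stop the associated timers.
 */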
2993 void
2994 iwm_clear_reorder_buffer(struct iwm_softc *sc, struct iwm_rxba_data *rxba)
2995 {
2996 	int i;
2997 	struct iwm_reorder_buffer *reorder_buf = &rxba->reorder_buf;
2998 	struct iwm_reorder_buf_entry *entry;
2999 
3000 	for (i = 0; i < reorder_buf->buf_size; i++) {
3001 		entry = &rxba->entries[i];
3002 		ml_purge(&entry->frames);
3003 		timerclear(&entry->reorder_time);
3004 	}
3005 
3006 	reorder_buf->removed = 1;
3007 	timeout_del(&reorder_buf->reorder_timer);
3008 	timerclear(&rxba->last_rx);
3009 	timeout_del(&rxba->session_timer);
3010 	rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
3011 }
3012 
3013 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
3014 
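/*
 * Timer callback: tear down an RX block ack session if no frame has
 * arrived within the negotiated timeout, otherwise re-arm the timer.
 */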
3015 void
3016 iwm_rx_ba_session_expired(void *arg)
3017 {
3018 	struct iwm_rxba_data *rxba = arg;
3019 	struct iwm_softc *sc = rxba->sc;
3020 	struct ieee80211com *ic = &sc->sc_ic;
3021 	struct ieee80211_node *ni = ic->ic_bss;
3022 	struct timeval now, timeout, expiry;
3023 	int s;
3024 
3025 	s = splnet();
3026 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0 &&
3027 	    ic->ic_state == IEEE80211_S_RUN &&
3028 	    rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3029 		getmicrouptime(&now);
3030 		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3031 		timeradd(&rxba->last_rx, &timeout, &expiry);
3032 		if (timercmp(&now, &expiry, <)) {
3033 			timeout_add_usec(&rxba->session_timer, rxba->timeout);
3034 		} else {
3035 			ic->ic_stats.is_ht_rx_ba_timeout++;
3036 			ieee80211_delba_request(ic, ni,
3037 			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
3038 		}
3039 	}
3040 	splx(s);
3041 }
3042 
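/*
 * Timer callback: release frames which have been buffered for too
 * long, advancing the reorder window past any expired holes.
 */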
3043 void
3044 iwm_reorder_timer_expired(void *arg)
3045 {
3046 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3047 	struct iwm_reorder_buffer *buf = arg;
3048 	struct iwm_rxba_data *rxba = iwm_rxba_data_from_reorder_buf(buf);
3049 	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
3050 	struct iwm_softc *sc = rxba->sc;
3051 	struct ieee80211com *ic = &sc->sc_ic;
3052 	struct ieee80211_node *ni = ic->ic_bss;
3053 	int i, s;
3054 	uint16_t sn = 0, index = 0;
3055 	int expired = 0;
3056 	int cont = 0;
3057 	struct timeval now, timeout, expiry;
3058 
3059 	if (!buf->num_stored || buf->removed)
3060 		return;
3061 
3062 	s = splnet();
3063 	getmicrouptime(&now);
3064 	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3065 
3066 	for (i = 0; i < buf->buf_size; i++) {
3067 		index = (buf->head_sn + i) % buf->buf_size;
3068 
3069 		if (ml_empty(&entries[index].frames)) {
3070 			/*
3071 			 * If there is a hole and the next frame didn't expire
3072 			 * we want to break and not advance SN.
3073 			 */
3074 			cont = 0;
3075 			continue;
3076 		}
3077 		timeradd(&entries[index].reorder_time, &timeout, &expiry);
3078 		if (!cont && timercmp(&now, &expiry, <))
3079 			break;
3080 
3081 		expired = 1;
3082 		/* continue until next hole after this expired frame */
3083 		cont = 1;
3084 		sn = (buf->head_sn + (i + 1)) & 0xfff;
3085 	}
3086 
3087 	if (expired) {
3088 		/* SN is set to the last expired frame + 1 */
3089 		iwm_release_frames(sc, ni, rxba, buf, sn, &ml);
3090 		if_input(&sc->sc_ic.ic_if, &ml);
3091 		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3092 	} else {
3093 		/*
3094 		 * If no frame expired and there are stored frames, index is now
3095 		 * pointing to the first unexpired frame - modify reorder timeout
3096 		 * accordingly.
3097 		 */
3098 		timeout_add_usec(&buf->reorder_timer,
3099 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3100 	}
3101 
3102 	splx(s);
3103 }
3104 
3105 #define IWM_MAX_RX_BA_SESSIONS 16
3106 
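/*
 * Start or stop an RX block ack session via an ADD_STA command.
 * On devices with multi-queue RX the firmware assigns a BAID and the
 * driver maintains a matching reorder buffer.
 */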
3107 int
3108 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3109     uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3110 {
3111 	struct ieee80211com *ic = &sc->sc_ic;
3112 	struct iwm_add_sta_cmd cmd;
3113 	struct iwm_node *in = (void *)ni;
3114 	int err, s;
3115 	uint32_t status;
3116 	size_t cmdsize;
3117 	struct iwm_rxba_data *rxba = NULL;
3118 	uint8_t baid = 0;
3119 
3120 	s = splnet();
3121 
3122 	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
3123 		ieee80211_addba_req_refuse(ic, ni, tid);
3124 		splx(s);
3125 		return 0;
3126 	}
3127 
3128 	memset(&cmd, 0, sizeof(cmd));
3129 
3130 	cmd.sta_id = IWM_STATION_ID;
3131 	cmd.mac_id_n_color
3132 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3133 	cmd.add_modify = IWM_STA_MODE_MODIFY;
3134 
3135 	if (start) {
3136 		cmd.add_immediate_ba_tid = (uint8_t)tid;
3137 		cmd.add_immediate_ba_ssn = ssn;
3138 		cmd.rx_ba_window = winsize;
3139 	} else {
3140 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
3141 	}
3142 	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
3143 	    IWM_STA_MODIFY_REMOVE_BA_TID;
3144 
3145 	status = IWM_ADD_STA_SUCCESS;
3146 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3147 		cmdsize = sizeof(cmd);
3148 	else
3149 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3150 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
3151 	    &status);
3152 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3153 		err = EIO;
3154 	if (err) {
3155 		if (start)
3156 			ieee80211_addba_req_refuse(ic, ni, tid);
3157 		splx(s);
3158 		return err;
3159 	}
3160 
3161 	if (sc->sc_mqrx_supported) {
3162 		/* Deaggregation is done in hardware. */
3163 		if (start) {
3164 			if (!(status & IWM_ADD_STA_BAID_VALID_MASK)) {
3165 				ieee80211_addba_req_refuse(ic, ni, tid);
3166 				splx(s);
3167 				return EIO;
3168 			}
3169 			baid = (status & IWM_ADD_STA_BAID_MASK) >>
3170 			    IWM_ADD_STA_BAID_SHIFT;
3171 			if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
3172 			    baid >= nitems(sc->sc_rxba_data)) {
3173 				ieee80211_addba_req_refuse(ic, ni, tid);
3174 				splx(s);
3175 				return EIO;
3176 			}
3177 			rxba = &sc->sc_rxba_data[baid];
3178 			if (rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3179 				ieee80211_addba_req_refuse(ic, ni, tid);
3180 				splx(s);
3181 				return 0;
3182 			}
3183 			rxba->sta_id = IWM_STATION_ID;
3184 			rxba->tid = tid;
3185 			rxba->baid = baid;
3186 			rxba->timeout = timeout_val;
3187 			getmicrouptime(&rxba->last_rx);
3188 			iwm_init_reorder_buffer(&rxba->reorder_buf, ssn,
3189 			    winsize);
3190 			if (timeout_val != 0) {
3191 				struct ieee80211_rx_ba *ba;
3192 				timeout_add_usec(&rxba->session_timer,
3193 				    timeout_val);
3194 				/* XXX disable net80211's BA timeout handler */
3195 				ba = &ni->ni_rx_ba[tid];
3196 				ba->ba_timeout_val = 0;
3197 			}
3198 		} else {
3199 			int i;
3200 			for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3201 				rxba = &sc->sc_rxba_data[i];
3202 				if (rxba->baid ==
3203 				    IWM_RX_REORDER_DATA_INVALID_BAID)
3204 					continue;
3205 				if (rxba->tid != tid)
3206 					continue;
3207 				iwm_clear_reorder_buffer(sc, rxba);
3208 				break;
3209 			}
3210 		}
3211 	}
3212 
3213 	if (start) {
3214 		sc->sc_rx_ba_sessions++;
3215 		ieee80211_addba_req_accept(ic, ni, tid);
3216 	} else if (sc->sc_rx_ba_sessions > 0)
3217 		sc->sc_rx_ba_sessions--;
3218 
3219 	splx(s);
3220 	return 0;
3221 }
3222 
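/*
 * Task which pushes updated MAC context settings (protection, slot
 * time, EDCA parameters) to the firmware while associated.
 */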
3223 void
3224 iwm_mac_ctxt_task(void *arg)
3225 {
3226 	struct iwm_softc *sc = arg;
3227 	struct ieee80211com *ic = &sc->sc_ic;
3228 	struct iwm_node *in = (void *)ic->ic_bss;
3229 	int err, s = splnet();
3230 
3231 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
3232 		refcnt_rele_wake(&sc->task_refs);
3233 		splx(s);
3234 		return;
3235 	}
3236 
3237 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
3238 	if (err)
3239 		printf("%s: failed to update MAC\n", DEVNAME(sc));
3240 
3241 	refcnt_rele_wake(&sc->task_refs);
3242 	splx(s);
3243 }
3244 
3245 void
3246 iwm_updateprot(struct ieee80211com *ic)
3247 {
3248 	struct iwm_softc *sc = ic->ic_softc;
3249 
3250 	if (ic->ic_state == IEEE80211_S_RUN)
3251 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3252 }
3253 
3254 void
3255 iwm_updateslot(struct ieee80211com *ic)
3256 {
3257 	struct iwm_softc *sc = ic->ic_softc;
3258 
3259 	if (ic->ic_state == IEEE80211_S_RUN)
3260 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3261 }
3262 
3263 void
3264 iwm_updateedca(struct ieee80211com *ic)
3265 {
3266 	struct iwm_softc *sc = ic->ic_softc;
3267 
3268 	if (ic->ic_state == IEEE80211_S_RUN)
3269 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3270 }
3271 
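/*
 * Start or stop TX aggregation for a TID: update the station's TFD
 * queue mask and, when starting, enable the aggregation TX queue.
 */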
3272 int
3273 iwm_sta_tx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3274     uint16_t ssn, uint16_t winsize, int start)
3275 {
3276 	struct iwm_add_sta_cmd cmd;
3277 	struct ieee80211com *ic = &sc->sc_ic;
3278 	struct iwm_node *in = (void *)ni;
3279 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3280 	struct iwm_tx_ring *ring;
3281 	struct ieee80211_tx_ba *ba;
3282 	enum ieee80211_edca_ac ac;
3283 	int fifo;
3284 	uint32_t status;
3285 	int err;
3286 	size_t cmdsize;
3287 
3288 	/* Ensure we can map this TID to an aggregation queue. */
3289 	if (tid >= IWM_MAX_TID_COUNT || qid > IWM_LAST_AGG_TX_QUEUE)
3290 		return ENOSPC;
3291 
3292 	if (start) {
3293 		if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3294 			return 0;
3295 	} else {
3296 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3297 			return 0;
3298 	}
3299 
3300 	ring = &sc->txq[qid];
3301 	ba = &ni->ni_tx_ba[tid];
3302 	ac = iwm_tid_to_ac[tid];
3303 	fifo = iwm_ac_to_tx_fifo[ac];
3304 
3305 	memset(&cmd, 0, sizeof(cmd));
3306 
3307 	cmd.sta_id = IWM_STATION_ID;
3308 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
3309 	    in->in_color));
3310 	cmd.add_modify = IWM_STA_MODE_MODIFY;
3311 
3312 	if (start) {
3313 		/* Enable Tx aggregation for this queue. */
3314 		in->tid_disable_ampdu &= ~(1 << tid);
3315 		in->tfd_queue_msk |= (1 << qid);
3316 	} else {
3317 		in->tid_disable_ampdu |= (1 << tid);
3318 		/* Queue remains enabled in the TFD queue mask. */
3319 		err = iwm_flush_sta(sc, in);
3320 		if (err)
3321 			return err;
3322 	}
3323 
3324 	cmd.tfd_queue_msk |= htole32(in->tfd_queue_msk);
3325 	cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
3326 	cmd.modify_mask = (IWM_STA_MODIFY_QUEUES |
3327 	    IWM_STA_MODIFY_TID_DISABLE_TX);
3328 
3329 	if (start && (sc->qenablemsk & (1 << qid)) == 0) {
3330 		if (!iwm_nic_lock(sc)) {
3331 			if (start)
3332 				ieee80211_addba_resp_refuse(ic, ni, tid,
3333 				    IEEE80211_STATUS_UNSPECIFIED);
3334 			return EBUSY;
3335 		}
3336 		err = iwm_enable_txq(sc, IWM_STATION_ID, qid, fifo, 1, tid,
3337 		    ssn);
3338 		iwm_nic_unlock(sc);
3339 		if (err) {
3340 			printf("%s: could not enable Tx queue %d (error %d)\n",
3341 			    DEVNAME(sc), qid, err);
3342 			if (start)
3343 				ieee80211_addba_resp_refuse(ic, ni, tid,
3344 				    IEEE80211_STATUS_UNSPECIFIED);
3345 			return err;
3346 		}
3347 		/*
3348 		 * If iwm_enable_txq() employed the SCD hardware bug
3349 		 * workaround we must skip the frame with seqnum SSN.
3350 		 */
3351 		if (ring->cur != IWM_AGG_SSN_TO_TXQ_IDX(ssn)) {
3352 			ssn = (ssn + 1) & 0xfff;
3353 			KASSERT(ring->cur == IWM_AGG_SSN_TO_TXQ_IDX(ssn));
3354 			ieee80211_output_ba_move_window(ic, ni, tid, ssn);
3355 			ni->ni_qos_txseqs[tid] = ssn;
3356 		}
3357 	}
3358 
3359 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3360 		cmdsize = sizeof(cmd);
3361 	else
3362 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3363 
3364 	status = 0;
3365 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd, &status);
3366 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3367 		err = EIO;
3368 	if (err) {
3369 		printf("%s: could not update sta (error %d)\n",
3370 		    DEVNAME(sc), err);
3371 		if (start)
3372 			ieee80211_addba_resp_refuse(ic, ni, tid,
3373 			    IEEE80211_STATUS_UNSPECIFIED);
3374 		return err;
3375 	}
3376 
3377 	if (start) {
3378 		sc->tx_ba_queue_mask |= (1 << qid);
3379 		ieee80211_addba_resp_accept(ic, ni, tid);
3380 	} else {
3381 		sc->tx_ba_queue_mask &= ~(1 << qid);
3382 
3383 		/*
3384 		 * Clear pending frames but keep the queue enabled.
3385 		 * Firmware panics if we disable the queue here.
3386 		 */
3387 		iwm_txq_advance(sc, ring,
3388 		    IWM_AGG_SSN_TO_TXQ_IDX(ba->ba_winend));
3389 		iwm_clear_oactive(sc, ring);
3390 	}
3391 
3392 	return 0;
3393 }
3394 
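/*
 * Task which processes pending block ack session start/stop requests
 * for all TIDs in process context.
 */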
3395 void
3396 iwm_ba_task(void *arg)
3397 {
3398 	struct iwm_softc *sc = arg;
3399 	struct ieee80211com *ic = &sc->sc_ic;
3400 	struct ieee80211_node *ni = ic->ic_bss;
3401 	int s = splnet();
3402 	int tid, err = 0;
3403 
3404 	for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3405 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3406 			break;
3407 		if (sc->ba_rx.start_tidmask & (1 << tid)) {
3408 			struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3409 			err = iwm_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3410 			    ba->ba_winsize, ba->ba_timeout_val, 1);
3411 			sc->ba_rx.start_tidmask &= ~(1 << tid);
3412 		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3413 			err = iwm_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3414 			sc->ba_rx.stop_tidmask &= ~(1 << tid);
3415 		}
3416 	}
3417 
3418 	for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3419 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3420 			break;
3421 		if (sc->ba_tx.start_tidmask & (1 << tid)) {
3422 			struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3423 			err = iwm_sta_tx_agg(sc, ni, tid, ba->ba_winstart,
3424 			    ba->ba_winsize, 1);
3425 			sc->ba_tx.start_tidmask &= ~(1 << tid);
3426 		} else if (sc->ba_tx.stop_tidmask & (1 << tid)) {
3427 			err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
3428 			sc->ba_tx.stop_tidmask &= ~(1 << tid);
3429 		}
3430 	}
3431 
3432 	/*
3433 	 * We "recover" from failure to start or stop a BA session
3434 	 * by resetting the device.
3435 	 */
3436 	if (err && (sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
3437 		task_add(systq, &sc->init_task);
3438 
3439 	refcnt_rele_wake(&sc->task_refs);
3440 	splx(s);
3441 }
3442 
3443 /*
3444  * This function is called by the upper layer when an ADDBA request is
3445  * received from another STA and before the ADDBA response is sent.
3446  */
3447 int
3448 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3449     uint8_t tid)
3450 {
3451 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3452 
3453 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS ||
3454 	    tid > IWM_MAX_TID_COUNT)
3455 		return ENOSPC;
3456 
3457 	if (sc->ba_rx.start_tidmask & (1 << tid))
3458 		return EBUSY;
3459 
3460 	sc->ba_rx.start_tidmask |= (1 << tid);
3461 	iwm_add_task(sc, systq, &sc->ba_task);
3462 
3463 	return EBUSY;
3464 }
3465 
3466 /*
3467  * This function is called by the upper layer on teardown of an HT-immediate
3468  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
3469  */
3470 void
3471 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3472     uint8_t tid)
3473 {
3474 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3475 
3476 	if (tid > IWM_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3477 		return;
3478 
3479 	sc->ba_rx.stop_tidmask |= (1 << tid);
3480 	iwm_add_task(sc, systq, &sc->ba_task);
3481 }
3482 
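/*
 * This function is called by the upper layer when we want to use
 * Tx aggregation on a given TID. Returning EBUSY defers the decision;
 * iwm_ba_task() will call ieee80211_addba_resp_accept() or
 * ieee80211_addba_resp_refuse() once the firmware has been updated.
 */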
3483 int
3484 iwm_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3485     uint8_t tid)
3486 {
3487 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3488 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3489 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3490 
3491 	/* We only implement Tx aggregation with DQA-capable firmware. */
3492 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
3493 		return ENOTSUP;
3494 
3495 	/* Ensure we can map this TID to an aggregation queue. */
3496 	if (tid >= IWM_MAX_TID_COUNT)
3497 		return EINVAL;
3498 
3499 	/* We only support a fixed Tx aggregation window size, for now. */
3500 	if (ba->ba_winsize != IWM_FRAME_LIMIT)
3501 		return ENOTSUP;
3502 
3503 	/* Is firmware already using Tx aggregation on this queue? */
3504 	if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3505 		return ENOSPC;
3506 
3507 	/* Are we already processing an ADDBA request? */
3508 	if (sc->ba_tx.start_tidmask & (1 << tid))
3509 		return EBUSY;
3510 
3511 	sc->ba_tx.start_tidmask |= (1 << tid);
3512 	iwm_add_task(sc, systq, &sc->ba_task);
3513 
3514 	return EBUSY;
3515 }
3516 
3517 void
3518 iwm_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3519     uint8_t tid)
3520 {
3521 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3522 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3523 
3524 	if (tid > IWM_MAX_TID_COUNT || sc->ba_tx.stop_tidmask & (1 << tid))
3525 		return;
3526 
3527 	/* Is firmware currently using Tx aggregation on this queue? */
3528 	if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3529 		return;
3530 
3531 	sc->ba_tx.stop_tidmask |= (1 << tid);
3532 	iwm_add_task(sc, systq, &sc->ba_task);
3533 }
3534 
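/*
 * Determine the MAC address of family-8000 devices: prefer the MAC
 * address override (MAO) section of the NVM and fall back to the OTP
 * address in the WFMP_MAC_ADDR registers if the override is absent,
 * reserved, broadcast, all-zero, or multicast.
 */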
3535 void
3536 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3537     const uint16_t *mac_override, const uint16_t *nvm_hw)
3538 {
3539 	const uint8_t *hw_addr;
3540 
3541 	if (mac_override) {
3542 		static const uint8_t reserved_mac[] = {
3543 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3544 		};
3545 
3546 		hw_addr = (const uint8_t *)(mac_override +
3547 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
3548 
3549 		/*
3550 		 * Store the MAC address from the MAO (MAC address override)
3551 		 * section. No byte swapping is required in this section.
3552 		 */
3553 		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3554 
3555 		/*
3556 		 * Fall back to the OTP MAC address if the NVM holds the
3557 		 * reserved MAC address, or if the given address is invalid.
3558 		 */
3559 		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3560 		    (memcmp(etherbroadcastaddr, data->hw_addr,
3561 		    sizeof(etherbroadcastaddr)) != 0) &&
3562 		    (memcmp(etheranyaddr, data->hw_addr,
3563 		    sizeof(etheranyaddr)) != 0) &&
3564 		    !ETHER_IS_MULTICAST(data->hw_addr))
3565 			return;
3566 	}
3567 
3568 	if (nvm_hw) {
3569 		/* Read the mac address from WFMP registers. */
3570 		uint32_t mac_addr0, mac_addr1;
3571 
3572 		if (!iwm_nic_lock(sc))
3573 			goto out;
3574 		mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3575 		mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3576 		iwm_nic_unlock(sc);
3577 
3578 		hw_addr = (const uint8_t *)&mac_addr0;
3579 		data->hw_addr[0] = hw_addr[3];
3580 		data->hw_addr[1] = hw_addr[2];
3581 		data->hw_addr[2] = hw_addr[1];
3582 		data->hw_addr[3] = hw_addr[0];
3583 
3584 		hw_addr = (const uint8_t *)&mac_addr1;
3585 		data->hw_addr[4] = hw_addr[1];
3586 		data->hw_addr[5] = hw_addr[0];
3587 
3588 		return;
3589 	}
3590 out:
3591 	printf("%s: mac address not found\n", DEVNAME(sc));
3592 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3593 }
3594 
3595 int
3596 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3597     const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3598     const uint16_t *mac_override, const uint16_t *phy_sku,
3599     const uint16_t *regulatory, int n_regulatory)
3600 {
3601 	struct iwm_nvm_data *data = &sc->sc_nvm;
3602 	uint8_t hw_addr[ETHER_ADDR_LEN];
3603 	uint32_t sku;
3604 	uint16_t lar_config;
3605 
3606 	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3607 
3608 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3609 		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3610 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3611 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3612 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3613 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3614 
3615 		sku = le16_to_cpup(nvm_sw + IWM_SKU);
3616 	} else {
3617 		uint32_t radio_cfg =
3618 		    le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
3619 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3620 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3621 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3622 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3623 		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3624 		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3625 
3626 		sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000));
3627 	}
3628 
3629 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3630 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3631 	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3632 	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3633 
3634 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3635 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
3636 				       IWM_NVM_LAR_OFFSET_8000_OLD :
3637 				       IWM_NVM_LAR_OFFSET_8000;
3638 
3639 		lar_config = le16_to_cpup(regulatory + lar_offset);
3640 		data->lar_enabled = !!(lar_config &
3641 				       IWM_NVM_LAR_ENABLED_8000);
3642 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000);
3643 	} else
3644 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3645 
3646 
3647 	/* 16-bit little-endian byte order: bytes are swapped pairwise (214365), e.g. aa:bb:cc:dd:ee:ff is stored as bb:aa:dd:cc:ff:ee. */
3648 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3649 		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3650 		data->hw_addr[0] = hw_addr[1];
3651 		data->hw_addr[1] = hw_addr[0];
3652 		data->hw_addr[2] = hw_addr[3];
3653 		data->hw_addr[3] = hw_addr[2];
3654 		data->hw_addr[4] = hw_addr[5];
3655 		data->hw_addr[5] = hw_addr[4];
3656 	} else
3657 		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3658 
3659 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3660 		if (sc->nvm_type == IWM_NVM_SDP) {
3661 			iwm_init_channel_map(sc, regulatory, iwm_nvm_channels,
3662 			    MIN(n_regulatory, nitems(iwm_nvm_channels)));
3663 		} else {
3664 			iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3665 			    iwm_nvm_channels, nitems(iwm_nvm_channels));
3666 		}
3667 	} else
3668 		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3669 		    iwm_nvm_channels_8000,
3670 		    MIN(n_regulatory, nitems(iwm_nvm_channels_8000)));
3671 
3672 	data->calib_version = 255;   /* TODO:
3673 					this value prevents some checks from
3674 					failing; we need to verify whether this
3675 					field is still needed and, if it is,
3676 					where it lives in the NVM */
3677 
3678 	return 0;
3679 }
3680 
3681 int
3682 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3683 {
3684 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3685 	const uint16_t *regulatory = NULL;
3686 	int n_regulatory = 0;
3687 
3688 	/* Check for required sections. */
3689 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3690 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3691 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3692 			return ENOENT;
3693 		}
3694 
3695 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3696 
3697 		if (sc->nvm_type == IWM_NVM_SDP) {
3698 			if (!sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data)
3699 				return ENOENT;
3700 			regulatory = (const uint16_t *)
3701 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data;
3702 			n_regulatory =
3703 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].length;
3704 		}
3705 	} else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3706 		/* SW and REGULATORY sections are mandatory */
3707 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3708 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3709 			return ENOENT;
3710 		}
3711 		/* MAC_OVERRIDE or at least HW section must exist */
3712 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3713 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3714 			return ENOENT;
3715 		}
3716 
3717 		/* PHY_SKU section is mandatory in B0 */
3718 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
3719 			return ENOENT;
3720 		}
3721 
3722 		regulatory = (const uint16_t *)
3723 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
3724 		n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY].length;
3725 		hw = (const uint16_t *)
3726 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
3727 		mac_override =
3728 			(const uint16_t *)
3729 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
3730 		phy_sku = (const uint16_t *)
3731 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
3732 	} else {
3733 		panic("unknown device family %d", sc->sc_device_family);
3734 	}
3735 
3736 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
3737 	calib = (const uint16_t *)
3738 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
3739 
3740 	/* XXX should pass in the length of every section */
3741 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
3742 	    phy_sku, regulatory, n_regulatory);
3743 }
3744 
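/*
 * Read all NVM sections we know about into temporary buffers and
 * parse them. Sections which fail to read are skipped here; missing
 * mandatory sections are caught by iwm_parse_nvm_sections().
 */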
3745 int
3746 iwm_nvm_init(struct iwm_softc *sc)
3747 {
3748 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
3749 	int i, section, err;
3750 	uint16_t len;
3751 	uint8_t *buf;
3752 	const size_t bufsz = sc->sc_nvm_max_section_size;
3753 
3754 	memset(nvm_sections, 0, sizeof(nvm_sections));
3755 
3756 	buf = malloc(bufsz, M_DEVBUF, M_WAIT);
3757 	if (buf == NULL)
3758 		return ENOMEM;
3759 
3760 	for (i = 0; i < nitems(iwm_nvm_to_read); i++) {
3761 		section = iwm_nvm_to_read[i];
3762 		KASSERT(section <= nitems(nvm_sections));
3763 
3764 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
3765 		if (err) {
3766 			err = 0;
3767 			continue;
3768 		}
3769 		nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
3770 		if (nvm_sections[section].data == NULL) {
3771 			err = ENOMEM;
3772 			break;
3773 		}
3774 		memcpy(nvm_sections[section].data, buf, len);
3775 		nvm_sections[section].length = len;
3776 	}
3777 	free(buf, M_DEVBUF, bufsz);
3778 	if (err == 0)
3779 		err = iwm_parse_nvm_sections(sc, nvm_sections);
3780 
3781 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3782 		if (nvm_sections[i].data != NULL)
3783 			free(nvm_sections[i].data, M_DEVBUF,
3784 			    nvm_sections[i].length);
3785 	}
3786 
3787 	return err;
3788 }
3789 
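/*
 * Upload one firmware section to the device, splitting it into chunks
 * of at most IWM_FH_MEM_TB_MAX_LENGTH bytes for the DMA engine.
 */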
3790 int
3791 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3792     const uint8_t *section, uint32_t byte_cnt)
3793 {
3794 	int err = EINVAL;
3795 	uint32_t chunk_sz, offset;
3796 
3797 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3798 
3799 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3800 		uint32_t addr, len;
3801 		const uint8_t *data;
3802 
3803 		addr = dst_addr + offset;
3804 		len = MIN(chunk_sz, byte_cnt - offset);
3805 		data = section + offset;
3806 
3807 		err = iwm_firmware_load_chunk(sc, addr, data, len);
3808 		if (err)
3809 			break;
3810 	}
3811 
3812 	return err;
3813 }
3814 
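/*
 * Upload a single chunk: copy it into the pre-allocated DMA-safe
 * buffer, point the FH service channel at that buffer, and sleep
 * until the interrupt handler sets sc_fw_chunk_done, giving up after
 * one second.
 */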
3815 int
3816 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
3817     const uint8_t *chunk, uint32_t byte_cnt)
3818 {
3819 	struct iwm_dma_info *dma = &sc->fw_dma;
3820 	int err;
3821 
3822 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
3823 	memcpy(dma->vaddr, chunk, byte_cnt);
3824 	bus_dmamap_sync(sc->sc_dmat,
3825 	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
3826 
3827 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
3828 	    dst_addr <= IWM_FW_MEM_EXTENDED_END)
3829 		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
3830 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3831 
3832 	sc->sc_fw_chunk_done = 0;
3833 
3834 	if (!iwm_nic_lock(sc))
3835 		return EBUSY;
3836 
3837 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3838 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
3839 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
3840 	    dst_addr);
3841 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
3842 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
3843 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
3844 	    (iwm_get_dma_hi_addr(dma->paddr)
3845 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
3846 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
3847 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
3848 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
3849 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
3850 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3851 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
3852 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
3853 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
3854 
3855 	iwm_nic_unlock(sc);
3856 
3857 	/* Wait for this segment to load. */
3858 	err = 0;
3859 	while (!sc->sc_fw_chunk_done) {
3860 		err = tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1));
3861 		if (err)
3862 			break;
3863 	}
3864 
3865 	if (!sc->sc_fw_chunk_done)
3866 		printf("%s: fw chunk addr 0x%x len %d failed to load\n",
3867 		    DEVNAME(sc), dst_addr, byte_cnt);
3868 
3869 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
3870 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
3871 		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3872 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3873 	}
3874 
3875 	return err;
3876 }
3877 
3878 int
3879 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3880 {
3881 	struct iwm_fw_sects *fws;
3882 	int err, i;
3883 	void *data;
3884 	uint32_t dlen;
3885 	uint32_t offset;
3886 
3887 	fws = &sc->sc_fw.fw_sects[ucode_type];
3888 	for (i = 0; i < fws->fw_count; i++) {
3889 		data = fws->fw_sect[i].fws_data;
3890 		dlen = fws->fw_sect[i].fws_len;
3891 		offset = fws->fw_sect[i].fws_devoff;
3892 		if (dlen > sc->sc_fwdmasegsz) {
3893 			err = EFBIG;
3894 		} else
3895 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3896 		if (err) {
3897 			printf("%s: could not load firmware chunk %u of %u\n",
3898 			    DEVNAME(sc), i, fws->fw_count);
3899 			return err;
3900 		}
3901 	}
3902 
3903 	iwm_enable_interrupts(sc);
3904 
3905 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
3906 
3907 	return 0;
3908 }
3909 
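/*
 * Family-8000 images carry sections for two CPUs, delimited by
 * separator sections. After each upload the ucode is notified via
 * IWM_FH_UCODE_LOAD_STATUS, where every loaded section contributes
 * one bit; bits for CPU2 sections are shifted left by 16.
 */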
3910 int
3911 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3912     int cpu, int *first_ucode_section)
3913 {
3914 	int shift_param;
3915 	int i, err = 0, sec_num = 0x1;
3916 	uint32_t val, last_read_idx = 0;
3917 	void *data;
3918 	uint32_t dlen;
3919 	uint32_t offset;
3920 
3921 	if (cpu == 1) {
3922 		shift_param = 0;
3923 		*first_ucode_section = 0;
3924 	} else {
3925 		shift_param = 16;
3926 		(*first_ucode_section)++;
3927 	}
3928 
3929 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3930 		last_read_idx = i;
3931 		data = fws->fw_sect[i].fws_data;
3932 		dlen = fws->fw_sect[i].fws_len;
3933 		offset = fws->fw_sect[i].fws_devoff;
3934 
3935 		/*
3936 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
3937 		 * CPU1 sections from the CPU2 sections.
3938 		 * The PAGING_SEPARATOR_SECTION delimiter separates the CPU2
3939 		 * non-paged sections from the CPU2 paging sections.
3940 		 */
3941 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3942 		    offset == IWM_PAGING_SEPARATOR_SECTION)
3943 			break;
3944 
3945 		if (dlen > sc->sc_fwdmasegsz) {
3946 			err = EFBIG;
3947 		} else
3948 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3949 		if (err) {
3950 			printf("%s: could not load firmware chunk %d "
3951 			    "(error %d)\n", DEVNAME(sc), i, err);
3952 			return err;
3953 		}
3954 
3955 		/* Notify the ucode of the loaded section number and status */
3956 		if (iwm_nic_lock(sc)) {
3957 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
3958 			val = val | (sec_num << shift_param);
3959 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
3960 			sec_num = (sec_num << 1) | 0x1;
3961 			iwm_nic_unlock(sc);
3962 		} else {
3963 			err = EBUSY;
3964 			printf("%s: could not load firmware chunk %d "
3965 			    "(error %d)\n", DEVNAME(sc), i, err);
3966 			return err;
3967 		}
3968 	}
3969 
3970 	*first_ucode_section = last_read_idx;
3971 
3972 	if (iwm_nic_lock(sc)) {
3973 		if (cpu == 1)
3974 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
3975 		else
3976 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
3977 		iwm_nic_unlock(sc);
3978 	} else {
3979 		err = EBUSY;
3980 		printf("%s: could not finalize firmware loading (error %d)\n",
3981 		    DEVNAME(sc), err);
3982 		return err;
3983 	}
3984 
3985 	return 0;
3986 }
3987 
3988 int
3989 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3990 {
3991 	struct iwm_fw_sects *fws;
3992 	int err = 0;
3993 	int first_ucode_section;
3994 
3995 	fws = &sc->sc_fw.fw_sects[ucode_type];
3996 
3997 	/* configure the ucode to be ready to get the secured image */
3998 	/* release CPU reset */
3999 	if (iwm_nic_lock(sc)) {
4000 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
4001 		    IWM_RELEASE_CPU_RESET_BIT);
4002 		iwm_nic_unlock(sc);
4003 	}
4004 
4005 	/* load to FW the binary Secured sections of CPU1 */
4006 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
4007 	if (err)
4008 		return err;
4009 
4010 	/* load to FW the binary sections of CPU2 */
4011 	err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
4012 	if (err)
4013 		return err;
4014 
4015 	iwm_enable_interrupts(sc);
4016 	return 0;
4017 }
4018 
4019 int
4020 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4021 {
4022 	int err, w;
4023 
4024 	sc->sc_uc.uc_intr = 0;
4025 
4026 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
4027 		err = iwm_load_firmware_8000(sc, ucode_type);
4028 	else
4029 		err = iwm_load_firmware_7000(sc, ucode_type);
4030 
4031 	if (err)
4032 		return err;
4033 
4034 	/* wait for the firmware to load */
4035 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
4036 		err = tsleep_nsec(&sc->sc_uc, 0, "iwmuc", MSEC_TO_NSEC(100));
4037 	}
4038 	if (err || !sc->sc_uc.uc_ok)
4039 		printf("%s: could not load firmware\n", DEVNAME(sc));
4040 
4041 	return err;
4042 }
4043 
4044 int
4045 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4046 {
4047 	int err;
4048 
4049 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
4050 
4051 	err = iwm_nic_init(sc);
4052 	if (err) {
4053 		printf("%s: unable to init nic\n", DEVNAME(sc));
4054 		return err;
4055 	}
4056 
4057 	/* make sure rfkill handshake bits are cleared */
4058 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4059 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
4060 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4061 
4062 	/* clear (again), then enable firmware load interrupt */
4063 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
4064 	iwm_enable_fwload_interrupt(sc);
4065 
4066 	/* really make sure rfkill handshake bits are cleared */
4067 	/* maybe we should write a few times more?  just to make sure */
4068 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4069 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4070 
4071 	return iwm_load_firmware(sc, ucode_type);
4072 }
4073 
4074 int
4075 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
4076 {
4077 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
4078 		.valid = htole32(valid_tx_ant),
4079 	};
4080 
4081 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
4082 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4083 }
4084 
4085 int
4086 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
4087 {
4088 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
4089 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
4090 
4091 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
4092 	phy_cfg_cmd.calib_control.event_trigger =
4093 	    sc->sc_default_calib[ucode_type].event_trigger;
4094 	phy_cfg_cmd.calib_control.flow_trigger =
4095 	    sc->sc_default_calib[ucode_type].flow_trigger;
4096 
4097 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
4098 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4099 }
4100 
4101 int
4102 iwm_send_dqa_cmd(struct iwm_softc *sc)
4103 {
4104 	struct iwm_dqa_enable_cmd dqa_cmd = {
4105 		.cmd_queue = htole32(IWM_DQA_CMD_QUEUE),
4106 	};
4107 	uint32_t cmd_id;
4108 
4109 	cmd_id = iwm_cmd_id(IWM_DQA_ENABLE_CMD, IWM_DATA_PATH_GROUP, 0);
4110 	return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4111 }
4112 
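/*
 * Load a ucode image and wait for the "alive" notification from the
 * firmware. The command queue ID depends on whether this firmware
 * uses DQA, so it must be chosen before any command is submitted.
 */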
4113 int
4114 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
4115 	enum iwm_ucode_type ucode_type)
4116 {
4117 	enum iwm_ucode_type old_type = sc->sc_uc_current;
4118 	struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
4119 	int err;
4120 
4121 	err = iwm_read_firmware(sc, ucode_type);
4122 	if (err)
4123 		return err;
4124 
4125 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
4126 		sc->cmdqid = IWM_DQA_CMD_QUEUE;
4127 	else
4128 		sc->cmdqid = IWM_CMD_QUEUE;
4129 
4130 	sc->sc_uc_current = ucode_type;
4131 	err = iwm_start_fw(sc, ucode_type);
4132 	if (err) {
4133 		sc->sc_uc_current = old_type;
4134 		return err;
4135 	}
4136 
4137 	err = iwm_post_alive(sc);
4138 	if (err)
4139 		return err;
4140 
4141 	/*
4142 	 * Configure and operate the firmware paging mechanism.
4143 	 * The driver configures the paging flow only once; the CPU2 paging
4144 	 * image is included in the IWM_UCODE_INIT image.
4145 	 */
4146 	if (fw->paging_mem_size) {
4147 		err = iwm_save_fw_paging(sc, fw);
4148 		if (err) {
4149 			printf("%s: failed to save the FW paging image\n",
4150 			    DEVNAME(sc));
4151 			return err;
4152 		}
4153 
4154 		err = iwm_send_paging_cmd(sc, fw);
4155 		if (err) {
4156 			printf("%s: failed to send the paging cmd\n",
4157 			    DEVNAME(sc));
4158 			iwm_free_fw_paging(sc);
4159 			return err;
4160 		}
4161 	}
4162 
4163 	return 0;
4164 }
4165 
4166 int
4167 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
4168 {
4169 	const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE);
4170 	int err;
4171 
4172 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
4173 		printf("%s: radio is disabled by hardware switch\n",
4174 		    DEVNAME(sc));
4175 		return EPERM;
4176 	}
4177 
4178 	sc->sc_init_complete = 0;
4179 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
4180 	if (err) {
4181 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
4182 		return err;
4183 	}
4184 
4185 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
4186 		err = iwm_send_bt_init_conf(sc);
4187 		if (err) {
4188 			printf("%s: could not init bt coex (error %d)\n",
4189 			    DEVNAME(sc), err);
4190 			return err;
4191 		}
4192 	}
4193 
4194 	if (justnvm) {
4195 		err = iwm_nvm_init(sc);
4196 		if (err) {
4197 			printf("%s: failed to read nvm\n", DEVNAME(sc));
4198 			return err;
4199 		}
4200 
4201 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
4202 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
4203 			    sc->sc_nvm.hw_addr);
4204 
4205 		return 0;
4206 	}
4207 
4208 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
4209 	if (err)
4210 		return err;
4211 
4212 	/* Send TX valid antennas before triggering calibrations */
4213 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
4214 	if (err)
4215 		return err;
4216 
4217 	/*
4218 	 * Send the phy configuration command to the init uCode
4219 	 * to start the init image's internal calibrations.
4220 	 */
4221 	err = iwm_send_phy_cfg_cmd(sc);
4222 	if (err)
4223 		return err;
4224 
4225 	/*
4226 	 * Nothing to do but wait for the init complete and phy DB
4227 	 * notifications from the firmware.
4228 	 */
4229 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4230 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwminit",
4231 		    SEC_TO_NSEC(2));
4232 		if (err)
4233 			break;
4234 	}
4235 
4236 	return err;
4237 }
4238 
4239 int
4240 iwm_config_ltr(struct iwm_softc *sc)
4241 {
4242 	struct iwm_ltr_config_cmd cmd = {
4243 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
4244 	};
4245 
4246 	if (!sc->sc_ltr_enabled)
4247 		return 0;
4248 
4249 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4250 }
4251 
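/*
 * Attach a fresh mbuf (a regular cluster, or an IWM_RBUF_SIZE buffer
 * for larger sizes) to an RX ring slot and write its DMA address into
 * the ring descriptor. If reloading a previously loaded map fails we
 * have lost the slot's buffer and cannot recover, hence the panic in
 * that case.
 */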
4252 int
4253 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
4254 {
4255 	struct iwm_rx_ring *ring = &sc->rxq;
4256 	struct iwm_rx_data *data = &ring->data[idx];
4257 	struct mbuf *m;
4258 	int err;
4259 	int fatal = 0;
4260 
4261 	m = m_gethdr(M_DONTWAIT, MT_DATA);
4262 	if (m == NULL)
4263 		return ENOBUFS;
4264 
4265 	if (size <= MCLBYTES) {
4266 		MCLGET(m, M_DONTWAIT);
4267 	} else {
4268 		MCLGETL(m, M_DONTWAIT, IWM_RBUF_SIZE);
4269 	}
4270 	if ((m->m_flags & M_EXT) == 0) {
4271 		m_freem(m);
4272 		return ENOBUFS;
4273 	}
4274 
4275 	if (data->m != NULL) {
4276 		bus_dmamap_unload(sc->sc_dmat, data->map);
4277 		fatal = 1;
4278 	}
4279 
4280 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4281 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4282 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4283 	if (err) {
4284 		/* XXX */
4285 		if (fatal)
4286 			panic("iwm: could not load RX mbuf");
4287 		m_freem(m);
4288 		return err;
4289 	}
4290 	data->m = m;
4291 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4292 
4293 	/* Update RX descriptor. */
4294 	if (sc->sc_mqrx_supported) {
4295 		((uint64_t *)ring->desc)[idx] =
4296 		    htole64(data->map->dm_segs[0].ds_addr);
4297 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4298 		    idx * sizeof(uint64_t), sizeof(uint64_t),
4299 		    BUS_DMASYNC_PREWRITE);
4300 	} else {
4301 		((uint32_t *)ring->desc)[idx] =
4302 		    htole32(data->map->dm_segs[0].ds_addr >> 8);
4303 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4304 		    idx * sizeof(uint32_t), sizeof(uint32_t),
4305 		    BUS_DMASYNC_PREWRITE);
4306 	}
4307 
4308 	return 0;
4309 }
4310 
4311 /*
4312  * RSSI values are reported by the FW as positive values; negate them to
4313  * obtain dBm.  Account for missing antennas by replacing 0 values with
4314  * -256 dBm: practically zero power and an infeasible 8-bit value.
4315  */
4316 int
4317 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
4318 {
4319 	int energy_a, energy_b, energy_c, max_energy;
4320 	uint32_t val;
4321 
4322 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
4323 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
4324 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
4325 	energy_a = energy_a ? -energy_a : -256;
4326 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
4327 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
4328 	energy_b = energy_b ? -energy_b : -256;
4329 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
4330 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
4331 	energy_c = energy_c ? -energy_c : -256;
4332 	max_energy = MAX(energy_a, energy_b);
4333 	max_energy = MAX(max_energy, energy_c);
4334 
4335 	return max_energy;
4336 }
4337 
4338 int
4339 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
4340     struct iwm_rx_mpdu_desc *desc)
4341 {
4342 	int energy_a, energy_b;
4343 
4344 	energy_a = desc->v1.energy_a;
4345 	energy_b = desc->v1.energy_b;
4346 	energy_a = energy_a ? -energy_a : -256;
4347 	energy_b = energy_b ? -energy_b : -256;
4348 	return MAX(energy_a, energy_b);
4349 }
4350 
4351 void
4352 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4353     struct iwm_rx_data *data)
4354 {
4355 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
4356 
4357 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4358 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
4359 
4360 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4361 }
4362 
4363 /*
4364  * Retrieve the average noise (in dBm) among receivers.
4365  */
4366 int
4367 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
4368 {
4369 	int i, total, nbant, noise;
4370 
4371 	total = nbant = noise = 0;
4372 	for (i = 0; i < 3; i++) {
4373 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
4374 		if (noise) {
4375 			total += noise;
4376 			nbant++;
4377 		}
4378 	}
4379 
4380 	/* There should be at least one antenna but check anyway. */
4381 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4382 }
4383 
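/*
 * The 8-byte CCMP header stores the 48-bit packet number (PN) as
 * PN0 PN1 <reserved> <key-id/ExtIV> PN2 PN3 PN4 PN5, which is why
 * the replay check below assembles the PN from bytes 0, 1, 4-7.
 */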
4384 int
4385 iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4386     struct ieee80211_rxinfo *rxi)
4387 {
4388 	struct ieee80211com *ic = &sc->sc_ic;
4389 	struct ieee80211_key *k = &ni->ni_pairwise_key;
4390 	struct ieee80211_frame *wh;
4391 	uint64_t pn, *prsc;
4392 	uint8_t *ivp;
4393 	uint8_t tid;
4394 	int hdrlen, hasqos;
4395 
4396 	wh = mtod(m, struct ieee80211_frame *);
4397 	hdrlen = ieee80211_get_hdrlen(wh);
4398 	ivp = (uint8_t *)wh + hdrlen;
4399 
4400 	/* Check that ExtIV bit is set. */
4401 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4402 		return 1;
4403 
4404 	hasqos = ieee80211_has_qos(wh);
4405 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4406 	prsc = &k->k_rsc[tid];
4407 
4408 	/* Extract the 48-bit PN from the CCMP header. */
4409 	pn = (uint64_t)ivp[0]       |
4410 	     (uint64_t)ivp[1] <<  8 |
4411 	     (uint64_t)ivp[4] << 16 |
4412 	     (uint64_t)ivp[5] << 24 |
4413 	     (uint64_t)ivp[6] << 32 |
4414 	     (uint64_t)ivp[7] << 40;
4415 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4416 		if (pn < *prsc) {
4417 			ic->ic_stats.is_ccmp_replays++;
4418 			return 1;
4419 		}
4420 	} else if (pn <= *prsc) {
4421 		ic->ic_stats.is_ccmp_replays++;
4422 		return 1;
4423 	}
4424 	/* Last seen packet number is updated in ieee80211_inputm(). */
4425 
4426 	/*
4427 	 * Some firmware versions strip the MIC, and some don't. It is not
4428 	 * clear which of the capability flags could tell us what to expect.
4429 	 * For now, keep things simple and just leave the MIC in place if
4430 	 * it is present.
4431 	 *
4432 	 * The IV will be stripped by ieee80211_inputm().
4433 	 */
4434 	return 0;
4435 }
4436 
4437 int
4438 iwm_rx_hwdecrypt(struct iwm_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4439     struct ieee80211_rxinfo *rxi)
4440 {
4441 	struct ieee80211com *ic = &sc->sc_ic;
4442 	struct ifnet *ifp = IC2IFP(ic);
4443 	struct ieee80211_frame *wh;
4444 	struct ieee80211_node *ni;
4445 	int ret = 0;
4446 	uint8_t type, subtype;
4447 
4448 	wh = mtod(m, struct ieee80211_frame *);
4449 
4450 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4451 	if (type == IEEE80211_FC0_TYPE_CTL)
4452 		return 0;
4453 
4454 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4455 	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
4456 		return 0;
4457 
4458 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4459 	    !(wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4460 		return 0;
4461 
4462 	ni = ieee80211_find_rxnode(ic, wh);
4463 	/* Handle hardware decryption. */
4464 	if ((ni->ni_flags & IEEE80211_NODE_RXPROT) &&
4465 	    ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
4466 		if ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4467 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4468 			ic->ic_stats.is_ccmp_dec_errs++;
4469 			ret = 1;
4470 			goto out;
4471 		}
4472 		/* Check whether decryption was successful or not. */
4473 		if ((rx_pkt_status &
4474 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4475 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) !=
4476 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4477 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) {
4478 			ic->ic_stats.is_ccmp_dec_errs++;
4479 			ret = 1;
4480 			goto out;
4481 		}
4482 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4483 	}
4484 out:
4485 	if (ret)
4486 		ifp->if_ierrors++;
4487 	ieee80211_release_node(ic, ni);
4488 	return ret;
4489 }
4490 
4491 void
4492 iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx,
4493     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4494     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4495     struct mbuf_list *ml)
4496 {
4497 	struct ieee80211com *ic = &sc->sc_ic;
4498 	struct ifnet *ifp = IC2IFP(ic);
4499 	struct ieee80211_frame *wh;
4500 	struct ieee80211_node *ni;
4501 	struct ieee80211_channel *bss_chan;
4502 	uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
4503 
4504 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
4505 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4506 
4507 	wh = mtod(m, struct ieee80211_frame *);
4508 	ni = ieee80211_find_rxnode(ic, wh);
4509 	if (ni == ic->ic_bss) {
4510 		/*
4511 		 * We may switch ic_bss's channel during scans.
4512 		 * Record the current channel so we can restore it later.
4513 		 */
4514 		bss_chan = ni->ni_chan;
4515 		IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
4516 	}
4517 	ni->ni_chan = &ic->ic_channels[chanidx];
4518 
4519 	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4520 	    iwm_ccmp_decap(sc, m, ni, rxi) != 0) {
4521 		ifp->if_ierrors++;
4522 		m_freem(m);
4523 		ieee80211_release_node(ic, ni);
4524 		return;
4525 	}
4526 
4527 #if NBPFILTER > 0
4528 	if (sc->sc_drvbpf != NULL) {
4529 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
4530 		uint16_t chan_flags;
4531 
4532 		tap->wr_flags = 0;
4533 		if (is_shortpre)
4534 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4535 		tap->wr_chan_freq =
4536 		    htole16(ic->ic_channels[chanidx].ic_freq);
4537 		chan_flags = ic->ic_channels[chanidx].ic_flags;
4538 		if (ic->ic_curmode != IEEE80211_MODE_11N)
4539 			chan_flags &= ~IEEE80211_CHAN_HT;
4540 		tap->wr_chan_flags = htole16(chan_flags);
4541 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4542 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4543 		tap->wr_tsft = device_timestamp;
4544 		if (rate_n_flags & IWM_RATE_MCS_HT_MSK) {
4545 			uint8_t mcs = (rate_n_flags &
4546 			    (IWM_RATE_HT_MCS_RATE_CODE_MSK |
4547 			    IWM_RATE_HT_MCS_NSS_MSK));
4548 			tap->wr_rate = (0x80 | mcs);
4549 		} else {
4550 			uint8_t rate = (rate_n_flags &
4551 			    IWM_RATE_LEGACY_RATE_MSK);
4552 			switch (rate) {
4553 			/* CCK rates. */
4554 			case  10: tap->wr_rate =   2; break;
4555 			case  20: tap->wr_rate =   4; break;
4556 			case  55: tap->wr_rate =  11; break;
4557 			case 110: tap->wr_rate =  22; break;
4558 			/* OFDM rates. */
4559 			case 0xd: tap->wr_rate =  12; break;
4560 			case 0xf: tap->wr_rate =  18; break;
4561 			case 0x5: tap->wr_rate =  24; break;
4562 			case 0x7: tap->wr_rate =  36; break;
4563 			case 0x9: tap->wr_rate =  48; break;
4564 			case 0xb: tap->wr_rate =  72; break;
4565 			case 0x1: tap->wr_rate =  96; break;
4566 			case 0x3: tap->wr_rate = 108; break;
4567 			/* Unknown rate: should not happen. */
4568 			default:  tap->wr_rate =   0;
4569 			}
4570 		}
4571 
4572 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4573 		    m, BPF_DIRECTION_IN);
4574 	}
4575 #endif
4576 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4577 	/*
4578 	 * ieee80211_inputm() might have changed our BSS.
4579 	 * Restore ic_bss's channel if we are still in the same BSS.
4580 	 */
4581 	if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
4582 		ni->ni_chan = bss_chan;
4583 	ieee80211_release_node(ic, ni);
4584 }
4585 
4586 void
4587 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
4588     size_t maxlen, struct mbuf_list *ml)
4589 {
4590 	struct ieee80211com *ic = &sc->sc_ic;
4591 	struct ieee80211_rxinfo rxi;
4592 	struct iwm_rx_phy_info *phy_info;
4593 	struct iwm_rx_mpdu_res_start *rx_res;
4594 	int device_timestamp;
4595 	uint16_t phy_flags;
4596 	uint32_t len;
4597 	uint32_t rx_pkt_status;
4598 	int rssi, chanidx, rate_n_flags;
4599 
4600 	memset(&rxi, 0, sizeof(rxi));
4601 
4602 	phy_info = &sc->sc_last_phy_info;
4603 	rx_res = (struct iwm_rx_mpdu_res_start *)pktdata;
4604 	len = le16toh(rx_res->byte_count);
4605 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4606 		/* Allow control frames in monitor mode. */
4607 		if (len < sizeof(struct ieee80211_frame_cts)) {
4608 			ic->ic_stats.is_rx_tooshort++;
4609 			IC2IFP(ic)->if_ierrors++;
4610 			m_freem(m);
4611 			return;
4612 		}
4613 	} else if (len < sizeof(struct ieee80211_frame)) {
4614 		ic->ic_stats.is_rx_tooshort++;
4615 		IC2IFP(ic)->if_ierrors++;
4616 		m_freem(m);
4617 		return;
4618 	}
4619 	if (len > maxlen - sizeof(*rx_res)) {
4620 		IC2IFP(ic)->if_ierrors++;
4621 		m_freem(m);
4622 		return;
4623 	}
4624 
4625 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
4626 		m_freem(m);
4627 		return;
4628 	}
4629 
4630 	rx_pkt_status = le32toh(*(uint32_t *)(pktdata + sizeof(*rx_res) + len));
4631 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
4632 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
4633 		m_freem(m);
4634 		return; /* drop */
4635 	}
4636 
4637 	m->m_data = pktdata + sizeof(*rx_res);
4638 	m->m_pkthdr.len = m->m_len = len;
4639 
4640 	if (iwm_rx_hwdecrypt(sc, m, rx_pkt_status, &rxi)) {
4641 		m_freem(m);
4642 		return;
4643 	}
4644 
4645 	chanidx = letoh32(phy_info->channel);
4646 	device_timestamp = le32toh(phy_info->system_timestamp);
4647 	phy_flags = letoh16(phy_info->phy_flags);
4648 	rate_n_flags = le32toh(phy_info->rate_n_flags);
4649 
4650 	rssi = iwm_get_signal_strength(sc, phy_info);
4651 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
4652 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
4653 
4654 	rxi.rxi_rssi = rssi;
4655 	rxi.rxi_tstamp = device_timestamp;
4656 
4657 	iwm_rx_frame(sc, m, chanidx, rx_pkt_status,
4658 	    (phy_flags & IWM_PHY_INFO_FLAG_SHPREAMBLE),
4659 	    rate_n_flags, device_timestamp, &rxi, ml);
4660 }
4661 
4662 void
4663 iwm_flip_address(uint8_t *addr)
4664 {
4665 	int i;
4666 	uint8_t mac_addr[ETHER_ADDR_LEN];
4667 
4668 	for (i = 0; i < ETHER_ADDR_LEN; i++)
4669 		mac_addr[i] = addr[ETHER_ADDR_LEN - i - 1];
4670 	IEEE80211_ADDR_COPY(addr, mac_addr);
4671 }
4672 
4673 /*
4674  * Drop duplicate 802.11 retransmissions
4675  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4676  * and handle pseudo-duplicate frames which result from deaggregation
4677  * of A-MSDU frames in hardware.
4678  */
4679 int
4680 iwm_detect_duplicate(struct iwm_softc *sc, struct mbuf *m,
4681     struct iwm_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4682 {
4683 	struct ieee80211com *ic = &sc->sc_ic;
4684 	struct iwm_node *in = (void *)ic->ic_bss;
4685 	struct iwm_rxq_dup_data *dup_data = &in->dup_data;
4686 	uint8_t tid = IWM_MAX_TID_COUNT, subframe_idx;
4687 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4688 	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4689 	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4690 	int hasqos = ieee80211_has_qos(wh);
4691 	uint16_t seq;
4692 
4693 	if (type == IEEE80211_FC0_TYPE_CTL ||
4694 	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4695 	    IEEE80211_IS_MULTICAST(wh->i_addr1))
4696 		return 0;
4697 
4698 	if (hasqos) {
4699 		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4700 		if (tid > IWM_MAX_TID_COUNT)
4701 			tid = IWM_MAX_TID_COUNT;
4702 	}
4703 
4704 	/* If this frame wasn't part of an A-MSDU, the sub-frame index is 0. */
4705 	subframe_idx = desc->amsdu_info &
4706 		IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4707 
4708 	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4709 	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4710 	    dup_data->last_seq[tid] == seq &&
4711 	    dup_data->last_sub_frame[tid] >= subframe_idx)
4712 		return 1;
4713 
4714 	/*
4715 	 * Allow the same frame sequence number for all A-MSDU subframes
4716 	 * following the first subframe.
4717 	 * Otherwise these subframes would be discarded as replays.
4718 	 */
4719 	if (dup_data->last_seq[tid] == seq &&
4720 	    subframe_idx > dup_data->last_sub_frame[tid] &&
4721 	    (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU)) {
4722 		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4723 	}
4724 
4725 	dup_data->last_seq[tid] = seq;
4726 	dup_data->last_sub_frame[tid] = subframe_idx;
4727 
4728 	return 0;
4729 }
4730 
4731 /*
4732  * Returns true if sn2 - buffer_size < sn1 < sn2 (modulo 4096), e.g. for
4733  * buffer_size 64 and sn2 = 100: sn1 in 37..99. Use only to compare the
4734  * reorder buffer head with the NSSN. We fully trust NSSN unless it is
4735  * behind us due to reorder timeout, which can bring us at most buffer_size SNs ahead of the NSSN.
4736  */
4737 int
4738 iwm_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
4739 {
4740 	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
4741 }
4742 
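/*
 * Release buffered frames, in sequence order, up to but not including
 * the new SSN, then re-arm the reorder timer if frames remain buffered
 * or cancel it otherwise.
 */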
4743 void
4744 iwm_release_frames(struct iwm_softc *sc, struct ieee80211_node *ni,
4745     struct iwm_rxba_data *rxba, struct iwm_reorder_buffer *reorder_buf,
4746     uint16_t nssn, struct mbuf_list *ml)
4747 {
4748 	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
4749 	uint16_t ssn = reorder_buf->head_sn;
4750 
4751 	/* ignore nssn smaller than head sn - this can happen due to timeout */
4752 	if (iwm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
4753 		goto set_timer;
4754 
4755 	while (iwm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
4756 		int index = ssn % reorder_buf->buf_size;
4757 		struct mbuf *m;
4758 		int chanidx, is_shortpre;
4759 		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
4760 		struct ieee80211_rxinfo *rxi;
4761 
4762 		/* This data is the same for all A-MSDU subframes. */
4763 		chanidx = entries[index].chanidx;
4764 		rx_pkt_status = entries[index].rx_pkt_status;
4765 		is_shortpre = entries[index].is_shortpre;
4766 		rate_n_flags = entries[index].rate_n_flags;
4767 		device_timestamp = entries[index].device_timestamp;
4768 		rxi = &entries[index].rxi;
4769 
4770 		/*
4771 		 * Empty the list. It will hold more than one frame for an
4772 		 * A-MSDU. An empty list is valid as well, since the nssn
4773 		 * indicates frames were received.
4774 		 */
4775 		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
4776 			iwm_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
4777 			    rate_n_flags, device_timestamp, rxi, ml);
4778 			reorder_buf->num_stored--;
4779 
4780 			/*
4781 			 * Allow the same frame sequence number and CCMP PN for
4782 			 * all A-MSDU subframes following the first subframe.
4783 			 * Otherwise they would be discarded as replays.
4784 			 */
4785 			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4786 			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4787 		}
4788 
4789 		ssn = (ssn + 1) & 0xfff;
4790 	}
4791 	reorder_buf->head_sn = nssn;
4792 
4793 set_timer:
4794 	if (reorder_buf->num_stored && !reorder_buf->removed) {
4795 		timeout_add_usec(&reorder_buf->reorder_timer,
4796 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
4797 	} else
4798 		timeout_del(&reorder_buf->reorder_timer);
4799 }
4800 
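/*
 * Work around peers which keep sending frames with sequence numbers
 * behind our BA window. The drop counter advances at most once per
 * (A-)MPDU, with the GP2 timestamp used to tell A-MPDUs apart; once
 * IWM_AMPDU_CONSEC_DROPS_DELBA consecutive old-SN A-MPDUs have been
 * seen, the BA session is torn down with a DELBA request.
 */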
4801 int
4802 iwm_oldsn_workaround(struct iwm_softc *sc, struct ieee80211_node *ni, int tid,
4803     struct iwm_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
4804 {
4805 	struct ieee80211com *ic = &sc->sc_ic;
4806 
4807 	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
4808 		/* we have a new (A-)MPDU ... */
4809 
4810 		/*
4811 		 * reset counter to 0 if we didn't have any oldsn in
4812 		 * the last A-MPDU (as detected by GP2 being identical)
4813 		 */
4814 		if (!buffer->consec_oldsn_prev_drop)
4815 			buffer->consec_oldsn_drops = 0;
4816 
4817 		/* either way, update our tracking state */
4818 		buffer->consec_oldsn_ampdu_gp2 = gp2;
4819 	} else if (buffer->consec_oldsn_prev_drop) {
4820 		/*
4821 		 * tracking state didn't change, and we had an old SN
4822 		 * indication before - do nothing in this case, we
4823 		 * already noted this one down and are waiting for the
4824 		 * next A-MPDU (by GP2)
4825 		 */
4826 		return 0;
4827 	}
4828 
4829 	/* return unless this MPDU has old SN */
4830 	if (!(reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN))
4831 		return 0;
4832 
4833 	/* update state */
4834 	buffer->consec_oldsn_prev_drop = 1;
4835 	buffer->consec_oldsn_drops++;
4836 
4837 	/* if limit is reached, send del BA and reset state */
4838 	if (buffer->consec_oldsn_drops == IWM_AMPDU_CONSEC_DROPS_DELBA) {
4839 		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
4840 		    0, tid);
4841 		buffer->consec_oldsn_prev_drop = 0;
4842 		buffer->consec_oldsn_drops = 0;
4843 		return 1;
4844 	}
4845 
4846 	return 0;
4847 }
4848 
4849 /*
4850  * Handle re-ordering of frames which were de-aggregated in hardware.
4851  * Returns 1 if the MPDU was consumed (buffered or dropped).
4852  * Returns 0 if the MPDU should be passed to upper layer.
4853  */
4854 int
4855 iwm_rx_reorder(struct iwm_softc *sc, struct mbuf *m, int chanidx,
4856     struct iwm_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
4857     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4858     struct mbuf_list *ml)
4859 {
4860 	struct ieee80211com *ic = &sc->sc_ic;
4861 	struct ieee80211_frame *wh;
4862 	struct ieee80211_node *ni;
4863 	struct iwm_rxba_data *rxba;
4864 	struct iwm_reorder_buffer *buffer;
4865 	uint32_t reorder_data = le32toh(desc->reorder_data);
4866 	int is_amsdu = (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU);
4867 	int last_subframe =
4868 		(desc->amsdu_info & IWM_RX_MPDU_AMSDU_LAST_SUBFRAME);
4869 	uint8_t tid;
4870 	uint8_t subframe_idx = (desc->amsdu_info &
4871 	    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4872 	struct iwm_reorder_buf_entry *entries;
4873 	int index;
4874 	uint16_t nssn, sn;
4875 	uint8_t baid, type, subtype;
4876 	int hasqos;
4877 
4878 	wh = mtod(m, struct ieee80211_frame *);
4879 	hasqos = ieee80211_has_qos(wh);
4880 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4881 
4882 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4883 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4884 	ni = ieee80211_find_rxnode(ic, wh);
4885 
4886 	/*
4887 	 * We are only interested in Block Ack requests and unicast QoS data.
4888 	 */
4889 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
4890 		return 0;
4891 	if (hasqos) {
4892 		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
4893 			return 0;
4894 	} else {
4895 		if (type != IEEE80211_FC0_TYPE_CTL ||
4896 		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
4897 			return 0;
4898 	}
4899 
4900 	baid = (reorder_data & IWM_RX_MPDU_REORDER_BAID_MASK) >>
4901 		IWM_RX_MPDU_REORDER_BAID_SHIFT;
4902 	if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
4903 	    baid >= nitems(sc->sc_rxba_data))
4904 		return 0;
4905 
4906 	rxba = &sc->sc_rxba_data[baid];
4907 	if (rxba == NULL || tid != rxba->tid || rxba->sta_id != IWM_STATION_ID)
4908 		return 0;
4909 
4910 	/* Bypass A-MPDU re-ordering in net80211. */
4911 	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
4912 
4913 	nssn = reorder_data & IWM_RX_MPDU_REORDER_NSSN_MASK;
4914 	sn = (reorder_data & IWM_RX_MPDU_REORDER_SN_MASK) >>
4915 		IWM_RX_MPDU_REORDER_SN_SHIFT;
4916 
4917 	buffer = &rxba->reorder_buf;
4918 	entries = &rxba->entries[0];
4919 
4920 	if (!buffer->valid) {
4921 		if (reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN)
4922 			return 0;
4923 		buffer->valid = 1;
4924 	}
4925 
4926 	if (type == IEEE80211_FC0_TYPE_CTL &&
4927 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
4928 		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
4929 		goto drop;
4930 	}
4931 
4932 	/*
4933 	 * If there was a significant jump in the nssn, adjust.
4934 	 * If the SN is smaller than the NSSN it might need to go into the
4935 	 * reorder buffer first; in that case we just release up to it and
4936 	 * the rest of the function takes care of storing it and releasing
4937 	 * up to the nssn.
4938 	 */
4939 	if (!iwm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
4940 	    buffer->buf_size) ||
4941 	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
4942 		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
4943 		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
4944 		iwm_release_frames(sc, ni, rxba, buffer, min_sn, ml);
4945 	}
4946 
4947 	if (iwm_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
4948 	    device_timestamp)) {
4949 		 /* BA session will be torn down. */
4950 		ic->ic_stats.is_ht_rx_ba_window_jump++;
4951 		goto drop;
4952 
4953 	}
4954 
4955 	/* drop any outdated packets */
4956 	if (SEQ_LT(sn, buffer->head_sn)) {
4957 		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
4958 		goto drop;
4959 	}
4960 
4961 	/* release immediately if allowed by nssn and no stored frames */
4962 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
4963 		if (iwm_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
4964 		   (!is_amsdu || last_subframe))
4965 			buffer->head_sn = nssn;
4966 		return 0;
4967 	}
4968 
4969 	/*
4970 	 * Release immediately if there are no stored frames and the SN
4971 	 * equals the head. This can happen due to the reorder timer, which
4972 	 * leaves the NSSN behind head_sn: after we have released everything
4973 	 * and the next in-sequence frame arrives, the NSSN says we cannot
4974 	 * release it immediately, yet technically there is no hole and we
4975 	 * can move forward.
4976 	 */
4977 	if (!buffer->num_stored && sn == buffer->head_sn) {
4978 		if (!is_amsdu || last_subframe)
4979 			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
4980 		return 0;
4981 	}
4982 
4983 	index = sn % buffer->buf_size;
4984 
4985 	/*
4986 	 * Check if we have already stored this frame. An A-MSDU is either
4987 	 * received as a whole or not at all, so the logic is simple: if
4988 	 * frames are already stored at this position and the last stored
4989 	 * A-MSDU frame had a different SN, this is a retransmission. If the
4990 	 * SN is the same, an incrementing subframe index means it belongs
4991 	 * to the same A-MSDU; otherwise it is a retransmission.
4992 	 */
4993 	if (!ml_empty(&entries[index].frames)) {
4994 		if (!is_amsdu) {
4995 			ic->ic_stats.is_ht_rx_ba_no_buf++;
4996 			goto drop;
4997 		} else if (sn != buffer->last_amsdu ||
4998 		    buffer->last_sub_index >= subframe_idx) {
4999 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5000 			goto drop;
5001 		}
5002 	} else {
5003 		/* This data is the same for all A-MSDU subframes. */
5004 		entries[index].chanidx = chanidx;
5005 		entries[index].is_shortpre = is_shortpre;
5006 		entries[index].rate_n_flags = rate_n_flags;
5007 		entries[index].device_timestamp = device_timestamp;
5008 		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
5009 	}
5010 
5011 	/* put in reorder buffer */
5012 	ml_enqueue(&entries[index].frames, m);
5013 	buffer->num_stored++;
5014 	getmicrouptime(&entries[index].reorder_time);
5015 
5016 	if (is_amsdu) {
5017 		buffer->last_amsdu = sn;
5018 		buffer->last_sub_index = subframe_idx;
5019 	}
5020 
5021 	/*
5022 	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
5023 	 * The reason is that NSSN advances on the first sub-frame, and may
5024 	 * cause the reorder buffer to advance before all the sub-frames arrive.
5025 	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
5026 	 * SN 1. The NSSN for the first sub-frame will be 3, with the result
5027 	 * that the driver releases SN 0, 1, 2. When sub-frame 1 arrives, the
5028 	 * reorder buffer is already ahead and the frame will be dropped.
5029 	 * If the last sub-frame is not on this queue, we will get a frame
5030 	 * release notification with an up-to-date NSSN.
5031 	 */
5032 	if (!is_amsdu || last_subframe)
5033 		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5034 
5035 	return 1;
5036 
5037 drop:
5038 	m_freem(m);
5039 	return 1;
5040 }
5041 
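/*
 * RX path for multi-queue capable hardware (9000 series). Unlike the
 * legacy path, the MPDU descriptor carries the PHY information inline,
 * header padding must be stripped by the driver, and hardware-
 * deaggregated A-MSDU subframes need their 'A-MSDU present' bit
 * cleared and their reversed addresses flipped back before the frame
 * is passed up.
 */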
5042 void
5043 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
5044     size_t maxlen, struct mbuf_list *ml)
5045 {
5046 	struct ieee80211com *ic = &sc->sc_ic;
5047 	struct ieee80211_rxinfo rxi;
5048 	struct iwm_rx_mpdu_desc *desc;
5049 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
5050 	int rssi;
5051 	uint8_t chanidx;
5052 	uint16_t phy_info;
5053 
5054 	memset(&rxi, 0, sizeof(rxi));
5055 
5056 	desc = (struct iwm_rx_mpdu_desc *)pktdata;
5057 
5058 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
5059 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
5060 		m_freem(m);
5061 		return; /* drop */
5062 	}
5063 
5064 	len = le16toh(desc->mpdu_len);
5065 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5066 		/* Allow control frames in monitor mode. */
5067 		if (len < sizeof(struct ieee80211_frame_cts)) {
5068 			ic->ic_stats.is_rx_tooshort++;
5069 			IC2IFP(ic)->if_ierrors++;
5070 			m_freem(m);
5071 			return;
5072 		}
5073 	} else if (len < sizeof(struct ieee80211_frame)) {
5074 		ic->ic_stats.is_rx_tooshort++;
5075 		IC2IFP(ic)->if_ierrors++;
5076 		m_freem(m);
5077 		return;
5078 	}
5079 	if (len > maxlen - sizeof(*desc)) {
5080 		IC2IFP(ic)->if_ierrors++;
5081 		m_freem(m);
5082 		return;
5083 	}
5084 
5085 	m->m_data = pktdata + sizeof(*desc);
5086 	m->m_pkthdr.len = m->m_len = len;
5087 
5088 	/* Account for padding following the frame header. */
5089 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) {
5090 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5091 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5092 		if (type == IEEE80211_FC0_TYPE_CTL) {
5093 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
5094 			case IEEE80211_FC0_SUBTYPE_CTS:
5095 				hdrlen = sizeof(struct ieee80211_frame_cts);
5096 				break;
5097 			case IEEE80211_FC0_SUBTYPE_ACK:
5098 				hdrlen = sizeof(struct ieee80211_frame_ack);
5099 				break;
5100 			default:
5101 				hdrlen = sizeof(struct ieee80211_frame_min);
5102 				break;
5103 			}
5104 		} else
5105 			hdrlen = ieee80211_get_hdrlen(wh);
5106 
5107 		if ((le16toh(desc->status) &
5108 		    IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
5109 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
5110 			/* Padding is inserted after the IV. */
5111 			hdrlen += IEEE80211_CCMP_HDRLEN;
5112 		}
5113 
5114 		memmove(m->m_data + 2, m->m_data, hdrlen);
5115 		m_adj(m, 2);
5116 	}
5117 
5118 	/*
5119 	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
5120 	 * in place for each subframe. But it leaves the 'A-MSDU present'
5121 	 * bit set in the frame header. We need to clear this bit ourselves.
5122 	 *
5123 	 * And we must allow the same CCMP PN for subframes following the
5124 	 * first subframe. Otherwise they would be discarded as replays.
5125 	 */
5126 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU) {
5127 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5128 		uint8_t subframe_idx = (desc->amsdu_info &
5129 		    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5130 		if (subframe_idx > 0)
5131 			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5132 		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
5133 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
5134 			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
5135 			    struct ieee80211_qosframe_addr4 *);
5136 			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5137 
5138 			/* HW reverses addr3 and addr4. */
5139 			iwm_flip_address(qwh4->i_addr3);
5140 			iwm_flip_address(qwh4->i_addr4);
5141 		} else if (ieee80211_has_qos(wh) &&
5142 		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
5143 			struct ieee80211_qosframe *qwh = mtod(m,
5144 			    struct ieee80211_qosframe *);
5145 			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5146 
5147 			/* HW reverses addr3. */
5148 			iwm_flip_address(qwh->i_addr3);
5149 		}
5150 	}
5151 
5152 	/*
5153 	 * Verify decryption before duplicate detection. The latter uses
5154 	 * the TID supplied in QoS frame headers and this TID is implicitly
5155 	 * verified as part of the CCMP nonce.
5156 	 */
5157 	if (iwm_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5158 		m_freem(m);
5159 		return;
5160 	}
5161 
5162 	if (iwm_detect_duplicate(sc, m, desc, &rxi)) {
5163 		m_freem(m);
5164 		return;
5165 	}
5166 
5167 	phy_info = le16toh(desc->phy_info);
5168 	rate_n_flags = le32toh(desc->v1.rate_n_flags);
5169 	chanidx = desc->v1.channel;
5170 	device_timestamp = desc->v1.gp2_on_air_rise;
5171 
5172 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
5173 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
5174 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
5175 
5176 	rxi.rxi_rssi = rssi;
5177 	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
5178 
5179 	if (iwm_rx_reorder(sc, m, chanidx, desc,
5180 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5181 	    rate_n_flags, device_timestamp, &rxi, ml))
5182 		return;
5183 
5184 	iwm_rx_frame(sc, m, chanidx, le16toh(desc->status),
5185 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5186 	    rate_n_flags, device_timestamp, &rxi, ml);
5187 }
5188 
5189 void
5190 iwm_ra_choose(struct iwm_softc *sc, struct ieee80211_node *ni)
5191 {
5192 	struct ieee80211com *ic = &sc->sc_ic;
5193 	struct iwm_node *in = (void *)ni;
5194 	int old_txmcs = ni->ni_txmcs;
5195 
5196 	ieee80211_ra_choose(&in->in_rn, ic, ni);
5197 
5198 	/*
5199 	 * If RA has chosen a new TX rate we must update
5200 	 * the firmware's LQ rate table.
5201 	 */
5202 	if (ni->ni_txmcs != old_txmcs)
5203 		iwm_setrates(in, 1);
5204 }
5205 
5206 void
5207 iwm_ht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5208     int txmcs, uint8_t failure_frame, int txfail)
5209 {
5210 	struct ieee80211com *ic = &sc->sc_ic;
5211 	struct iwm_node *in = (void *)ni;
5212 
5213 	/* Ignore Tx reports which don't match our last LQ command. */
5214 	if (txmcs != ni->ni_txmcs) {
5215 		if (++in->lq_rate_mismatch > 15) {
5216 			/* Try to sync firmware with the driver... */
5217 			iwm_setrates(in, 1);
5218 			in->lq_rate_mismatch = 0;
5219 		}
5220 	} else {
5221 		int mcs = txmcs;
5222 		const struct ieee80211_ht_rateset *rs =
5223 		    ieee80211_ra_get_ht_rateset(txmcs,
5224 		    ieee80211_node_supports_ht_sgi20(ni));
5225 		unsigned int retries = 0, i;
5226 
5227 		in->lq_rate_mismatch = 0;
5228 
5229 		for (i = 0; i < failure_frame; i++) {
5230 			if (mcs > rs->min_mcs) {
5231 				ieee80211_ra_add_stats_ht(&in->in_rn,
5232 				    ic, ni, mcs, 1, 1);
5233 				mcs--;
5234 			} else
5235 				retries++;
5236 		}
5237 
5238 		if (txfail && failure_frame == 0) {
5239 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5240 			    txmcs, 1, 1);
5241 		} else {
5242 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5243 			    mcs, retries + 1, retries);
5244 		}
5245 
5246 		iwm_ra_choose(sc, ni);
5247 	}
5248 }
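
/*
 * Worked example for the accounting in iwm_ht_single_rate_control(),
 * with hypothetical values txmcs=7, failure_frame=2, txfail=0 and
 * rs->min_mcs=0: the loop records one failed attempt at MCS 7 and one
 * at MCS 6, mirroring the firmware's rate fallback, and the final
 * ieee80211_ra_add_stats_ht() call records a successful attempt at
 * MCS 5 (one frame, zero failures).
 */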
5249 
5250 void
5251 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5252     struct iwm_node *in, int txmcs, int txrate)
5253 {
5254 	struct ieee80211com *ic = &sc->sc_ic;
5255 	struct ieee80211_node *ni = &in->in_ni;
5256 	struct ifnet *ifp = IC2IFP(ic);
5257 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5258 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5259 	int txfail;
5260 
5261 	KASSERT(tx_resp->frame_count == 1);
5262 
5263 	txfail = (status != IWM_TX_STATUS_SUCCESS &&
5264 	    status != IWM_TX_STATUS_DIRECT_DONE);
5265 
5266 	/*
5267 	 * Update rate control statistics.
5268 	 * Only report frames which were actually queued with the currently
5269 	 * selected Tx rate. Because Tx queues are relatively long we may
5270 	 * encounter previously selected rates here during Tx bursts.
5271 	 * Providing feedback based on such frames can lead to suboptimal
5272 	 * Tx rate control decisions.
5273 	 */
5274 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
5275 		if (txrate != ni->ni_txrate) {
5276 			if (++in->lq_rate_mismatch > 15) {
5277 				/* Try to sync firmware with the driver... */
5278 				iwm_setrates(in, 1);
5279 				in->lq_rate_mismatch = 0;
5280 			}
5281 		} else {
5282 			in->lq_rate_mismatch = 0;
5283 
5284 			in->in_amn.amn_txcnt++;
5285 			if (txfail)
5286 				in->in_amn.amn_retrycnt++;
5287 			if (tx_resp->failure_frame > 0)
5288 				in->in_amn.amn_retrycnt++;
5289 		}
5290 	} else if (ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5291 	    (le32toh(tx_resp->initial_rate) & IWM_RATE_MCS_HT_MSK)) {
5292 		int txmcs = le32toh(tx_resp->initial_rate) &
5293 		    (IWM_RATE_HT_MCS_RATE_CODE_MSK | IWM_RATE_HT_MCS_NSS_MSK);
5294 		iwm_ht_single_rate_control(sc, ni, txmcs,
5295 		    tx_resp->failure_frame, txfail);
5296 	}
5297 
5298 	if (txfail)
5299 		ifp->if_oerrors++;
5300 }
5301 
5302 void
5303 iwm_txd_done(struct iwm_softc *sc, struct iwm_tx_data *txd)
5304 {
5305 	struct ieee80211com *ic = &sc->sc_ic;
5306 
5307 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5308 	    BUS_DMASYNC_POSTWRITE);
5309 	bus_dmamap_unload(sc->sc_dmat, txd->map);
5310 	m_freem(txd->m);
5311 	txd->m = NULL;
5312 
5313 	KASSERT(txd->in);
5314 	ieee80211_release_node(ic, &txd->in->in_ni);
5315 	txd->in = NULL;
5316 	txd->ampdu_nframes = 0;
5317 	txd->ampdu_txmcs = 0;
5318 }
5319 
5320 void
5321 iwm_txq_advance(struct iwm_softc *sc, struct iwm_tx_ring *ring, int idx)
5322 {
5323 	struct iwm_tx_data *txd;
5324 
5325 	while (ring->tail != idx) {
5326 		txd = &ring->data[ring->tail];
5327 		if (txd->m != NULL) {
5328 			if (ring->qid < IWM_FIRST_AGG_TX_QUEUE)
5329 				DPRINTF(("%s: missed Tx completion: tail=%d "
5330 				    "idx=%d\n", __func__, ring->tail, idx));
5331 			iwm_reset_sched(sc, ring->qid, ring->tail, IWM_STATION_ID);
5332 			iwm_txd_done(sc, txd);
5333 			ring->queued--;
5334 		}
5335 		ring->tail = (ring->tail + 1) % IWM_TX_RING_COUNT;
5336 	}
5337 
5338 	wakeup(ring);
5339 }
5340 
5341 void
5342 iwm_ampdu_tx_done(struct iwm_softc *sc, struct iwm_cmd_header *cmd_hdr,
5343     struct iwm_node *in, struct iwm_tx_ring *txq, uint32_t initial_rate,
5344     uint8_t nframes, uint8_t failure_frame, uint16_t ssn, int status,
5345     struct iwm_agg_tx_status *agg_status)
5346 {
5347 	struct ieee80211com *ic = &sc->sc_ic;
5348 	int tid = cmd_hdr->qid - IWM_FIRST_AGG_TX_QUEUE;
5349 	struct iwm_tx_data *txdata = &txq->data[cmd_hdr->idx];
5350 	struct ieee80211_node *ni = &in->in_ni;
5351 	struct ieee80211_tx_ba *ba;
5352 	int txfail = (status != IWM_TX_STATUS_SUCCESS &&
5353 	    status != IWM_TX_STATUS_DIRECT_DONE);
5354 	uint16_t seq;
5355 
5356 	sc->sc_tx_timer = 0;
5357 
5358 	if (ic->ic_state != IEEE80211_S_RUN)
5359 		return;
5360 
5361 	if (nframes > 1) {
5362 		int i;
5363 		/*
5364 		 * Collect information about this A-MPDU.
5365 		 */
5366 
5367 		for (i = 0; i < nframes; i++) {
5368 			uint8_t qid = agg_status[i].qid;
5369 			uint8_t idx = agg_status[i].idx;
5370 			uint16_t txstatus = (le16toh(agg_status[i].status) &
5371 			    IWM_AGG_TX_STATE_STATUS_MSK);
5372 
5373 			if (txstatus != IWM_AGG_TX_STATE_TRANSMITTED)
5374 				continue;
5375 
5376 			if (qid != cmd_hdr->qid)
5377 				continue;
5378 
5379 			txdata = &txq->data[idx];
5380 			if (txdata->m == NULL)
5381 				continue;
5382 
5383 			/* The Tx rate was the same for all subframes. */
5384 			txdata->ampdu_txmcs = initial_rate &
5385 			   (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5386 			   IWM_RATE_HT_MCS_NSS_MSK);
5387 			txdata->ampdu_nframes = nframes;
5388 		}
5389 		return;
5390 	}
5391 
5392 	ba = &ni->ni_tx_ba[tid];
5393 	if (ba->ba_state != IEEE80211_BA_AGREED)
5394 		return;
5395 	if (SEQ_LT(ssn, ba->ba_winstart))
5396 		return;
5397 
5398 	/* This was a final single-frame Tx attempt for frame SSN-1. */
5399 	seq = (ssn - 1) & 0xfff;
5400 
5401 	/*
5402 	 * Skip rate control if our Tx rate is fixed.
5403 	 * Don't report frames to MiRA which were sent at a different
5404 	 * Tx rate than ni->ni_txmcs.
5405 	 */
5406 	if (ic->ic_fixed_mcs == -1) {
5407 		if (txdata->ampdu_nframes > 1) {
5408 			/*
5409 			 * This frame was once part of an A-MPDU.
5410 			 * Report one failed A-MPDU Tx attempt.
5411 			 * The firmware might have made several such
5412 			 * attempts but we don't keep track of this.
5413 			 */
5414 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5415 			    txdata->ampdu_txmcs, 1, 1);
5416 		}
5417 
5418 		/* Report the final single-frame Tx attempt. */
5419 		if (initial_rate & IWM_RATE_HT_MCS_RATE_CODE_MSK) {
5420 			int txmcs = initial_rate &
5421 			   (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5422 			   IWM_RATE_HT_MCS_NSS_MSK);
5423 			iwm_ht_single_rate_control(sc, ni, txmcs,
5424 			    failure_frame, txfail);
5425 		}
5426 	}
5427 
5428 	if (txfail)
5429 		ieee80211_tx_compressed_bar(ic, ni, tid, ssn);
5430 
5431 	/*
5432 	 * SSN corresponds to the first (perhaps not yet transmitted) frame
5433 	 * in firmware's BA window. Firmware is not going to retransmit any
5434 	 * frames before its BA window so mark them all as done.
5435 	 */
5436 	ieee80211_output_ba_move_window(ic, ni, tid, ssn);
5437 	iwm_txq_advance(sc, txq, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5438 	iwm_clear_oactive(sc, txq);
5439 }
5440 
5441 void
5442 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5443     struct iwm_rx_data *data)
5444 {
5445 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
5446 	int idx = cmd_hdr->idx;
5447 	int qid = cmd_hdr->qid;
5448 	struct iwm_tx_ring *ring = &sc->txq[qid];
5449 	struct iwm_tx_data *txd;
5450 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5451 	uint32_t ssn;
5452 	uint32_t len = iwm_rx_packet_len(pkt);
5453 
5454 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
5455 	    BUS_DMASYNC_POSTREAD);
5456 
5457 	sc->sc_tx_timer = 0;
5458 
5459 	/* Sanity checks. */
5460 	if (sizeof(*tx_resp) > len)
5461 		return;
5462 	if (qid < IWM_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
5463 		return;
5464 	if (qid >= IWM_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
5465 	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
5466 		return;
5467 
5468 	txd = &ring->data[idx];
5469 	if (txd->m == NULL)
5470 		return;
5471 
5472 	if (qid >= IWM_FIRST_AGG_TX_QUEUE) {
5473 		int status;
5474 		memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
5475 		ssn = le32toh(ssn) & 0xfff;
5476 		status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5477 		iwm_ampdu_tx_done(sc, cmd_hdr, txd->in, ring,
5478 		    le32toh(tx_resp->initial_rate), tx_resp->frame_count,
5479 		    tx_resp->failure_frame, ssn, status, &tx_resp->status);
5480 	} else {
5481 		iwm_rx_tx_cmd_single(sc, pkt, txd->in, txd->txmcs, txd->txrate);
5482 		iwm_txd_done(sc, txd);
5483 		ring->queued--;
5484 
5485 		/*
5486 		 * XXX Sometimes we miss Tx completion interrupts.
5487 		 * We cannot check Tx success/failure for affected frames;
5488 		 * just free the associated mbuf and release the associated
5489 		 * node reference.
5490 		 */
5491 		iwm_txq_advance(sc, ring, idx);
5492 		iwm_clear_oactive(sc, ring);
5493 	}
5494 }
5495 
5496 void
5497 iwm_clear_oactive(struct iwm_softc *sc, struct iwm_tx_ring *ring)
5498 {
5499 	struct ieee80211com *ic = &sc->sc_ic;
5500 	struct ifnet *ifp = IC2IFP(ic);
5501 
5502 	if (ring->queued < IWM_TX_RING_LOMARK) {
5503 		sc->qfullmsk &= ~(1 << ring->qid);
5504 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5505 			ifq_clr_oactive(&ifp->if_snd);
5506 			/*
5507 			 * Well, we're in interrupt context, but then again
5508 			 * I guess net80211 does all sorts of stunts in
5509 			 * interrupt context, so maybe this is no biggie.
5510 			 */
5511 			(*ifp->if_start)(ifp);
5512 		}
5513 	}
5514 }
5515 
5516 void
5517 iwm_ampdu_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5518     struct iwm_tx_ring *txq, int tid, uint16_t seq, uint16_t ssn)
5519 {
5520 	struct ieee80211com *ic = &sc->sc_ic;
5521 	struct iwm_node *in = (void *)ni;
5522 	int idx, end_idx;
5523 
5524 	/*
5525 	 * Update Tx rate statistics for A-MPDUs before firmware's BA window.
5526 	 */
5527 	idx = IWM_AGG_SSN_TO_TXQ_IDX(seq);
5528 	end_idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
5529 	while (idx != end_idx) {
5530 		struct iwm_tx_data *txdata = &txq->data[idx];
5531 		if (txdata->m != NULL && txdata->ampdu_nframes > 1) {
5532 			/*
5533 			 * We can assume that this subframe has been ACKed
5534 			 * because ACK failures come as single frames and
5535 			 * before failing an A-MPDU subframe the firmware
5536 			 * sends it as a single frame at least once.
5537 			 */
5538 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5539 			    txdata->ampdu_txmcs, 1, 0);
5540 
5541 			/* Report this frame only once. */
5542 			txdata->ampdu_nframes = 0;
5543 		}
5544 
5545 		idx = (idx + 1) % IWM_TX_RING_COUNT;
5546 	}
5547 
5548 	iwm_ra_choose(sc, ni);
5549 }
5550 
5551 void
5552 iwm_rx_compressed_ba(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5553     struct iwm_rx_data *data)
5554 {
5555 	struct iwm_ba_notif *ban = (void *)pkt->data;
5556 	struct ieee80211com *ic = &sc->sc_ic;
5557 	struct ieee80211_node *ni;
5558 	struct ieee80211_tx_ba *ba;
5559 	struct iwm_node *in;
5560 	struct iwm_tx_ring *ring;
5561 	uint16_t seq, ssn;
5562 	int qid;
5563 
5564 	if (ic->ic_state != IEEE80211_S_RUN)
5565 		return;
5566 
5567 	if (iwm_rx_packet_payload_len(pkt) < sizeof(*ban))
5568 		return;
5569 
5570 	if (ban->sta_id != IWM_STATION_ID ||
5571 	    !IEEE80211_ADDR_EQ(ic->ic_bss->ni_macaddr, ban->sta_addr))
5572 		return;
5573 
5574 	ni = ic->ic_bss;
5575 	in = (void *)ni;
5576 
5577 	qid = le16toh(ban->scd_flow);
5578 	if (qid < IWM_FIRST_AGG_TX_QUEUE || qid > IWM_LAST_AGG_TX_QUEUE)
5579 		return;
5580 
5581 	/* Protect against a firmware bug where the queue/TID are off. */
5582 	if (qid != IWM_FIRST_AGG_TX_QUEUE + ban->tid)
5583 		return;
5584 
5585 	ba = &ni->ni_tx_ba[ban->tid];
5586 	if (ba->ba_state != IEEE80211_BA_AGREED)
5587 		return;
5588 
5589 	ring = &sc->txq[qid];
5590 
5591 	/*
5592 	 * The first bit in ban->bitmap corresponds to the sequence number
5593 	 * stored in the sequence control field ban->seq_ctl.
5594 	 * Multiple BA notifications in a row may be using this number, with
5595 	 * additional bits being set in ban->bitmap. It is unclear how the
5596 	 * firmware decides to shift this window forward.
5597 	 * We rely on ba->ba_winstart instead.
5598 	 */
5599 	seq = le16toh(ban->seq_ctl) >> IEEE80211_SEQ_SEQ_SHIFT;
5600 
5601 	/*
5602 	 * The firmware's new BA window starting sequence number
5603 	 * is reported in ban->scd_ssn and corresponds to the first
5604 	 * hole in ban->bitmap, implying that all frames between 'seq'
5605 	 * and 'ssn' (non-inclusive) have been acked.
5606 	 */
5607 	ssn = le16toh(ban->scd_ssn);
5608 
5609 	if (SEQ_LT(ssn, ba->ba_winstart))
5610 		return;
5611 
5612 	/* Skip rate control if our Tx rate is fixed. */
5613 	if (ic->ic_fixed_mcs == -1)
5614 		iwm_ampdu_rate_control(sc, ni, ring, ban->tid,
5615 		    ba->ba_winstart, ssn);
5616 
5617 	/*
5618 	 * SSN corresponds to the first (perhaps not yet transmitted) frame
5619 	 * in firmware's BA window. Firmware is not going to retransmit any
5620 	 * frames before its BA window so mark them all as done.
5621 	 */
5622 	ieee80211_output_ba_move_window(ic, ni, ban->tid, ssn);
5623 	iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5624 	iwm_clear_oactive(sc, ring);
5625 }
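
/*
 * Worked example for iwm_rx_compressed_ba(), with hypothetical values:
 * if ba->ba_winstart == 100 and ban->scd_ssn == 103, then frames with
 * sequence numbers 100-102 are implicitly acked. The BA window moves to
 * 103 and Tx ring entries up to the ring index derived from the 12-bit
 * SSN (IWM_AGG_SSN_TO_TXQ_IDX) are completed via iwm_txq_advance().
 */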
5626 
5627 void
5628 iwm_rx_bmiss(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5629     struct iwm_rx_data *data)
5630 {
5631 	struct ieee80211com *ic = &sc->sc_ic;
5632 	struct iwm_missed_beacons_notif *mbn = (void *)pkt->data;
5633 	uint32_t missed;
5634 
5635 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
5636 	    (ic->ic_state != IEEE80211_S_RUN))
5637 		return;
5638 
5639 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
5640 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
5641 
5642 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
5643 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
5644 		if (ic->ic_if.if_flags & IFF_DEBUG)
5645 			printf("%s: receiving no beacons from %s; checking if "
5646 			    "this AP is still responding to probe requests\n",
5647 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
5648 		/*
5649 		 * Rather than go directly to scan state, try to send a
5650 		 * directed probe request first. If that fails then the
5651 		 * state machine will drop us into scanning after timing
5652 		 * out waiting for a probe response.
5653 		 */
5654 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
5655 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
5656 	}
5657 
5658 }
5659 
5660 int
5661 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
5662 {
5663 	struct iwm_binding_cmd cmd;
5664 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
5665 	uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
5666 	int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
5667 	uint32_t status;
5668 
5669 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
5670 		panic("binding already added");
5671 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
5672 		panic("binding already removed");
5673 
5674 	if (phyctxt == NULL) /* XXX race with iwm_stop() */
5675 		return EINVAL;
5676 
5677 	memset(&cmd, 0, sizeof(cmd));
5678 
5679 	cmd.id_and_color
5680 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5681 	cmd.action = htole32(action);
5682 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5683 
5684 	cmd.macs[0] = htole32(mac_id);
5685 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
5686 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
5687 
5688 	status = 0;
5689 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
5690 	    sizeof(cmd), &cmd, &status);
5691 	if (err == 0 && status != 0)
5692 		err = EIO;
5693 
5694 	return err;
5695 }
5696 
5697 void
5698 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
5699     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
5700 {
5701 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
5702 
5703 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
5704 	    ctxt->color));
5705 	cmd->action = htole32(action);
5706 	cmd->apply_time = htole32(apply_time);
5707 }
5708 
5709 void
5710 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
5711     struct ieee80211_channel *chan, uint8_t chains_static,
5712     uint8_t chains_dynamic)
5713 {
5714 	struct ieee80211com *ic = &sc->sc_ic;
5715 	uint8_t active_cnt, idle_cnt;
5716 
5717 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5718 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
5719 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
5720 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
5721 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
5722 
5723 	/* Set the Rx chains. */
5724 	idle_cnt = chains_static;
5725 	active_cnt = chains_dynamic;
5726 
5727 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
5728 					IWM_PHY_RX_CHAIN_VALID_POS);
5729 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
5730 	cmd->rxchain_info |= htole32(active_cnt <<
5731 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
5732 
5733 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
5734 }
5735 
5736 int
5737 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
5738     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
5739     uint32_t apply_time)
5740 {
5741 	struct iwm_phy_context_cmd cmd;
5742 
5743 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
5744 
5745 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
5746 	    chains_static, chains_dynamic);
5747 
5748 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
5749 	    sizeof(struct iwm_phy_context_cmd), &cmd);
5750 }
5751 
5752 int
5753 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
5754 {
5755 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
5756 	struct iwm_tfd *desc;
5757 	struct iwm_tx_data *txdata;
5758 	struct iwm_device_cmd *cmd;
5759 	struct mbuf *m;
5760 	bus_addr_t paddr;
5761 	uint32_t addr_lo;
5762 	int err = 0, i, paylen, off, s;
5763 	int idx, code, async, group_id;
5764 	size_t hdrlen, datasz;
5765 	uint8_t *data;
5766 	int generation = sc->sc_generation;
5767 
5768 	code = hcmd->id;
5769 	async = hcmd->flags & IWM_CMD_ASYNC;
5770 	idx = ring->cur;
5771 
5772 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
5773 		paylen += hcmd->len[i];
5774 	}
5775 
5776 	/* If this command waits for a response, allocate response buffer. */
5777 	hcmd->resp_pkt = NULL;
5778 	if (hcmd->flags & IWM_CMD_WANT_RESP) {
5779 		uint8_t *resp_buf;
5780 		KASSERT(!async);
5781 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwm_rx_packet));
5782 		KASSERT(hcmd->resp_pkt_len <= IWM_CMD_RESP_MAX);
5783 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
5784 			return ENOSPC;
5785 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
5786 		    M_NOWAIT | M_ZERO);
5787 		if (resp_buf == NULL)
5788 			return ENOMEM;
5789 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
5790 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
5791 	} else {
5792 		sc->sc_cmd_resp_pkt[idx] = NULL;
5793 	}
5794 
5795 	s = splnet();
5796 
5797 	desc = &ring->desc[idx];
5798 	txdata = &ring->data[idx];
5799 
5800 	group_id = iwm_cmd_groupid(code);
5801 	if (group_id != 0) {
5802 		hdrlen = sizeof(cmd->hdr_wide);
5803 		datasz = sizeof(cmd->data_wide);
5804 	} else {
5805 		hdrlen = sizeof(cmd->hdr);
5806 		datasz = sizeof(cmd->data);
5807 	}
5808 
5809 	if (paylen > datasz) {
5810 		/* Command is too large to fit in pre-allocated space. */
5811 		size_t totlen = hdrlen + paylen;
5812 		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
5813 			printf("%s: firmware command too long (%zd bytes)\n",
5814 			    DEVNAME(sc), totlen);
5815 			err = EINVAL;
5816 			goto out;
5817 		}
5818 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
5819 		if (m == NULL) {
5820 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
5821 			    DEVNAME(sc), totlen);
5822 			err = ENOMEM;
5823 			goto out;
5824 		}
5825 		cmd = mtod(m, struct iwm_device_cmd *);
5826 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
5827 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5828 		if (err) {
5829 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
5830 			    DEVNAME(sc), totlen);
5831 			m_freem(m);
5832 			goto out;
5833 		}
5834 		txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
5835 		paddr = txdata->map->dm_segs[0].ds_addr;
5836 	} else {
5837 		cmd = &ring->cmd[idx];
5838 		paddr = txdata->cmd_paddr;
5839 	}
5840 
5841 	if (group_id != 0) {
5842 		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
5843 		cmd->hdr_wide.group_id = group_id;
5844 		cmd->hdr_wide.qid = ring->qid;
5845 		cmd->hdr_wide.idx = idx;
5846 		cmd->hdr_wide.length = htole16(paylen);
5847 		cmd->hdr_wide.version = iwm_cmd_version(code);
5848 		data = cmd->data_wide;
5849 	} else {
5850 		cmd->hdr.code = code;
5851 		cmd->hdr.flags = 0;
5852 		cmd->hdr.qid = ring->qid;
5853 		cmd->hdr.idx = idx;
5854 		data = cmd->data;
5855 	}
5856 
5857 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
5858 		if (hcmd->len[i] == 0)
5859 			continue;
5860 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5861 		off += hcmd->len[i];
5862 	}
5863 	KASSERT(off == paylen);
5864 
5865 	/* lo field is not aligned */
5866 	addr_lo = htole32((uint32_t)paddr);
5867 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
5868 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
5869 	    | ((hdrlen + paylen) << 4));
5870 	desc->num_tbs = 1;
5871 
5872 	if (paylen > datasz) {
5873 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
5874 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5875 	} else {
5876 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5877 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5878 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5879 	}
5880 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5881 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5882 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5883 
5884 	/*
5885 	 * Wake up the NIC to make sure that the firmware will see the host
5886 	 * command - we will let the NIC sleep once all the host commands
5887 	 * have returned. This needs to be done only on 7000 family NICs.
5888 	 */
5889 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
5890 		if (ring->queued == 0 && !iwm_nic_lock(sc)) {
5891 			err = EBUSY;
5892 			goto out;
5893 		}
5894 	}
5895 
5896 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
5897 
5898 	/* Kick command ring. */
5899 	ring->queued++;
5900 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
5901 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
5902 
5903 	if (!async) {
5904 		err = tsleep_nsec(desc, PCATCH, "iwmcmd", SEC_TO_NSEC(1));
5905 		if (err == 0) {
5906 			/* if hardware is no longer up, return error */
5907 			if (generation != sc->sc_generation) {
5908 				err = ENXIO;
5909 				goto out;
5910 			}
5911 
5912 			/* Response buffer will be freed in iwm_free_resp(). */
5913 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5914 			sc->sc_cmd_resp_pkt[idx] = NULL;
5915 		} else if (generation == sc->sc_generation) {
5916 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
5917 			    sc->sc_cmd_resp_len[idx]);
5918 			sc->sc_cmd_resp_pkt[idx] = NULL;
5919 		}
5920 	}
5921  out:
5922 	splx(s);
5923 
5924 	return err;
5925 }
5926 
5927 int
5928 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
5929     uint16_t len, const void *data)
5930 {
5931 	struct iwm_host_cmd cmd = {
5932 		.id = id,
5933 		.len = { len, },
5934 		.data = { data, },
5935 		.flags = flags,
5936 	};
5937 
5938 	return iwm_send_cmd(sc, &cmd);
5939 }
5940 
5941 int
5942 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
5943     uint32_t *status)
5944 {
5945 	struct iwm_rx_packet *pkt;
5946 	struct iwm_cmd_response *resp;
5947 	int err, resp_len;
5948 
5949 	KASSERT((cmd->flags & IWM_CMD_WANT_RESP) == 0);
5950 	cmd->flags |= IWM_CMD_WANT_RESP;
5951 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5952 
5953 	err = iwm_send_cmd(sc, cmd);
5954 	if (err)
5955 		return err;
5956 
5957 	pkt = cmd->resp_pkt;
5958 	if (pkt == NULL || (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
5959 		return EIO;
5960 
5961 	resp_len = iwm_rx_packet_payload_len(pkt);
5962 	if (resp_len != sizeof(*resp)) {
5963 		iwm_free_resp(sc, cmd);
5964 		return EIO;
5965 	}
5966 
5967 	resp = (void *)pkt->data;
5968 	*status = le32toh(resp->status);
5969 	iwm_free_resp(sc, cmd);
5970 	return err;
5971 }
5972 
5973 int
5974 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
5975     const void *data, uint32_t *status)
5976 {
5977 	struct iwm_host_cmd cmd = {
5978 		.id = id,
5979 		.len = { len, },
5980 		.data = { data, },
5981 	};
5982 
5983 	return iwm_send_cmd_status(sc, &cmd, status);
5984 }
5985 
5986 void
5987 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
5988 {
5989 	KASSERT((hcmd->flags & (IWM_CMD_WANT_RESP)) == IWM_CMD_WANT_RESP);
5990 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
5991 	hcmd->resp_pkt = NULL;
5992 }
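
/*
 * A minimal usage sketch for the host command API above. The command ID
 * and payload are placeholders, not a real firmware exchange; it shows
 * how IWM_CMD_WANT_RESP must be paired with iwm_free_resp().
 */
#if 0
	uint32_t payload = 0;			/* placeholder payload */
	struct iwm_host_cmd hcmd = {
		.id = some_cmd_id,		/* placeholder command ID */
		.len = { sizeof(payload), },
		.data = { &payload, },
		.flags = IWM_CMD_WANT_RESP,
		.resp_pkt_len = IWM_CMD_RESP_MAX,
	};
	int err = iwm_send_cmd(sc, &hcmd);

	if (err == 0) {
		/* ... inspect hcmd.resp_pkt ... */
		iwm_free_resp(sc, &hcmd);
	}
#endif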
5993 
5994 void
5995 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx, int code)
5996 {
5997 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
5998 	struct iwm_tx_data *data;
5999 
6000 	if (qid != sc->cmdqid) {
6001 		return;	/* Not a command ack. */
6002 	}
6003 
6004 	data = &ring->data[idx];
6005 
6006 	if (data->m != NULL) {
6007 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
6008 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6009 		bus_dmamap_unload(sc->sc_dmat, data->map);
6010 		m_freem(data->m);
6011 		data->m = NULL;
6012 	}
6013 	wakeup(&ring->desc[idx]);
6014 
6015 	if (ring->queued == 0) {
6016 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
6017 		    DEVNAME(sc), code));
6018 	} else if (--ring->queued == 0) {
6019 		/*
6020 		 * 7000 family NICs are locked while commands are in progress.
6021 		 * All commands are now done so we may unlock the NIC again.
6022 		 */
6023 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6024 			iwm_nic_unlock(sc);
6025 	}
6026 }
6027 
6028 void
6029 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
6030     uint16_t len)
6031 {
6032 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6033 	uint16_t val;
6034 
6035 	scd_bc_tbl = sc->sched_dma.vaddr;
6036 
6037 	len += IWM_TX_CRC_SIZE + IWM_TX_DELIMITER_SIZE;
6038 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
6039 		len = roundup(len, 4) / 4;
6040 
6041 	val = htole16(sta_id << 12 | len);
6042 
6043 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6044 	    0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6045 
6046 	/* Update TX scheduler. */
6047 	scd_bc_tbl[qid].tfd_offset[idx] = val;
6048 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6049 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6050 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6051 	    0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6052 }
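
/*
 * Worked example for iwm_update_sched(), with hypothetical values: for
 * a 120-byte frame, len becomes 120 + 4 (CRC) + 4 (delimiter) = 128.
 * With IWM_UCODE_TLV_FLAGS_DW_BC_TABLE set this is expressed in dwords,
 * 128 / 4 = 32, so with sta_id 0 the scheduler entry is
 * htole16(0 << 12 | 32). The entry is mirrored at offset
 * IWM_TFD_QUEUE_SIZE_MAX + idx for the first IWM_TFD_QUEUE_SIZE_BC_DUP
 * slots so that the hardware can read past the end of the table when
 * the ring wraps.
 */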
6053 
6054 void
6055 iwm_reset_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id)
6056 {
6057 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6058 	uint16_t val;
6059 
6060 	scd_bc_tbl = sc->sched_dma.vaddr;
6061 
6062 	val = htole16(1 | (sta_id << 12));
6063 
6064 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6065 	    0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6066 
6067 	/* Update TX scheduler. */
6068 	scd_bc_tbl[qid].tfd_offset[idx] = val;
6069 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6070 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6071 
6072 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6073 	    0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6074 }
6075 
6076 /*
6077  * Fill in various bits for management frames, and leave them
6078  * unfilled for data frames (firmware takes care of that).
6079  * Return the selected TX rate.
6080  */
6081 const struct iwm_rate *
6082 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
6083     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
6084 {
6085 	struct ieee80211com *ic = &sc->sc_ic;
6086 	struct ieee80211_node *ni = &in->in_ni;
6087 	const struct iwm_rate *rinfo;
6088 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6089 	int min_ridx = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
6090 	int ridx, rate_flags;
6091 
6092 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
6093 	tx->data_retry_limit = IWM_LOW_RETRY_LIMIT;
6094 
6095 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
6096 	    type != IEEE80211_FC0_TYPE_DATA) {
6097 		/* for non-data, use the lowest supported rate */
6098 		ridx = min_ridx;
6099 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
6100 	} else if (ic->ic_fixed_mcs != -1) {
6101 		ridx = sc->sc_fixed_ridx;
6102 	} else if (ic->ic_fixed_rate != -1) {
6103 		ridx = sc->sc_fixed_ridx;
6104 	} else {
6105 		int i;
6106 		/* Use firmware rateset retry table. */
6107 		tx->initial_rate_index = 0;
6108 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
6109 		if (ni->ni_flags & IEEE80211_NODE_HT) {
6110 			ridx = iwm_mcs2ridx[ni->ni_txmcs];
6111 			return &iwm_rates[ridx];
6112 		}
6113 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
6114 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
6115 		for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
6116 			if (iwm_rates[i].rate == (ni->ni_txrate &
6117 			    IEEE80211_RATE_VAL)) {
6118 				ridx = i;
6119 				break;
6120 			}
6121 		}
6122 		return &iwm_rates[ridx];
6123 	}
6124 
6125 	rinfo = &iwm_rates[ridx];
6126 	if (iwm_is_mimo_ht_plcp(rinfo->ht_plcp))
6127 		rate_flags = IWM_RATE_MCS_ANT_AB_MSK;
6128 	else
6129 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
6130 	if (IWM_RIDX_IS_CCK(ridx))
6131 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
6132 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6133 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6134 		rate_flags |= IWM_RATE_MCS_HT_MSK;
6135 		if (ieee80211_node_supports_ht_sgi20(ni))
6136 			rate_flags |= IWM_RATE_MCS_SGI_MSK;
6137 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
6138 	} else
6139 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
6140 
6141 	return rinfo;
6142 }
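
/*
 * Example of the rate_n_flags encoding above, for a hypothetical HT
 * SISO MCS 7 frame with short GI on antenna A: the command would carry
 * IWM_RATE_MCS_ANT_A_MSK | IWM_RATE_MCS_HT_MSK | IWM_RATE_MCS_SGI_MSK
 * combined with the rate's ht_plcp value. A CCK rate would instead
 * combine IWM_RATE_MCS_CCK_MSK with the legacy plcp value.
 */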
6143 
6144 #define TB0_SIZE 16
6145 int
6146 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
6147 {
6148 	struct ieee80211com *ic = &sc->sc_ic;
6149 	struct iwm_node *in = (void *)ni;
6150 	struct iwm_tx_ring *ring;
6151 	struct iwm_tx_data *data;
6152 	struct iwm_tfd *desc;
6153 	struct iwm_device_cmd *cmd;
6154 	struct iwm_tx_cmd *tx;
6155 	struct ieee80211_frame *wh;
6156 	struct ieee80211_key *k = NULL;
6157 	const struct iwm_rate *rinfo;
6158 	uint8_t *ivp;
6159 	uint32_t flags;
6160 	u_int hdrlen;
6161 	bus_dma_segment_t *seg;
6162 	uint8_t tid, type, subtype;
6163 	int i, totlen, err, pad;
6164 	int qid, hasqos;
6165 
6166 	wh = mtod(m, struct ieee80211_frame *);
6167 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6168 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
6169 	if (type == IEEE80211_FC0_TYPE_CTL)
6170 		hdrlen = sizeof(struct ieee80211_frame_min);
6171 	else
6172 		hdrlen = ieee80211_get_hdrlen(wh);
6173 
6174 	hasqos = ieee80211_has_qos(wh);
6175 	if (type == IEEE80211_FC0_TYPE_DATA)
6176 		tid = IWM_TID_NON_QOS;
6177 	else
6178 		tid = IWM_MAX_TID_COUNT;
6179 
6180 	/*
6181 	 * Map EDCA categories to Tx data queues.
6182 	 *
6183 	 * We use static data queue assignments even in DQA mode. We do not
6184 	 * need to share Tx queues between stations because we only implement
6185 	 * client mode; the firmware's station table contains only one entry
6186 	 * which represents our access point.
6187 	 */
6188 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6189 		qid = IWM_DQA_MIN_MGMT_QUEUE + ac;
6190 	else
6191 		qid = ac;
6192 
6193 	/* If possible, put this frame on an aggregation queue. */
6194 	if (hasqos) {
6195 		struct ieee80211_tx_ba *ba;
6196 		uint16_t qos = ieee80211_get_qos(wh);
6197 		int qostid = qos & IEEE80211_QOS_TID;
6198 		int agg_qid = IWM_FIRST_AGG_TX_QUEUE + qostid;
6199 
6200 		ba = &ni->ni_tx_ba[qostid];
6201 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6202 		    type == IEEE80211_FC0_TYPE_DATA &&
6203 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
6204 		    (sc->tx_ba_queue_mask & (1 << agg_qid)) &&
6205 		    ba->ba_state == IEEE80211_BA_AGREED) {
6206 			qid = agg_qid;
6207 			tid = qostid;
6208 			ac = ieee80211_up_to_ac(ic, qostid);
6209 		}
6210 	}
6211 
6212 	ring = &sc->txq[qid];
6213 	desc = &ring->desc[ring->cur];
6214 	memset(desc, 0, sizeof(*desc));
6215 	data = &ring->data[ring->cur];
6216 
6217 	cmd = &ring->cmd[ring->cur];
6218 	cmd->hdr.code = IWM_TX_CMD;
6219 	cmd->hdr.flags = 0;
6220 	cmd->hdr.qid = ring->qid;
6221 	cmd->hdr.idx = ring->cur;
6222 
6223 	tx = (void *)cmd->data;
6224 	memset(tx, 0, sizeof(*tx));
6225 
6226 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
6227 
6228 #if NBPFILTER > 0
6229 	if (sc->sc_drvbpf != NULL) {
6230 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
6231 		uint16_t chan_flags;
6232 
6233 		tap->wt_flags = 0;
6234 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
6235 		chan_flags = ni->ni_chan->ic_flags;
6236 		if (ic->ic_curmode != IEEE80211_MODE_11N)
6237 			chan_flags &= ~IEEE80211_CHAN_HT;
6238 		tap->wt_chan_flags = htole16(chan_flags);
6239 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6240 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6241 		    type == IEEE80211_FC0_TYPE_DATA &&
6242 		    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6243 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
6244 		} else
6245 			tap->wt_rate = rinfo->rate;
6246 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
6247 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
6248 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
6249 
6250 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
6251 		    m, BPF_DIRECTION_OUT);
6252 	}
6253 #endif
6254 	totlen = m->m_pkthdr.len;
6255 
6256 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
6257 		k = ieee80211_get_txkey(ic, wh, ni);
6258 		if ((k->k_flags & IEEE80211_KEY_GROUP) ||
6259 		    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
6260 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
6261 				return ENOBUFS;
6262 			/* 802.11 header may have moved. */
6263 			wh = mtod(m, struct ieee80211_frame *);
6264 			totlen = m->m_pkthdr.len;
6265 			k = NULL; /* skip hardware crypto below */
6266 		} else {
6267 			/* HW appends CCMP MIC */
6268 			totlen += IEEE80211_CCMP_HDRLEN;
6269 		}
6270 	}
6271 
6272 	flags = 0;
6273 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
6274 		flags |= IWM_TX_CMD_FLG_ACK;
6275 	}
6276 
6277 	if (type == IEEE80211_FC0_TYPE_DATA &&
6278 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6279 	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
6280 	    (ic->ic_flags & IEEE80211_F_USEPROT)))
6281 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
6282 
6283 	tx->sta_id = IWM_STATION_ID;
6284 
6285 	if (type == IEEE80211_FC0_TYPE_MGT) {
6286 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
6287 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
6288 			tx->pm_frame_timeout = htole16(3);
6289 		else
6290 			tx->pm_frame_timeout = htole16(2);
6291 	} else {
6292 		if (type == IEEE80211_FC0_TYPE_CTL &&
6293 		    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
6294 			struct ieee80211_frame_min *mwh;
6295 			uint8_t *barfrm;
6296 			uint16_t ctl;
6297 			mwh = mtod(m, struct ieee80211_frame_min *);
6298 			barfrm = (uint8_t *)&mwh[1];
6299 			ctl = LE_READ_2(barfrm);
6300 			tid = (ctl & IEEE80211_BA_TID_INFO_MASK) >>
6301 			    IEEE80211_BA_TID_INFO_SHIFT;
6302 			flags |= IWM_TX_CMD_FLG_ACK | IWM_TX_CMD_FLG_BAR;
6303 			tx->data_retry_limit = IWM_BAR_DFAULT_RETRY_LIMIT;
6304 		}
6305 
6306 		tx->pm_frame_timeout = htole16(0);
6307 	}
6308 
6309 	if (hdrlen & 3) {
6310 		/* First segment length must be a multiple of 4. */
6311 		flags |= IWM_TX_CMD_FLG_MH_PAD;
6312 		tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
6313 		pad = 4 - (hdrlen & 3);
6314 	} else
6315 		pad = 0;
6316 
6317 	tx->len = htole16(totlen);
6318 	tx->tid_tspec = tid;
6319 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
6320 
6321 	/* Set physical address of "scratch area". */
6322 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
6323 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
6324 
6325 	/* Copy 802.11 header in TX command. */
6326 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
6327 
6328 	if  (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) {
6329 		/* Trim 802.11 header and prepend CCMP IV. */
6330 		m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN);
6331 		ivp = mtod(m, u_int8_t *);
6332 		k->k_tsc++;	/* increment the 48-bit PN */
6333 		ivp[0] = k->k_tsc; /* PN0 */
6334 		ivp[1] = k->k_tsc >> 8; /* PN1 */
6335 		ivp[2] = 0;        /* Rsvd */
6336 		ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV;
6337 		ivp[4] = k->k_tsc >> 16; /* PN2 */
6338 		ivp[5] = k->k_tsc >> 24; /* PN3 */
6339 		ivp[6] = k->k_tsc >> 32; /* PN4 */
6340 		ivp[7] = k->k_tsc >> 40; /* PN5 */
6341 
6342 		tx->sec_ctl = IWM_TX_CMD_SEC_CCM;
6343 		memcpy(tx->key, k->k_key, MIN(sizeof(tx->key), k->k_len));
6344 		/* TX scheduler includes CCMP MIC length. */
6345 		totlen += IEEE80211_CCMP_MICLEN;
6346 	} else {
6347 		/* Trim 802.11 header. */
6348 		m_adj(m, hdrlen);
6349 		tx->sec_ctl = 0;
6350 	}
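
	/*
	 * CCMP header layout produced above, for a hypothetical 48-bit
	 * PN of 0x010203040506 with key ID 0:
	 *
	 *   ivp[0..7] = 06 05 00 20 04 03 02 01
	 *
	 * i.e. PN0, PN1, reserved, key ID and ExtIV flag
	 * (IEEE80211_WEP_EXTIV is 0x20), followed by PN2-PN5.
	 */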
6351 
6352 	flags |= IWM_TX_CMD_FLG_BT_DIS;
6353 	if (!hasqos)
6354 		flags |= IWM_TX_CMD_FLG_SEQ_CTL;
6355 
6356 	tx->tx_flags |= htole32(flags);
6357 
6358 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6359 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6360 	if (err && err != EFBIG) {
6361 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
6362 		m_freem(m);
6363 		return err;
6364 	}
6365 	if (err) {
6366 		/* Too many DMA segments, linearize mbuf. */
6367 		if (m_defrag(m, M_DONTWAIT)) {
6368 			m_freem(m);
6369 			return ENOBUFS;
6370 		}
6371 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6372 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6373 		if (err) {
6374 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
6375 			    err);
6376 			m_freem(m);
6377 			return err;
6378 		}
6379 	}
6380 	data->m = m;
6381 	data->in = in;
6382 	data->txmcs = ni->ni_txmcs;
6383 	data->txrate = ni->ni_txrate;
6384 	data->ampdu_txmcs = ni->ni_txmcs; /* updated upon Tx interrupt */
6385 
6386 	/* Fill TX descriptor. */
6387 	desc->num_tbs = 2 + data->map->dm_nsegs;
6388 
6389 	desc->tbs[0].lo = htole32(data->cmd_paddr);
6390 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6391 	    (TB0_SIZE << 4));
6392 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
6393 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6394 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
6395 	      + hdrlen + pad - TB0_SIZE) << 4));
6396 
6397 	/* Other DMA segments are for data payload. */
6398 	seg = data->map->dm_segs;
6399 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
6400 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
6401 		desc->tbs[i+2].hi_n_len =
6402 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr)
6403 		    | ((seg->ds_len) << 4));
6404 	}
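
	/*
	 * Resulting TFD layout: tbs[0] covers the first TB0_SIZE (16)
	 * bytes of the Tx command, tbs[1] the remainder of the command
	 * header, Tx command and padded 802.11 header, and tbs[2..] the
	 * payload DMA segments. Each hi_n_len packs the upper address
	 * bits into its low nibble and the segment length into the
	 * remaining 12 bits, hence the << 4 shifts.
	 */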
6405 
6406 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
6407 	    BUS_DMASYNC_PREWRITE);
6408 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6409 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6410 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
6411 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6412 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6413 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6414 
6415 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, totlen);
6416 
6417 	/* Kick TX ring. */
6418 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
6419 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
6420 
6421 	/* Mark TX ring as full if we reach a certain threshold. */
6422 	if (++ring->queued > IWM_TX_RING_HIMARK) {
6423 		sc->qfullmsk |= 1 << ring->qid;
6424 	}
6425 
6426 	return 0;
6427 }
6428 
6429 int
6430 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_queue_msk)
6431 {
6432 	struct iwm_tx_path_flush_cmd flush_cmd = {
6433 		.queues_ctl = htole32(tfd_queue_msk),
6434 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
6435 	};
6436 	int err;
6437 
6438 	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, 0,
6439 	    sizeof(flush_cmd), &flush_cmd);
6440 	if (err)
6441 		printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
6442 	return err;
6443 }
6444 
6445 #define IWM_FLUSH_WAIT_MS	2000
6446 
6447 int
6448 iwm_wait_tx_queues_empty(struct iwm_softc *sc)
6449 {
6450 	int i, err;
6451 
6452 	for (i = 0; i < IWM_MAX_QUEUES; i++) {
6453 		struct iwm_tx_ring *ring = &sc->txq[i];
6454 
6455 		if (i == sc->cmdqid)
6456 			continue;
6457 
6458 		while (ring->queued > 0) {
6459 			err = tsleep_nsec(ring, 0, "iwmflush",
6460 			    MSEC_TO_NSEC(IWM_FLUSH_WAIT_MS));
6461 			if (err)
6462 				return err;
6463 		}
6464 	}
6465 
6466 	return 0;
6467 }
6468 
6469 void
6470 iwm_led_enable(struct iwm_softc *sc)
6471 {
6472 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
6473 }
6474 
6475 void
6476 iwm_led_disable(struct iwm_softc *sc)
6477 {
6478 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
6479 }
6480 
6481 int
6482 iwm_led_is_enabled(struct iwm_softc *sc)
6483 {
6484 	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
6485 }
6486 
6487 #define IWM_LED_BLINK_TIMEOUT_MSEC    200
6488 
6489 void
6490 iwm_led_blink_timeout(void *arg)
6491 {
6492 	struct iwm_softc *sc = arg;
6493 
6494 	if (iwm_led_is_enabled(sc))
6495 		iwm_led_disable(sc);
6496 	else
6497 		iwm_led_enable(sc);
6498 
6499 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
6500 }
6501 
6502 void
6503 iwm_led_blink_start(struct iwm_softc *sc)
6504 {
6505 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
6506 	iwm_led_enable(sc);
6507 }
6508 
6509 void
6510 iwm_led_blink_stop(struct iwm_softc *sc)
6511 {
6512 	timeout_del(&sc->sc_led_blink_to);
6513 	iwm_led_disable(sc);
6514 }
6515 
6516 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
6517 
6518 int
6519 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
6520     struct iwm_beacon_filter_cmd *cmd)
6521 {
6522 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
6523 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
6524 }
6525 
6526 void
6527 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
6528     struct iwm_beacon_filter_cmd *cmd)
6529 {
6530 	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
6531 }
6532 
6533 int
6534 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
6535 {
6536 	struct iwm_beacon_filter_cmd cmd = {
6537 		IWM_BF_CMD_CONFIG_DEFAULTS,
6538 		.bf_enable_beacon_filter = htole32(1),
6539 		.ba_enable_beacon_abort = htole32(enable),
6540 	};
6541 
6542 	if (!sc->sc_bf.bf_enabled)
6543 		return 0;
6544 
6545 	sc->sc_bf.ba_enabled = enable;
6546 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
6547 	return iwm_beacon_filter_send_cmd(sc, &cmd);
6548 }
6549 
6550 void
6551 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
6552     struct iwm_mac_power_cmd *cmd)
6553 {
6554 	struct ieee80211com *ic = &sc->sc_ic;
6555 	struct ieee80211_node *ni = &in->in_ni;
6556 	int dtim_period, dtim_msec, keep_alive;
6557 
6558 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
6559 	    in->in_color));
6560 	if (ni->ni_dtimperiod)
6561 		dtim_period = ni->ni_dtimperiod;
6562 	else
6563 		dtim_period = 1;
6564 
6565 	/*
6566 	 * Regardless of power management state the driver must set the
6567 	 * keep alive period. FW will use it for sending keep alive NDPs
6568 	 * immediately after association. Ensure that the keep alive
6569 	 * period is at least 3 * DTIM.
6570 	 */
6571 	dtim_msec = dtim_period * ni->ni_intval;
6572 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
6573 	keep_alive = roundup(keep_alive, 1000) / 1000;
6574 	cmd->keep_alive_seconds = htole16(keep_alive);
6575 
6576 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6577 		cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6578 }
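
/*
 * Worked example for the keep-alive computation above, with
 * hypothetical values: dtim_period 3 and ni_intval 100 give dtim_msec
 * 300, and 3 * dtim_msec = 900 ms is below the 25-second floor, so
 * keep_alive rounds up to 25 seconds. Only an unusually long DTIM
 * interval would push the keep-alive period beyond this default.
 */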
6579 
6580 int
6581 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
6582 {
6583 	int err;
6584 	int ba_enable;
6585 	struct iwm_mac_power_cmd cmd;
6586 
6587 	memset(&cmd, 0, sizeof(cmd));
6588 
6589 	iwm_power_build_cmd(sc, in, &cmd);
6590 
6591 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
6592 	    sizeof(cmd), &cmd);
6593 	if (err != 0)
6594 		return err;
6595 
6596 	ba_enable = !!(cmd.flags &
6597 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
6598 	return iwm_update_beacon_abort(sc, in, ba_enable);
6599 }
6600 
6601 int
6602 iwm_power_update_device(struct iwm_softc *sc)
6603 {
6604 	struct iwm_device_power_cmd cmd = { };
6605 	struct ieee80211com *ic = &sc->sc_ic;
6606 
6607 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6608 		cmd.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6609 
6610 	return iwm_send_cmd_pdu(sc,
6611 	    IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6612 }
6613 
6614 int
6615 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
6616 {
6617 	struct iwm_beacon_filter_cmd cmd = {
6618 		IWM_BF_CMD_CONFIG_DEFAULTS,
6619 		.bf_enable_beacon_filter = htole32(1),
6620 	};
6621 	int err;
6622 
6623 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
6624 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
6625 
6626 	if (err == 0)
6627 		sc->sc_bf.bf_enabled = 1;
6628 
6629 	return err;
6630 }
6631 
6632 int
6633 iwm_disable_beacon_filter(struct iwm_softc *sc)
6634 {
6635 	struct iwm_beacon_filter_cmd cmd;
6636 	int err;
6637 
6638 	memset(&cmd, 0, sizeof(cmd));
6639 
6640 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
6641 	if (err == 0)
6642 		sc->sc_bf.bf_enabled = 0;
6643 
6644 	return err;
6645 }
6646 
6647 int
6648 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
6649 {
6650 	struct iwm_add_sta_cmd add_sta_cmd;
6651 	int err;
6652 	uint32_t status;
6653 	size_t cmdsize;
6654 	struct ieee80211com *ic = &sc->sc_ic;
6655 
6656 	if (!update && (sc->sc_flags & IWM_FLAG_STA_ACTIVE))
6657 		panic("STA already added");
6658 
6659 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
6660 
6661 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6662 		add_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
6663 	else
6664 		add_sta_cmd.sta_id = IWM_STATION_ID;
6665 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)) {
6666 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
6667 			add_sta_cmd.station_type = IWM_STA_GENERAL_PURPOSE;
6668 		else
6669 			add_sta_cmd.station_type = IWM_STA_LINK;
6670 	}
6671 	add_sta_cmd.mac_id_n_color
6672 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
6673 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6674 		int qid;
6675 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, etheranyaddr);
6676 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6677 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
6678 		else
6679 			qid = IWM_AUX_QUEUE;
6680 		in->tfd_queue_msk |= (1 << qid);
6681 	} else {
6682 		int ac;
6683 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
6684 			int qid = ac;
6685 			if (isset(sc->sc_enabled_capa,
6686 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6687 				qid += IWM_DQA_MIN_MGMT_QUEUE;
6688 			in->tfd_queue_msk |= (1 << qid);
6689 		}
6690 	}
6691 	if (!update) {
6692 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
6693 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
6694 			    etherbroadcastaddr);
6695 		else
6696 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
6697 			    in->in_ni.ni_bssid);
6698 	}
6699 	add_sta_cmd.add_modify = update ? 1 : 0;
6700 	add_sta_cmd.station_flags_msk
6701 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
6702 	if (update) {
6703 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_QUEUES |
6704 		    IWM_STA_MODIFY_TID_DISABLE_TX);
6705 	}
6706 	add_sta_cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
6707 	add_sta_cmd.tfd_queue_msk = htole32(in->tfd_queue_msk);
6708 
6709 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
6710 		add_sta_cmd.station_flags_msk
6711 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
6712 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
6713 
6714 		if (iwm_mimo_enabled(sc)) {
6715 			if (in->in_ni.ni_rxmcs[1] != 0) {
6716 				add_sta_cmd.station_flags |=
6717 				    htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
6718 			}
6719 			if (in->in_ni.ni_rxmcs[2] != 0) {
6720 				add_sta_cmd.station_flags |=
6721 				    htole32(IWM_STA_FLG_MIMO_EN_MIMO3);
6722 			}
6723 		}
6724 
6725 		add_sta_cmd.station_flags
6726 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
6727 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
6728 		case IEEE80211_AMPDU_PARAM_SS_2:
6729 			add_sta_cmd.station_flags
6730 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
6731 			break;
6732 		case IEEE80211_AMPDU_PARAM_SS_4:
6733 			add_sta_cmd.station_flags
6734 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
6735 			break;
6736 		case IEEE80211_AMPDU_PARAM_SS_8:
6737 			add_sta_cmd.station_flags
6738 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
6739 			break;
6740 		case IEEE80211_AMPDU_PARAM_SS_16:
6741 			add_sta_cmd.station_flags
6742 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
6743 			break;
6744 		default:
6745 			break;
6746 		}
6747 	}
6748 
6749 	status = IWM_ADD_STA_SUCCESS;
6750 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
6751 		cmdsize = sizeof(add_sta_cmd);
6752 	else
6753 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
6754 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize,
6755 	    &add_sta_cmd, &status);
6756 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
6757 		err = EIO;
6758 
6759 	return err;
6760 }
6761 
6762 int
6763 iwm_add_aux_sta(struct iwm_softc *sc)
6764 {
6765 	struct iwm_add_sta_cmd cmd;
6766 	int err, qid;
6767 	uint32_t status;
6768 	size_t cmdsize;
6769 
6770 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
6771 		qid = IWM_DQA_AUX_QUEUE;
6772 		err = iwm_enable_txq(sc, IWM_AUX_STA_ID, qid,
6773 		    IWM_TX_FIFO_MCAST, 0, IWM_MAX_TID_COUNT, 0);
6774 	} else {
6775 		qid = IWM_AUX_QUEUE;
6776 		err = iwm_enable_ac_txq(sc, qid, IWM_TX_FIFO_MCAST);
6777 	}
6778 	if (err)
6779 		return err;
6780 
6781 	memset(&cmd, 0, sizeof(cmd));
6782 	cmd.sta_id = IWM_AUX_STA_ID;
6783 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
6784 		cmd.station_type = IWM_STA_AUX_ACTIVITY;
6785 	cmd.mac_id_n_color =
6786 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
6787 	cmd.tfd_queue_msk = htole32(1 << qid);
6788 	cmd.tid_disable_tx = htole16(0xffff);
6789 
6790 	status = IWM_ADD_STA_SUCCESS;
6791 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
6792 		cmdsize = sizeof(cmd);
6793 	else
6794 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
6795 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
6796 	    &status);
6797 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
6798 		err = EIO;
6799 
6800 	return err;
6801 }
6802 
6803 int
6804 iwm_drain_sta(struct iwm_softc *sc, struct iwm_node* in, int drain)
6805 {
6806 	struct iwm_add_sta_cmd cmd;
6807 	int err;
6808 	uint32_t status;
6809 	size_t cmdsize;
6810 
6811 	memset(&cmd, 0, sizeof(cmd));
6812 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
6813 	    in->in_color));
6814 	cmd.sta_id = IWM_STATION_ID;
6815 	cmd.add_modify = IWM_STA_MODE_MODIFY;
6816 	cmd.station_flags = drain ? htole32(IWM_STA_FLG_DRAIN_FLOW) : 0;
6817 	cmd.station_flags_msk = htole32(IWM_STA_FLG_DRAIN_FLOW);
6818 
6819 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
6820 		cmdsize = sizeof(cmd);
6821 	else
6822 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
6823 
6824 	status = IWM_ADD_STA_SUCCESS;
6825 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA,
6826 	    cmdsize, &cmd, &status);
6827 	if (err) {
6828 		printf("%s: could not update sta (error %d)\n",
6829 		    DEVNAME(sc), err);
6830 		return err;
6831 	}
6832 
6833 	switch (status & IWM_ADD_STA_STATUS_MASK) {
6834 	case IWM_ADD_STA_SUCCESS:
6835 		break;
6836 	default:
6837 		err = EIO;
6838 		printf("%s: could not %s draining for station\n",
6839 		    DEVNAME(sc), drain ? "enable" : "disable");
6840 		break;
6841 	}
6842 
6843 	return err;
6844 }
6845 
6846 int
6847 iwm_flush_sta(struct iwm_softc *sc, struct iwm_node *in)
6848 {
6849 	int err;
6850 
6851 	sc->sc_flags |= IWM_FLAG_TXFLUSH;
6852 
6853 	err = iwm_drain_sta(sc, in, 1);
6854 	if (err)
6855 		goto done;
6856 
6857 	err = iwm_flush_tx_path(sc, in->tfd_queue_msk);
6858 	if (err) {
6859 		printf("%s: could not flush Tx path (error %d)\n",
6860 		    DEVNAME(sc), err);
6861 		goto done;
6862 	}
6863 
6864 	err = iwm_wait_tx_queues_empty(sc);
6865 	if (err) {
6866 		printf("%s: Could not empty Tx queues (error %d)\n",
6867 		    DEVNAME(sc), err);
6868 #if 1
6869 		iwm_dump_driver_status(sc);
6870 #endif
6871 		goto done;
6872 	}
6873 
6874 	err = iwm_drain_sta(sc, in, 0);
6875 done:
6876 	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
6877 	return err;
6878 }
6879 
6880 int
6881 iwm_rm_sta_cmd(struct iwm_softc *sc, struct iwm_node *in)
6882 {
6883 	struct ieee80211com *ic = &sc->sc_ic;
6884 	struct iwm_rm_sta_cmd rm_sta_cmd;
6885 	int err;
6886 
6887 	if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
6888 		panic("sta already removed");
6889 
6890 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
6891 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6892 		rm_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
6893 	else
6894 		rm_sta_cmd.sta_id = IWM_STATION_ID;
6895 
6896 	err = iwm_send_cmd_pdu(sc, IWM_REMOVE_STA, 0, sizeof(rm_sta_cmd),
6897 	    &rm_sta_cmd);
6898 
6899 	return err;
6900 }
6901 
6902 uint16_t
6903 iwm_scan_rx_chain(struct iwm_softc *sc)
6904 {
6905 	uint16_t rx_chain;
6906 	uint8_t rx_ant;
6907 
6908 	rx_ant = iwm_fw_valid_rx_ant(sc);
6909 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
6910 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
6911 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
6912 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
6913 	return htole16(rx_chain);
6914 }
6915 
6916 uint32_t
6917 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
6918 {
6919 	uint32_t tx_ant;
6920 	int i, ind;
6921 
6922 	for (i = 0, ind = sc->sc_scan_last_antenna;
6923 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
6924 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
6925 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
6926 			sc->sc_scan_last_antenna = ind;
6927 			break;
6928 		}
6929 	}
6930 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
6931 
6932 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
6933 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
6934 				   tx_ant);
6935 	else
6936 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
6937 }
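
/*
 * The loop above round-robins the scan Tx antenna across successive
 * calls: with valid antennas A and B (mask 0x3), for example,
 * consecutive scans alternate between ANT_A and ANT_B, spreading scan
 * traffic over the radio chains.
 */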
6938 
6939 uint8_t
6940 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
6941     struct iwm_scan_channel_cfg_lmac *chan, int n_ssids, int bgscan)
6942 {
6943 	struct ieee80211com *ic = &sc->sc_ic;
6944 	struct ieee80211_channel *c;
6945 	uint8_t nchan;
6946 
6947 	for (nchan = 0, c = &ic->ic_channels[1];
6948 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
6949 	    nchan < sc->sc_capa_n_scan_channels;
6950 	    c++) {
6951 		if (c->ic_flags == 0)
6952 			continue;
6953 
6954 		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
6955 		chan->iter_count = htole16(1);
6956 		chan->iter_interval = 0;
6957 		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
6958 		if (n_ssids != 0 && !bgscan)
6959 			chan->flags |= htole32(1 << 1); /* select SSID 0 */
6960 		chan++;
6961 		nchan++;
6962 	}
6963 
6964 	return nchan;
6965 }
6966 
6967 uint8_t
6968 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
6969     struct iwm_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
6970 {
6971 	struct ieee80211com *ic = &sc->sc_ic;
6972 	struct ieee80211_channel *c;
6973 	uint8_t nchan;
6974 
6975 	for (nchan = 0, c = &ic->ic_channels[1];
6976 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
6977 	    nchan < sc->sc_capa_n_scan_channels;
6978 	    c++) {
6979 		if (c->ic_flags == 0)
6980 			continue;
6981 
6982 		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
6983 		chan->iter_count = 1;
6984 		chan->iter_interval = htole16(0);
6985 		if (n_ssids != 0 && !bgscan)
6986 			chan->flags = htole32(1 << 0); /* select SSID 0 */
6987 		chan++;
6988 		nchan++;
6989 	}
6990 
6991 	return nchan;
6992 }
6993 
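/*
 * Older firmware expects the v1 probe request layout.  Rather than
 * duplicating the frame construction code, build the current layout
 * with iwm_fill_probe_req() and copy the result over; the v1
 * structure carries the same members, so a field-wise copy suffices.
 */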
6994 int
6995 iwm_fill_probe_req_v1(struct iwm_softc *sc, struct iwm_scan_probe_req_v1 *preq1)
6996 {
6997 	struct iwm_scan_probe_req preq2;
6998 	int err, i;
6999 
7000 	err = iwm_fill_probe_req(sc, &preq2);
7001 	if (err)
7002 		return err;
7003 
7004 	preq1->mac_header = preq2.mac_header;
7005 	for (i = 0; i < nitems(preq1->band_data); i++)
7006 		preq1->band_data[i] = preq2.band_data[i];
7007 	preq1->common_data = preq2.common_data;
7008 	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
7009 	return 0;
7010 }
7011 
7012 int
7013 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
7014 {
7015 	struct ieee80211com *ic = &sc->sc_ic;
7016 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
7017 	struct ieee80211_rateset *rs;
7018 	size_t remain = sizeof(preq->buf);
7019 	uint8_t *frm, *pos;
7020 
7021 	memset(preq, 0, sizeof(*preq));
7022 
7023 	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
7024 		return ENOBUFS;
7025 
7026 	/*
7027 	 * Build a probe request frame.  Most of the following code is a
7028 	 * copy & paste of what is done in net80211.
7029 	 */
7030 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
7031 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
7032 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
7033 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
7034 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
7035 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
7036 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
7037 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
7038 
7039 	frm = (uint8_t *)(wh + 1);
7040 	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
7041 
7042 	/* Tell the firmware where the MAC header is. */
7043 	preq->mac_header.offset = 0;
7044 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
7045 	remain -= frm - (uint8_t *)wh;
7046 
7047 	/* Fill in 2GHz IEs and tell firmware where they are. */
7048 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
7049 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
7050 		if (remain < 4 + rs->rs_nrates)
7051 			return ENOBUFS;
7052 	} else if (remain < 2 + rs->rs_nrates)
7053 		return ENOBUFS;
7054 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
7055 	pos = frm;
7056 	frm = ieee80211_add_rates(frm, rs);
7057 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
7058 		frm = ieee80211_add_xrates(frm, rs);
7059 	preq->band_data[0].len = htole16(frm - pos);
7060 	remain -= frm - pos;
7061 
7062 	if (isset(sc->sc_enabled_capa,
7063 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
7064 		if (remain < 3)
7065 			return ENOBUFS;
7066 		*frm++ = IEEE80211_ELEMID_DSPARMS;
7067 		*frm++ = 1;
7068 		*frm++ = 0;
7069 		remain -= 3;
7070 	}
7071 
7072 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
7073 		/* Fill in 5GHz IEs. */
7074 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
7075 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
7076 			if (remain < 4 + rs->rs_nrates)
7077 				return ENOBUFS;
7078 		} else if (remain < 2 + rs->rs_nrates)
7079 			return ENOBUFS;
7080 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
7081 		pos = frm;
7082 		frm = ieee80211_add_rates(frm, rs);
7083 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
7084 			frm = ieee80211_add_xrates(frm, rs);
7085 		preq->band_data[1].len = htole16(frm - pos);
7086 		remain -= frm - pos;
7087 	}
7088 
7089 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
7090 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
7091 	pos = frm;
7092 	if (ic->ic_flags & IEEE80211_F_HTON) {
7093 		if (remain < 28)
7094 			return ENOBUFS;
7095 		frm = ieee80211_add_htcaps(frm, ic);
7096 		/* XXX add WME info? */
7097 	}
7098 	preq->common_data.len = htole16(frm - pos);
7099 
7100 	return 0;
7101 }
7102 
7103 int
7104 iwm_lmac_scan(struct iwm_softc *sc, int bgscan)
7105 {
7106 	struct ieee80211com *ic = &sc->sc_ic;
7107 	struct iwm_host_cmd hcmd = {
7108 		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
7109 		.len = { 0, },
7110 		.data = { NULL, },
7111 		.flags = 0,
7112 	};
7113 	struct iwm_scan_req_lmac *req;
7114 	struct iwm_scan_probe_req_v1 *preq;
7115 	size_t req_len;
7116 	int err, async = bgscan;
7117 
7118 	req_len = sizeof(struct iwm_scan_req_lmac) +
7119 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
7120 	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req_v1);
7121 	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
7122 		return ENOMEM;
7123 	req = malloc(req_len, M_DEVBUF,
7124 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7125 	if (req == NULL)
7126 		return ENOMEM;
7127 
7128 	hcmd.len[0] = (uint16_t)req_len;
7129 	hcmd.data[0] = (void *)req;
7130 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
7131 
7132 	/* These timings correspond to iwlwifi's UNASSOC scan. */
7133 	req->active_dwell = 10;
7134 	req->passive_dwell = 110;
7135 	req->fragmented_dwell = 44;
7136 	req->extended_dwell = 90;
7137 	if (bgscan) {
7138 		req->max_out_time = htole32(120);
7139 		req->suspend_time = htole32(120);
7140 	} else {
7141 		req->max_out_time = htole32(0);
7142 		req->suspend_time = htole32(0);
7143 	}
7144 	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
7145 	req->rx_chain_select = iwm_scan_rx_chain(sc);
7146 	req->iter_num = htole32(1);
7147 	req->delay = 0;
7148 
7149 	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
7150 	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
7151 	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
7152 	if (ic->ic_des_esslen == 0)
7153 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
7154 	else
7155 		req->scan_flags |=
7156 		    htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
7157 	if (isset(sc->sc_enabled_capa,
7158 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
7159 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
7160 
7161 	req->flags = htole32(IWM_PHY_BAND_24);
7162 	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
7163 		req->flags |= htole32(IWM_PHY_BAND_5);
7164 	req->filter_flags =
7165 	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
7166 
7167 	/* Tx flags 2 GHz. */
7168 	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
7169 	    IWM_TX_CMD_FLG_BT_DIS);
7170 	req->tx_cmd[0].rate_n_flags =
7171 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
7172 	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
7173 
7174 	/* Tx flags 5 GHz. */
7175 	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
7176 	    IWM_TX_CMD_FLG_BT_DIS);
7177 	req->tx_cmd[1].rate_n_flags =
7178 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
7179 	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
7180 
7181 	/* Check if we're doing an active directed scan. */
7182 	if (ic->ic_des_esslen != 0) {
7183 		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
7184 		req->direct_scan[0].len = ic->ic_des_esslen;
7185 		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
7186 		    ic->ic_des_esslen);
7187 	}
7188 
7189 	req->n_channels = iwm_lmac_scan_fill_channels(sc,
7190 	    (struct iwm_scan_channel_cfg_lmac *)req->data,
7191 	    ic->ic_des_esslen != 0, bgscan);
7192 
7193 	preq = (struct iwm_scan_probe_req_v1 *)(req->data +
7194 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
7195 	    sc->sc_capa_n_scan_channels));
7196 	err = iwm_fill_probe_req_v1(sc, preq);
7197 	if (err) {
7198 		free(req, M_DEVBUF, req_len);
7199 		return err;
7200 	}
7201 
7202 	/* Specify the scan plan: We'll do one iteration. */
7203 	req->schedule[0].iterations = 1;
7204 	req->schedule[0].full_scan_mul = 1;
7205 
7206 	/* Disable EBS. */
7207 	req->channel_opt[0].non_ebs_ratio = 1;
7208 	req->channel_opt[1].non_ebs_ratio = 1;
7209 
7210 	err = iwm_send_cmd(sc, &hcmd);
7211 	free(req, M_DEVBUF, req_len);
7212 	return err;
7213 }
7214 
7215 int
7216 iwm_config_umac_scan(struct iwm_softc *sc)
7217 {
7218 	struct ieee80211com *ic = &sc->sc_ic;
7219 	struct iwm_scan_config *scan_config;
7220 	int err, nchan;
7221 	size_t cmd_size;
7222 	struct ieee80211_channel *c;
7223 	struct iwm_host_cmd hcmd = {
7224 		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_LONG_GROUP, 0),
7225 		.flags = 0,
7226 	};
7227 	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
7228 	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
7229 	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
7230 	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
7231 	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
7232 	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
7233 	    IWM_SCAN_CONFIG_RATE_54M);
7234 
7235 	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
7236 
7237 	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
7238 	if (scan_config == NULL)
7239 		return ENOMEM;
7240 
7241 	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
7242 	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
7243 	scan_config->legacy_rates = htole32(rates |
7244 	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
7245 
7246 	/* These timings correspond to iwlwifi's UNASSOC scan. */
7247 	scan_config->dwell_active = 10;
7248 	scan_config->dwell_passive = 110;
7249 	scan_config->dwell_fragmented = 44;
7250 	scan_config->dwell_extended = 90;
7251 	scan_config->out_of_channel_time = htole32(0);
7252 	scan_config->suspend_time = htole32(0);
7253 
7254 	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
7255 
7256 	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
7257 	scan_config->channel_flags = 0;
7258 
7259 	for (c = &ic->ic_channels[1], nchan = 0;
7260 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7261 	    nchan < sc->sc_capa_n_scan_channels; c++) {
7262 		if (c->ic_flags == 0)
7263 			continue;
7264 		scan_config->channel_array[nchan++] =
7265 		    ieee80211_mhz2ieee(c->ic_freq, 0);
7266 	}
7267 
7268 	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
7269 	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
7270 	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
7271 	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
7272 	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
7273 	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
7274 	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
7275 	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
7276 	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
7277 	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
7278 	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
7279 
7280 	hcmd.data[0] = scan_config;
7281 	hcmd.len[0] = cmd_size;
7282 
7283 	err = iwm_send_cmd(sc, &hcmd);
7284 	free(scan_config, M_DEVBUF, cmd_size);
7285 	return err;
7286 }
7287 
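/*
 * The UMAC scan request layout depends on which API flags the
 * firmware advertises (a summary of the checks below): adaptive
 * dwell v2 selects the v8 layout, plain adaptive dwell the v7
 * layout, and anything else the v1 layout (a v6 layout for
 * 22000-family devices is stubbed out under "notyet").  The request
 * tail likewise comes in a v2 (extended channel) and a v1 flavour.
 */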
7288 int
7289 iwm_umac_scan_size(struct iwm_softc *sc)
7290 {
7291 	int base_size = IWM_SCAN_REQ_UMAC_SIZE_V1;
7292 	int tail_size;
7293 
7294 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7295 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V8;
7296 	else if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7297 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V7;
7298 #ifdef notyet
7299 	else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
7300 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V6;
7301 #endif
7302 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
7303 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v2);
7304 	else
7305 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v1);
7306 
7307 	return base_size + sizeof(struct iwm_scan_channel_cfg_umac) *
7308 	    sc->sc_capa_n_scan_channels + tail_size;
7309 }
7310 
7311 struct iwm_scan_umac_chan_param *
7312 iwm_get_scan_req_umac_chan_param(struct iwm_softc *sc,
7313     struct iwm_scan_req_umac *req)
7314 {
7315 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7316 		return &req->v8.channel;
7317 
7318 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7319 		return &req->v7.channel;
7320 #ifdef notyet
7321 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
7322 		return &req->v6.channel;
7323 #endif
7324 	return &req->v1.channel;
7325 }
7326 
7327 void *
7328 iwm_get_scan_req_umac_data(struct iwm_softc *sc, struct iwm_scan_req_umac *req)
7329 {
7330 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7331 		return (void *)&req->v8.data;
7332 
7333 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7334 		return (void *)&req->v7.data;
7335 #ifdef notyet
7336 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
7337 		return (void *)&req->v6.data;
7338 #endif
7339 	return (void *)&req->v1.data;
7341 }
7342 
7343 /* adaptive dwell max budget time [TU] for full scan */
7344 #define IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
7345 /* adaptive dwell max budget time [TU] for directed scan */
7346 #define IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
7347 /* adaptive dwell default high band APs number */
7348 #define IWM_SCAN_ADWELL_DEFAULT_HB_N_APS 8
7349 /* adaptive dwell default low band APs number */
7350 #define IWM_SCAN_ADWELL_DEFAULT_LB_N_APS 2
7351 /* adaptive dwell default APs number in social channels (1, 6, 11) */
7352 #define IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
7353 
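/*
 * Build and submit a UMAC scan request.  Judging by the field names
 * and the iwlwifi reference driver, the adaptive dwell parameters
 * above let the firmware cut a channel's dwell time short once the
 * expected number of APs has been heard, subject to the per-scan
 * budget.
 */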
7354 int
7355 iwm_umac_scan(struct iwm_softc *sc, int bgscan)
7356 {
7357 	struct ieee80211com *ic = &sc->sc_ic;
7358 	struct iwm_host_cmd hcmd = {
7359 		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_LONG_GROUP, 0),
7360 		.len = { 0, },
7361 		.data = { NULL, },
7362 		.flags = 0,
7363 	};
7364 	struct iwm_scan_req_umac *req;
7365 	void *cmd_data, *tail_data;
7366 	struct iwm_scan_req_umac_tail_v2 *tail;
7367 	struct iwm_scan_req_umac_tail_v1 *tailv1;
7368 	struct iwm_scan_umac_chan_param *chanparam;
7369 	size_t req_len;
7370 	int err, async = bgscan;
7371 
7372 	req_len = iwm_umac_scan_size(sc);
7373 	if ((req_len < IWM_SCAN_REQ_UMAC_SIZE_V1 +
7374 	    sizeof(struct iwm_scan_req_umac_tail_v1)) ||
7375 	    req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
7376 		return ERANGE;
7377 	req = malloc(req_len, M_DEVBUF,
7378 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7379 	if (req == NULL)
7380 		return ENOMEM;
7381 
7382 	hcmd.len[0] = (uint16_t)req_len;
7383 	hcmd.data[0] = (void *)req;
7384 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
7385 
7386 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
7387 		req->v7.adwell_default_n_aps_social =
7388 			IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
7389 		req->v7.adwell_default_n_aps =
7390 			IWM_SCAN_ADWELL_DEFAULT_LB_N_APS;
7391 
7392 		if (ic->ic_des_esslen != 0)
7393 			req->v7.adwell_max_budget =
7394 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
7395 		else
7396 			req->v7.adwell_max_budget =
7397 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
7398 
7399 		req->v7.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
7400 		req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = 0;
7401 		req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = 0;
7402 
7403 		if (isset(sc->sc_ucode_api,
7404 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
7405 			req->v8.active_dwell[IWM_SCAN_LB_LMAC_IDX] = 10;
7406 			req->v8.passive_dwell[IWM_SCAN_LB_LMAC_IDX] = 110;
7407 		} else {
7408 			req->v7.active_dwell = 10;
7409 			req->v7.passive_dwell = 110;
7410 			req->v7.fragmented_dwell = 44;
7411 		}
7412 	} else {
7413 		/* These timings correspond to iwlwifi's UNASSOC scan. */
7414 		req->v1.active_dwell = 10;
7415 		req->v1.passive_dwell = 110;
7416 		req->v1.fragmented_dwell = 44;
7417 		req->v1.extended_dwell = 90;
7418 	}
7419 
7420 	if (bgscan) {
7421 		const uint32_t timeout = htole32(120);
7422 		if (isset(sc->sc_ucode_api,
7423 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
7424 			req->v8.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
7425 			req->v8.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
7426 		} else if (isset(sc->sc_ucode_api,
7427 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
7428 			req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
7429 			req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
7430 		} else {
7431 			req->v1.max_out_time = timeout;
7432 			req->v1.suspend_time = timeout;
7433 		}
7434 	}
7435 
7436 	req->v1.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
7437 	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
7438 
7439 	cmd_data = iwm_get_scan_req_umac_data(sc, req);
7440 	chanparam = iwm_get_scan_req_umac_chan_param(sc, req);
7441 	chanparam->count = iwm_umac_scan_fill_channels(sc,
7442 	    (struct iwm_scan_channel_cfg_umac *)cmd_data,
7443 	    ic->ic_des_esslen != 0, bgscan);
7444 	chanparam->flags = 0;
7445 
7446 	tail_data = cmd_data + sizeof(struct iwm_scan_channel_cfg_umac) *
7447 	    sc->sc_capa_n_scan_channels;
7448 	tail = tail_data;
7449 	/* tail v1 layout differs in preq and direct_scan member fields. */
7450 	tailv1 = tail_data;
7451 
7452 	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
7453 	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
7454 
7455 	/* Check if we're doing an active directed scan. */
7456 	if (ic->ic_des_esslen != 0) {
7457 		if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
7458 			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
7459 			tail->direct_scan[0].len = ic->ic_des_esslen;
7460 			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
7461 			    ic->ic_des_esslen);
7462 		} else {
7463 			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
7464 			tailv1->direct_scan[0].len = ic->ic_des_esslen;
7465 			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
7466 			    ic->ic_des_esslen);
7467 		}
7468 		req->general_flags |=
7469 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
7470 	} else
7471 		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
7472 
7473 	if (isset(sc->sc_enabled_capa,
7474 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
7475 		req->general_flags |=
7476 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
7477 
7478 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
7479 		req->general_flags |=
7480 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
7481 	} else {
7482 		req->general_flags |=
7483 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
7484 	}
7485 
7486 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
7487 		err = iwm_fill_probe_req(sc, &tail->preq);
7488 	else
7489 		err = iwm_fill_probe_req_v1(sc, &tailv1->preq);
7490 	if (err) {
7491 		free(req, M_DEVBUF, req_len);
7492 		return err;
7493 	}
7494 
7495 	/* Specify the scan plan: We'll do one iteration. */
7496 	tail->schedule[0].interval = 0;
7497 	tail->schedule[0].iter_count = 1;
7498 
7499 	err = iwm_send_cmd(sc, &hcmd);
7500 	free(req, M_DEVBUF, req_len);
7501 	return err;
7502 }
7503 
7504 uint8_t
7505 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
7506 {
7507 	int i;
7508 	uint8_t rval;
7509 
7510 	for (i = 0; i < rs->rs_nrates; i++) {
7511 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
7512 		if (rval == iwm_rates[ridx].rate)
7513 			return rs->rs_rates[i];
7514 	}
7515 
7516 	return 0;
7517 }
7518 
7519 int
7520 iwm_rval2ridx(int rval)
7521 {
7522 	int ridx;
7523 
7524 	for (ridx = 0; ridx < nitems(iwm_rates); ridx++) {
7525 		if (iwm_rates[ridx].plcp == IWM_RATE_INVM_PLCP)
7526 			continue;
7527 		if (rval == iwm_rates[ridx].rate)
7528 			break;
7529 	}
7530 
7531 	return ridx;
7532 }
7533 
7534 void
7535 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
7536     int *ofdm_rates)
7537 {
7538 	struct ieee80211_node *ni = &in->in_ni;
7539 	struct ieee80211_rateset *rs = &ni->ni_rates;
7540 	int lowest_present_ofdm = -1;
7541 	int lowest_present_cck = -1;
7542 	uint8_t cck = 0;
7543 	uint8_t ofdm = 0;
7544 	int i;
7545 
7546 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
7547 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
7548 		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
7549 			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
7550 				continue;
7551 			cck |= (1 << i);
7552 			if (lowest_present_cck == -1 || lowest_present_cck > i)
7553 				lowest_present_cck = i;
7554 		}
7555 	}
7556 	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
7557 		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
7558 			continue;
7559 		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
7560 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
7561 			lowest_present_ofdm = i;
7562 	}
7563 
7564 	/*
7565 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
7566 	 * variables. This isn't sufficient though, as there might not
7567 	 * be all the right rates in the bitmap. E.g. if the only basic
7568 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
7569 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
7570 	 *
7571 	 *    [...] a STA responding to a received frame shall transmit
7572 	 *    its Control Response frame [...] at the highest rate in the
7573 	 *    BSSBasicRateSet parameter that is less than or equal to the
7574 	 *    rate of the immediately previous frame in the frame exchange
7575 	 *    sequence ([...]) and that is of the same modulation class
7576 	 *    ([...]) as the received frame. If no rate contained in the
7577 	 *    BSSBasicRateSet parameter meets these conditions, then the
7578 	 *    control frame sent in response to a received frame shall be
7579 	 *    transmitted at the highest mandatory rate of the PHY that is
7580 	 *    less than or equal to the rate of the received frame, and
7581 	 *    that is of the same modulation class as the received frame.
7582 	 *
7583 	 * As a consequence, we need to add all mandatory rates that are
7584 	 * lower than all of the basic rates to these bitmaps.
7585 	 */
7586 
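	/*
	 * Illustrative example: if the BSSBasicRateSet contains only
	 * 5.5M and 11M, lowest_present_cck refers to 5.5M, and the CCK
	 * fixups further below OR in the mandatory 1M and 2M bits;
	 * likewise a basic set of {24M} makes the OFDM fixups here
	 * pull in the mandatory 6M and 12M bits.
	 */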
7587 	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
7588 		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
7589 	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
7590 		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
7591 	/* 6M already there or needed so always add */
7592 	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
7593 
7594 	/*
7595 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
7596 	 * Note, however:
7597 	 *  - if no CCK rates are basic, it must be ERP since there must
7598 	 *    be some basic rates at all, so they're OFDM => ERP PHY
7599 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
7600 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
7601 	 *  - if 5.5M is basic, 1M and 2M are mandatory
7602 	 *  - if 2M is basic, 1M is mandatory
7603 	 *  - if 1M is basic, that's the only valid ACK rate.
7604 	 * As a consequence, it's not as complicated as it sounds, just add
7605 	 * any lower rates to the ACK rate bitmap.
7606 	 */
7607 	if (IWM_RATE_11M_INDEX < lowest_present_cck)
7608 		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
7609 	if (IWM_RATE_5M_INDEX < lowest_present_cck)
7610 		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
7611 	if (IWM_RATE_2M_INDEX < lowest_present_cck)
7612 		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
7613 	/* 1M already there or needed so always add */
7614 	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
7615 
7616 	*cck_rates = cck;
7617 	*ofdm_rates = ofdm;
7618 }
7619 
7620 void
7621 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
7622     struct iwm_mac_ctx_cmd *cmd, uint32_t action)
7623 {
7624 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
7625 	struct ieee80211com *ic = &sc->sc_ic;
7626 	struct ieee80211_node *ni = ic->ic_bss;
7627 	int cck_ack_rates, ofdm_ack_rates;
7628 	int i;
7629 
7630 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
7631 	    in->in_color));
7632 	cmd->action = htole32(action);
7633 
7634 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7635 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_LISTENER);
7636 	else if (ic->ic_opmode == IEEE80211_M_STA)
7637 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
7638 	else
7639 		panic("unsupported operating mode %d", ic->ic_opmode);
7640 	cmd->tsf_id = htole32(IWM_TSF_ID_A);
7641 
7642 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
7643 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7644 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
7645 		return;
7646 	}
7647 
7648 	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
7649 	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
7650 	cmd->cck_rates = htole32(cck_ack_rates);
7651 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
7652 
7653 	cmd->cck_short_preamble
7654 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
7655 	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
7656 	cmd->short_slot
7657 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
7658 	      ? IWM_MAC_FLG_SHORT_SLOT : 0);
7659 
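	/*
	 * net80211 stores the EDCA parameters as exponents and in
	 * units of 32 usec; expand them into the absolute values the
	 * command expects, e.g. ECW 4 becomes CW 15 and a TXOP limit
	 * of 94 units becomes 3008 usec.
	 */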
7660 	for (i = 0; i < EDCA_NUM_AC; i++) {
7661 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
7662 		int txf = iwm_ac_to_tx_fifo[i];
7663 
7664 		cmd->ac[txf].cw_min = htole16(IWM_EXP2(ac->ac_ecwmin));
7665 		cmd->ac[txf].cw_max = htole16(IWM_EXP2(ac->ac_ecwmax));
7666 		cmd->ac[txf].aifsn = ac->ac_aifsn;
7667 		cmd->ac[txf].fifos_mask = (1 << txf);
7668 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
7669 	}
7670 	if (ni->ni_flags & IEEE80211_NODE_QOS)
7671 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
7672 
7673 	if (ni->ni_flags & IEEE80211_NODE_HT) {
7674 		enum ieee80211_htprot htprot =
7675 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
7676 		switch (htprot) {
7677 		case IEEE80211_HTPROT_NONE:
7678 			break;
7679 		case IEEE80211_HTPROT_NONMEMBER:
7680 		case IEEE80211_HTPROT_NONHT_MIXED:
7681 			cmd->protection_flags |=
7682 			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
7683 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
7684 				cmd->protection_flags |=
7685 				    htole32(IWM_MAC_PROT_FLG_SELF_CTS_EN);
7686 			break;
7687 		case IEEE80211_HTPROT_20MHZ:
7688 			if (ic->ic_htcaps & IEEE80211_HTCAP_CBW20_40) {
7689 				/* XXX ... and if our channel is 40 MHz ... */
7690 				cmd->protection_flags |=
7691 				    htole32(IWM_MAC_PROT_FLG_HT_PROT |
7692 				    IWM_MAC_PROT_FLG_FAT_PROT);
7693 				if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
7694 					cmd->protection_flags |= htole32(
7695 					    IWM_MAC_PROT_FLG_SELF_CTS_EN);
7696 			}
7697 			break;
7698 		default:
7699 			break;
7700 		}
7701 
7702 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
7703 	}
7704 	if (ic->ic_flags & IEEE80211_F_USEPROT)
7705 		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
7706 
7707 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
7708 #undef IWM_EXP2
7709 }
7710 
7711 void
7712 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
7713     struct iwm_mac_data_sta *sta, int assoc)
7714 {
7715 	struct ieee80211_node *ni = &in->in_ni;
7716 	uint32_t dtim_off;
7717 	uint64_t tsf;
7718 
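	/*
	 * Compute when the next DTIM beacon is due in both the on-air
	 * TSF time base and the device's local time base (a reading of
	 * the code below): ni_tstamp and ni_rstamp hold the TSF and
	 * local timestamps of the last received beacon, and dtim_off
	 * advances both by the remaining beacon intervals, converted
	 * to usec via IEEE80211_DUR_TU (1024 usec per TU).
	 */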
7719 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
7720 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
7721 	tsf = letoh64(tsf);
7722 
7723 	sta->is_assoc = htole32(assoc);
7724 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
7725 	sta->dtim_tsf = htole64(tsf + dtim_off);
7726 	sta->bi = htole32(ni->ni_intval);
7727 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
7728 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
7729 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
7730 	sta->listen_interval = htole32(10);
7731 	sta->assoc_id = htole32(ni->ni_associd);
7732 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
7733 }
7734 
7735 int
7736 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
7737     int assoc)
7738 {
7739 	struct ieee80211com *ic = &sc->sc_ic;
7740 	struct ieee80211_node *ni = &in->in_ni;
7741 	struct iwm_mac_ctx_cmd cmd;
7742 	int active = (sc->sc_flags & IWM_FLAG_MAC_ACTIVE);
7743 
7744 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
7745 		panic("MAC already added");
7746 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
7747 		panic("MAC already removed");
7748 
7749 	memset(&cmd, 0, sizeof(cmd));
7750 
7751 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action);
7752 
7753 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7754 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_PROMISC |
7755 		    IWM_MAC_FILTER_IN_CONTROL_AND_MGMT |
7756 		    IWM_MAC_FILTER_ACCEPT_GRP |
7757 		    IWM_MAC_FILTER_IN_BEACON |
7758 		    IWM_MAC_FILTER_IN_PROBE_REQUEST |
7759 		    IWM_MAC_FILTER_IN_CRC32);
7760 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
7761 		/*
7762 		 * Allow beacons to pass through as long as we are not
7763 		 * associated or we do not have DTIM period information.
7764 		 */
7765 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
7766 	else
7767 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
7768 
7769 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
7770 }
7771 
7772 int
7773 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
7774 {
7775 	struct iwm_time_quota_cmd cmd;
7776 	int i, idx, num_active_macs, quota, quota_rem;
7777 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
7778 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
7779 	uint16_t id;
7780 
7781 	memset(&cmd, 0, sizeof(cmd));
7782 
7783 	/* currently, PHY ID == binding ID */
7784 	if (in && in->in_phyctxt) {
7785 		id = in->in_phyctxt->id;
7786 		KASSERT(id < IWM_MAX_BINDINGS);
7787 		colors[id] = in->in_phyctxt->color;
7788 		if (running)
7789 			n_ifs[id] = 1;
7790 	}
7791 
7792 	/*
7793 	 * The FW's scheduling session consists of
7794 	 * IWM_MAX_QUOTA fragments. Divide these fragments
7795 	 * equally between all the bindings that require quota
7796 	 */
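	/*
	 * Worked example (illustrative, and assuming IWM_MAX_QUOTA is
	 * 95 as in related iwm-family drivers): with one active MAC the
	 * binding gets all 95 fragments; with two, each would get 47
	 * and the one leftover fragment would be added to the first
	 * binding below.
	 */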
7797 	num_active_macs = 0;
7798 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
7799 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
7800 		num_active_macs += n_ifs[i];
7801 	}
7802 
7803 	quota = 0;
7804 	quota_rem = 0;
7805 	if (num_active_macs) {
7806 		quota = IWM_MAX_QUOTA / num_active_macs;
7807 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
7808 	}
7809 
7810 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
7811 		if (colors[i] < 0)
7812 			continue;
7813 
7814 		cmd.quotas[idx].id_and_color =
7815 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
7816 
7817 		if (n_ifs[i] <= 0) {
7818 			cmd.quotas[idx].quota = htole32(0);
7819 			cmd.quotas[idx].max_duration = htole32(0);
7820 		} else {
7821 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
7822 			cmd.quotas[idx].max_duration = htole32(0);
7823 		}
7824 		idx++;
7825 	}
7826 
7827 	/* Give the remainder of the session to the first binding */
7828 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
7829 
7830 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
7831 	    sizeof(cmd), &cmd);
7832 }
7833 
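/*
 * Every task scheduled on behalf of the driver takes a reference on
 * sc->task_refs; the reference is dropped when the task has run or
 * been cancelled, which lets iwm_stop() wait for all pending tasks
 * to drain before tearing down driver state.
 */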
7834 void
7835 iwm_add_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
7836 {
7837 	int s = splnet();
7838 
7839 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
7840 		splx(s);
7841 		return;
7842 	}
7843 
7844 	refcnt_take(&sc->task_refs);
7845 	if (!task_add(taskq, task))
7846 		refcnt_rele_wake(&sc->task_refs);
7847 	splx(s);
7848 }
7849 
7850 void
7851 iwm_del_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
7852 {
7853 	if (task_del(taskq, task))
7854 		refcnt_rele(&sc->task_refs);
7855 }
7856 
7857 int
7858 iwm_scan(struct iwm_softc *sc)
7859 {
7860 	struct ieee80211com *ic = &sc->sc_ic;
7861 	struct ifnet *ifp = IC2IFP(ic);
7862 	int err;
7863 
7864 	if (sc->sc_flags & IWM_FLAG_BGSCAN) {
7865 		err = iwm_scan_abort(sc);
7866 		if (err) {
7867 			printf("%s: could not abort background scan\n",
7868 			    DEVNAME(sc));
7869 			return err;
7870 		}
7871 	}
7872 
7873 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
7874 		err = iwm_umac_scan(sc, 0);
7875 	else
7876 		err = iwm_lmac_scan(sc, 0);
7877 	if (err) {
7878 		printf("%s: could not initiate scan\n", DEVNAME(sc));
7879 		return err;
7880 	}
7881 
7882 	/*
7883 	 * The current mode might have been fixed during association.
7884 	 * Ensure all channels get scanned.
7885 	 */
7886 	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
7887 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
7888 
7889 	sc->sc_flags |= IWM_FLAG_SCANNING;
7890 	if (ifp->if_flags & IFF_DEBUG)
7891 		printf("%s: %s -> %s\n", ifp->if_xname,
7892 		    ieee80211_state_name[ic->ic_state],
7893 		    ieee80211_state_name[IEEE80211_S_SCAN]);
7894 	if ((sc->sc_flags & IWM_FLAG_BGSCAN) == 0) {
7895 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
7896 		ieee80211_node_cleanup(ic, ic->ic_bss);
7897 	}
7898 	ic->ic_state = IEEE80211_S_SCAN;
7899 	iwm_led_blink_start(sc);
7900 	wakeup(&ic->ic_state); /* wake iwm_init() */
7901 
7902 	return 0;
7903 }
7904 
7905 int
7906 iwm_bgscan(struct ieee80211com *ic)
7907 {
7908 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
7909 	int err;
7910 
7911 	if (sc->sc_flags & IWM_FLAG_SCANNING)
7912 		return 0;
7913 
7914 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
7915 		err = iwm_umac_scan(sc, 1);
7916 	else
7917 		err = iwm_lmac_scan(sc, 1);
7918 	if (err) {
7919 		printf("%s: could not initiate scan\n", DEVNAME(sc));
7920 		return err;
7921 	}
7922 
7923 	sc->sc_flags |= IWM_FLAG_BGSCAN;
7924 	return 0;
7925 }
7926 
7927 int
7928 iwm_umac_scan_abort(struct iwm_softc *sc)
7929 {
7930 	struct iwm_umac_scan_abort cmd = { 0 };
7931 
7932 	return iwm_send_cmd_pdu(sc,
7933 	    IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC),
7934 	    0, sizeof(cmd), &cmd);
7935 }
7936 
7937 int
7938 iwm_lmac_scan_abort(struct iwm_softc *sc)
7939 {
7940 	struct iwm_host_cmd cmd = {
7941 		.id = IWM_SCAN_OFFLOAD_ABORT_CMD,
7942 	};
7943 	int err, status;
7944 
7945 	err = iwm_send_cmd_status(sc, &cmd, &status);
7946 	if (err)
7947 		return err;
7948 
7949 	if (status != IWM_CAN_ABORT_STATUS) {
7950 		/*
7951 		 * The scan abort will return 1 for success or
7952 		 * 2 for "failure".  A failure condition can be
7953 		 * due to simply not being in an active scan, which
7954 		 * can occur if we send the scan abort before the
7955 		 * microcode has notified us that a scan has completed.
7956 		 */
7957 		return EBUSY;
7958 	}
7959 
7960 	return 0;
7961 }
7962 
7963 int
7964 iwm_scan_abort(struct iwm_softc *sc)
7965 {
7966 	int err;
7967 
7968 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
7969 		err = iwm_umac_scan_abort(sc);
7970 	else
7971 		err = iwm_lmac_scan_abort(sc);
7972 
7973 	if (err == 0)
7974 		sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
7975 	return err;
7976 }
7977 
7978 int
7979 iwm_auth(struct iwm_softc *sc)
7980 {
7981 	struct ieee80211com *ic = &sc->sc_ic;
7982 	struct iwm_node *in = (void *)ic->ic_bss;
7983 	uint32_t duration;
7984 	int generation = sc->sc_generation, err;
7985 
7986 	splassert(IPL_NET);
7987 
7988 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7989 		sc->sc_phyctxt[0].channel = ic->ic_ibss_chan;
7990 	else
7991 		sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
7992 	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
7993 	    IWM_FW_CTXT_ACTION_MODIFY, 0);
7994 	if (err) {
7995 		printf("%s: could not update PHY context (error %d)\n",
7996 		    DEVNAME(sc), err);
7997 		return err;
7998 	}
7999 	in->in_phyctxt = &sc->sc_phyctxt[0];
8000 
8001 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
8002 	if (err) {
8003 		printf("%s: could not add MAC context (error %d)\n",
8004 		    DEVNAME(sc), err);
8005 		return err;
8006 	}
8007 	sc->sc_flags |= IWM_FLAG_MAC_ACTIVE;
8008 
8009 	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
8010 	if (err) {
8011 		printf("%s: could not add binding (error %d)\n",
8012 		    DEVNAME(sc), err);
8013 		goto rm_mac_ctxt;
8014 	}
8015 	sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE;
8016 
8017 	in->tid_disable_ampdu = 0xffff;
8018 	err = iwm_add_sta_cmd(sc, in, 0);
8019 	if (err) {
8020 		printf("%s: could not add sta (error %d)\n",
8021 		    DEVNAME(sc), err);
8022 		goto rm_binding;
8023 	}
8024 	sc->sc_flags |= IWM_FLAG_STA_ACTIVE;
8025 
8026 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8027 		return 0;
8028 
8029 	/*
8030 	 * Prevent the FW from wandering off channel during association
8031 	 * by "protecting" the session with a time event.
8032 	 */
8033 	if (in->in_ni.ni_intval)
8034 		duration = in->in_ni.ni_intval * 2;
8035 	else
8036 		duration = IEEE80211_DUR_TU;
8037 	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
8038 
8039 	return 0;
8040 
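	/*
	 * Unwind in reverse order of setup, but only if sc_generation
	 * still matches, i.e. the device was not stopped or reset while
	 * this function slept; after a reset these firmware contexts no
	 * longer exist and must not be removed.
	 */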
8041 rm_binding:
8042 	if (generation == sc->sc_generation) {
8043 		iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8044 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8045 	}
8046 rm_mac_ctxt:
8047 	if (generation == sc->sc_generation) {
8048 		iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8049 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8050 	}
8051 	return err;
8052 }
8053 
8054 int
8055 iwm_deauth(struct iwm_softc *sc)
8056 {
8057 	struct ieee80211com *ic = &sc->sc_ic;
8058 	struct iwm_node *in = (void *)ic->ic_bss;
8059 	int err;
8060 
8061 	splassert(IPL_NET);
8062 
8063 	iwm_unprotect_session(sc, in);
8064 
8065 	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
8066 		err = iwm_flush_sta(sc, in);
8067 		if (err)
8068 			return err;
8069 		err = iwm_rm_sta_cmd(sc, in);
8070 		if (err) {
8071 			printf("%s: could not remove STA (error %d)\n",
8072 			    DEVNAME(sc), err);
8073 			return err;
8074 		}
8075 		in->tid_disable_ampdu = 0xffff;
8076 		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
8077 		sc->sc_rx_ba_sessions = 0;
8078 		sc->ba_rx.start_tidmask = 0;
8079 		sc->ba_rx.stop_tidmask = 0;
8080 		sc->tx_ba_queue_mask = 0;
8081 		sc->ba_tx.start_tidmask = 0;
8082 		sc->ba_tx.stop_tidmask = 0;
8083 	}
8084 
8085 	if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE) {
8086 		err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8087 		if (err) {
8088 			printf("%s: could not remove binding (error %d)\n",
8089 			    DEVNAME(sc), err);
8090 			return err;
8091 		}
8092 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8093 	}
8094 
8095 	if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE) {
8096 		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8097 		if (err) {
8098 			printf("%s: could not remove MAC context (error %d)\n",
8099 			    DEVNAME(sc), err);
8100 			return err;
8101 		}
8102 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8103 	}
8104 
8105 	return 0;
8106 }
8107 
8108 int
8109 iwm_assoc(struct iwm_softc *sc)
8110 {
8111 	struct ieee80211com *ic = &sc->sc_ic;
8112 	struct iwm_node *in = (void *)ic->ic_bss;
8113 	int update_sta = (sc->sc_flags & IWM_FLAG_STA_ACTIVE);
8114 	int err;
8115 
8116 	splassert(IPL_NET);
8117 
8118 	if (!update_sta)
8119 		in->tid_disable_ampdu = 0xffff;
8120 	err = iwm_add_sta_cmd(sc, in, update_sta);
8121 	if (err) {
8122 		printf("%s: could not %s STA (error %d)\n",
8123 		    DEVNAME(sc), update_sta ? "update" : "add", err);
8124 		return err;
8125 	}
8126 
8127 	return 0;
8128 }
8129 
8130 int
8131 iwm_disassoc(struct iwm_softc *sc)
8132 {
8133 	struct ieee80211com *ic = &sc->sc_ic;
8134 	struct iwm_node *in = (void *)ic->ic_bss;
8135 	int err;
8136 
8137 	splassert(IPL_NET);
8138 
8139 	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
8140 		err = iwm_flush_sta(sc, in);
8141 		if (err)
8142 			return err;
8143 		err = iwm_rm_sta_cmd(sc, in);
8144 		if (err) {
8145 			printf("%s: could not remove STA (error %d)\n",
8146 			    DEVNAME(sc), err);
8147 			return err;
8148 		}
8149 		in->tid_disable_ampdu = 0xffff;
8150 		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
8151 		sc->sc_rx_ba_sessions = 0;
8152 		sc->ba_rx.start_tidmask = 0;
8153 		sc->ba_rx.stop_tidmask = 0;
8154 		sc->tx_ba_queue_mask = 0;
8155 		sc->ba_tx.start_tidmask = 0;
8156 		sc->ba_tx.stop_tidmask = 0;
8157 	}
8158 
8159 	return 0;
8160 }
8161 
8162 int
8163 iwm_run(struct iwm_softc *sc)
8164 {
8165 	struct ieee80211com *ic = &sc->sc_ic;
8166 	struct iwm_node *in = (void *)ic->ic_bss;
8167 	int err;
8168 
8169 	splassert(IPL_NET);
8170 
8171 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8172 		/* Add a MAC context and a sniffing STA. */
8173 		err = iwm_auth(sc);
8174 		if (err)
8175 			return err;
8176 	}
8177 
8178 	/* Configure Rx chains for MIMO. */
8179 	if ((ic->ic_opmode == IEEE80211_M_MONITOR ||
8180 	    (in->in_ni.ni_flags & IEEE80211_NODE_HT)) &&
8181 	    iwm_mimo_enabled(sc)) {
8182 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0],
8183 		    2, 2, IWM_FW_CTXT_ACTION_MODIFY, 0);
8184 		if (err) {
8185 			printf("%s: failed to update PHY\n",
8186 			    DEVNAME(sc));
8187 			return err;
8188 		}
8189 	}
8190 
8191 	/* Update STA again, for HT-related settings such as MIMO. */
8192 	err = iwm_add_sta_cmd(sc, in, 1);
8193 	if (err) {
8194 		printf("%s: could not update STA (error %d)\n",
8195 		    DEVNAME(sc), err);
8196 		return err;
8197 	}
8198 
8199 	/* We have now been assigned an associd by the AP. */
8200 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
8201 	if (err) {
8202 		printf("%s: failed to update MAC\n", DEVNAME(sc));
8203 		return err;
8204 	}
8205 
8206 	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
8207 	if (err) {
8208 		printf("%s: could not set sf full on (error %d)\n",
8209 		    DEVNAME(sc), err);
8210 		return err;
8211 	}
8212 
8213 	err = iwm_allow_mcast(sc);
8214 	if (err) {
8215 		printf("%s: could not allow mcast (error %d)\n",
8216 		    DEVNAME(sc), err);
8217 		return err;
8218 	}
8219 
8220 	err = iwm_power_update_device(sc);
8221 	if (err) {
8222 		printf("%s: could not send power command (error %d)\n",
8223 		    DEVNAME(sc), err);
8224 		return err;
8225 	}
8226 #ifdef notyet
8227 	/*
8228 	 * Disabled for now. Default beacon filter settings
8229 	 * prevent net80211 from getting ERP and HT protection
8230 	 * updates from beacons.
8231 	 */
8232 	err = iwm_enable_beacon_filter(sc, in);
8233 	if (err) {
8234 		printf("%s: could not enable beacon filter\n",
8235 		    DEVNAME(sc));
8236 		return err;
8237 	}
8238 #endif
8239 	err = iwm_power_mac_update_mode(sc, in);
8240 	if (err) {
8241 		printf("%s: could not update MAC power (error %d)\n",
8242 		    DEVNAME(sc), err);
8243 		return err;
8244 	}
8245 
8246 	err = iwm_update_quotas(sc, in, 1);
8247 	if (err) {
8248 		printf("%s: could not update quotas (error %d)\n",
8249 		    DEVNAME(sc), err);
8250 		return err;
8251 	}
8252 
8253 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
8254 	ieee80211_ra_node_init(&in->in_rn);
8255 
8256 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8257 		iwm_led_blink_start(sc);
8258 		return 0;
8259 	}
8260 
8261 	/* Start at lowest available bit-rate, AMRR will raise. */
8262 	in->in_ni.ni_txrate = 0;
8263 	in->in_ni.ni_txmcs = 0;
8264 	iwm_setrates(in, 0);
8265 
8266 	timeout_add_msec(&sc->sc_calib_to, 500);
8267 	iwm_led_enable(sc);
8268 
8269 	return 0;
8270 }
8271 
8272 int
8273 iwm_run_stop(struct iwm_softc *sc)
8274 {
8275 	struct ieee80211com *ic = &sc->sc_ic;
8276 	struct iwm_node *in = (void *)ic->ic_bss;
8277 	int err;
8278 
8279 	splassert(IPL_NET);
8280 
8281 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8282 		iwm_led_blink_stop(sc);
8283 
8284 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
8285 	if (err)
8286 		return err;
8287 
8288 	iwm_disable_beacon_filter(sc);
8289 
8290 	err = iwm_update_quotas(sc, in, 0);
8291 	if (err) {
8292 		printf("%s: could not update quotas (error %d)\n",
8293 		    DEVNAME(sc), err);
8294 		return err;
8295 	}
8296 
8297 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
8298 	if (err) {
8299 		printf("%s: failed to update MAC\n", DEVNAME(sc));
8300 		return err;
8301 	}
8302 
8303 	/* Reset Tx chains in case MIMO was enabled. */
8304 	if ((in->in_ni.ni_flags & IEEE80211_NODE_HT) &&
8305 	    iwm_mimo_enabled(sc)) {
8306 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
8307 		    IWM_FW_CTXT_ACTION_MODIFY, 0);
8308 		if (err) {
8309 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8310 			return err;
8311 		}
8312 	}
8313 
8314 	return 0;
8315 }
8316 
8317 struct ieee80211_node *
8318 iwm_node_alloc(struct ieee80211com *ic)
8319 {
8320 	return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
8321 }
8322 
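/*
 * Hardware CCMP key programming comes in two command layouts; as used
 * below, only the newer layout carries the CCMP transmit sequence
 * counter (transmit_seq_cnt), and the v1 layout is used when the
 * firmware does not advertise IWM_UCODE_TLV_API_TKIP_MIC_KEYS.
 */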
8323 int
8324 iwm_set_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
8325     struct ieee80211_key *k)
8326 {
8327 	struct iwm_softc *sc = ic->ic_softc;
8328 	struct iwm_add_sta_key_cmd_v1 cmd;
8329 
8330 	memset(&cmd, 0, sizeof(cmd));
8331 
8332 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
8333 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
8334 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
8335 	    IWM_STA_KEY_FLG_KEYID_MSK));
8336 	if (k->k_flags & IEEE80211_KEY_GROUP)
8337 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
8338 
8339 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8340 	cmd.common.key_offset = 0;
8341 	cmd.common.sta_id = IWM_STATION_ID;
8342 
8343 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
8344 	    sizeof(cmd), &cmd);
8345 }
8346 
8347 int
8348 iwm_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
8349     struct ieee80211_key *k)
8350 {
8351 	struct iwm_softc *sc = ic->ic_softc;
8352 	struct iwm_add_sta_key_cmd cmd;
8353 
8354 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
8355 	    k->k_cipher != IEEE80211_CIPHER_CCMP) {
8356 		/* Fall back to software crypto for group keys and non-CCMP ciphers. */
8357 		return (ieee80211_set_key(ic, ni, k));
8358 	}
8359 
8360 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
8361 		return iwm_set_key_v1(ic, ni, k);
8362 
8363 	memset(&cmd, 0, sizeof(cmd));
8364 
8365 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
8366 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
8367 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
8368 	    IWM_STA_KEY_FLG_KEYID_MSK));
8369 	if (k->k_flags & IEEE80211_KEY_GROUP)
8370 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
8371 
8372 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8373 	cmd.common.key_offset = 0;
8374 	cmd.common.sta_id = IWM_STATION_ID;
8375 
8376 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
8377 
8378 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
8379 	    sizeof(cmd), &cmd);
8380 }
8381 
8382 void
8383 iwm_delete_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
8384     struct ieee80211_key *k)
8385 {
8386 	struct iwm_softc *sc = ic->ic_softc;
8387 	struct iwm_add_sta_key_cmd_v1 cmd;
8388 
8389 	memset(&cmd, 0, sizeof(cmd));
8390 
8391 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
8392 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
8393 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
8394 	    IWM_STA_KEY_FLG_KEYID_MSK));
8395 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8396 	cmd.common.key_offset = 0;
8397 	cmd.common.sta_id = IWM_STATION_ID;
8398 
8399 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
8400 }
8401 
8402 void
8403 iwm_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
8404     struct ieee80211_key *k)
8405 {
8406 	struct iwm_softc *sc = ic->ic_softc;
8407 	struct iwm_add_sta_key_cmd cmd;
8408 
8409 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
8410 	    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
8411 		/* Fall back to software crypto for group keys and non-CCMP ciphers. */
8412 		ieee80211_delete_key(ic, ni, k);
8413 		return;
8414 	}
8415 
8416 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
8417 		return iwm_delete_key_v1(ic, ni, k);
8418 
8419 	memset(&cmd, 0, sizeof(cmd));
8420 
8421 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
8422 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
8423 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
8424 	    IWM_STA_KEY_FLG_KEYID_MSK));
8425 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8426 	cmd.common.key_offset = 0;
8427 	cmd.common.sta_id = IWM_STATION_ID;
8428 
8429 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
8430 }
8431 
8432 void
8433 iwm_calib_timeout(void *arg)
8434 {
8435 	struct iwm_softc *sc = arg;
8436 	struct ieee80211com *ic = &sc->sc_ic;
8437 	struct iwm_node *in = (void *)ic->ic_bss;
8438 	struct ieee80211_node *ni = &in->in_ni;
8439 	int s;
8440 
8441 	s = splnet();
8442 	if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
8443 	    (ni->ni_flags & IEEE80211_NODE_HT) == 0 &&
8444 	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
8445 		int old_txrate = ni->ni_txrate;
8446 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
8447 		/*
8448 		 * If AMRR has chosen a new TX rate we must update
8449 		 * the firmware's LQ rate table.
8450 		 * ni_txrate may change again before the task runs, so
8451 		 * cache the chosen rate in the iwm_node structure.
8452 		 */
8453 		if (ni->ni_txrate != old_txrate)
8454 			iwm_setrates(in, 1);
8455 	}
8456 
8457 	splx(s);
8458 
8459 	timeout_add_msec(&sc->sc_calib_to, 500);
8460 }
8461 
8462 void
8463 iwm_setrates(struct iwm_node *in, int async)
8464 {
8465 	struct ieee80211_node *ni = &in->in_ni;
8466 	struct ieee80211com *ic = ni->ni_ic;
8467 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
8468 	struct iwm_lq_cmd lqcmd;
8469 	struct ieee80211_rateset *rs = &ni->ni_rates;
8470 	int i, ridx, ridx_min, ridx_max, j, sgi_ok = 0, mimo, tab = 0;
8471 	struct iwm_host_cmd cmd = {
8472 		.id = IWM_LQ_CMD,
8473 		.len = { sizeof(lqcmd), },
8474 	};
8475 
8476 	cmd.flags = async ? IWM_CMD_ASYNC : 0;
8477 
8478 	memset(&lqcmd, 0, sizeof(lqcmd));
8479 	lqcmd.sta_id = IWM_STATION_ID;
8480 
8481 	if (ic->ic_flags & IEEE80211_F_USEPROT)
8482 		lqcmd.flags |= IWM_LQ_FLAG_USE_RTS_MSK;
8483 
8484 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
8485 	    ieee80211_node_supports_ht_sgi20(ni)) {
8486 		ni->ni_flags |= IEEE80211_NODE_HT_SGI20;
8487 		sgi_ok = 1;
8488 	}
8489 
8490 	/*
8491 	 * Fill the LQ rate selection table with legacy and/or HT rates
8492 	 * in descending order, i.e. with the node's current TX rate first.
8493 	 * In cases where throughput of an HT rate corresponds to a legacy
8494 	 * rate it makes no sense to add both. We rely on the fact that
8495 	 * iwm_rates is laid out such that equivalent HT/legacy rates share
8496 	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
8497 	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
8498 	 */
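	/*
	 * Illustrative example: for a SISO HT node currently at MCS 7,
	 * the loop below emits one entry per MCS the peer can receive,
	 * from MCS 7 downwards, and the remainder of the table is then
	 * padded with the lowest basic legacy rate.
	 */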
8499 	j = 0;
8500 	ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
8501 	mimo = iwm_is_mimo_mcs(ni->ni_txmcs);
8502 	ridx_max = (mimo ? IWM_RIDX_MAX : IWM_LAST_HT_SISO_RATE);
8503 	for (ridx = ridx_max; ridx >= ridx_min; ridx--) {
8504 		uint8_t plcp = iwm_rates[ridx].plcp;
8505 		uint8_t ht_plcp = iwm_rates[ridx].ht_plcp;
8506 
8507 		if (j >= nitems(lqcmd.rs_table))
8508 			break;
8509 		tab = 0;
8510 		if (ni->ni_flags & IEEE80211_NODE_HT) {
8511 			if (ht_plcp == IWM_RATE_HT_SISO_MCS_INV_PLCP)
8512 				continue;
8513 			/* Do not mix SISO and MIMO HT rates. */
8514 			if ((mimo && !iwm_is_mimo_ht_plcp(ht_plcp)) ||
8515 			    (!mimo && iwm_is_mimo_ht_plcp(ht_plcp)))
8516 				continue;
8517 			for (i = ni->ni_txmcs; i >= 0; i--) {
8518 				if (isclr(ni->ni_rxmcs, i))
8519 					continue;
8520 				if (ridx == iwm_mcs2ridx[i]) {
8521 					tab = ht_plcp;
8522 					tab |= IWM_RATE_MCS_HT_MSK;
8523 					if (sgi_ok)
8524 						tab |= IWM_RATE_MCS_SGI_MSK;
8525 					break;
8526 				}
8527 			}
8528 		} else if (plcp != IWM_RATE_INVM_PLCP) {
8529 			for (i = ni->ni_txrate; i >= 0; i--) {
8530 				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
8531 				    IEEE80211_RATE_VAL)) {
8532 					tab = plcp;
8533 					break;
8534 				}
8535 			}
8536 		}
8537 
8538 		if (tab == 0)
8539 			continue;
8540 
8541 		if (iwm_is_mimo_ht_plcp(ht_plcp))
8542 			tab |= IWM_RATE_MCS_ANT_AB_MSK;
8543 		else
8544 			tab |= IWM_RATE_MCS_ANT_A_MSK;
8545 
8546 		if (IWM_RIDX_IS_CCK(ridx))
8547 			tab |= IWM_RATE_MCS_CCK_MSK;
8548 		lqcmd.rs_table[j++] = htole32(tab);
8549 	}
8550 
8551 	lqcmd.mimo_delim = (mimo ? j : 0);
8552 
8553 	/* Fill the rest with the lowest possible rate */
8554 	while (j < nitems(lqcmd.rs_table)) {
8555 		tab = iwm_rates[ridx_min].plcp;
8556 		if (IWM_RIDX_IS_CCK(ridx_min))
8557 			tab |= IWM_RATE_MCS_CCK_MSK;
8558 		tab |= IWM_RATE_MCS_ANT_A_MSK;
8559 		lqcmd.rs_table[j++] = htole32(tab);
8560 	}
8561 
8562 	lqcmd.single_stream_ant_msk = IWM_ANT_A;
8563 	lqcmd.dual_stream_ant_msk = IWM_ANT_AB;
8564 
8565 	lqcmd.agg_time_limit = htole16(4000);	/* 4ms */
8566 	lqcmd.agg_disable_start_th = 3;
8567 	lqcmd.agg_frame_cnt_limit = 0x3f;
8568 
8569 	cmd.data[0] = &lqcmd;
8570 	iwm_send_cmd(sc, &cmd);
8571 }
8572 
8573 int
8574 iwm_media_change(struct ifnet *ifp)
8575 {
8576 	struct iwm_softc *sc = ifp->if_softc;
8577 	struct ieee80211com *ic = &sc->sc_ic;
8578 	uint8_t rate, ridx;
8579 	int err;
8580 
8581 	err = ieee80211_media_change(ifp);
8582 	if (err != ENETRESET)
8583 		return err;
8584 
8585 	if (ic->ic_fixed_mcs != -1)
8586 		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
8587 	else if (ic->ic_fixed_rate != -1) {
8588 		rate = ic->ic_sup_rates[ic->ic_curmode].
8589 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
8590 		/* Map 802.11 rate to HW rate index. */
8591 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
8592 			if (iwm_rates[ridx].rate == rate)
8593 				break;
8594 		sc->sc_fixed_ridx = ridx;
8595 	}
8596 
8597 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
8598 	    (IFF_UP | IFF_RUNNING)) {
8599 		iwm_stop(ifp);
8600 		err = iwm_init(ifp);
8601 	}
8602 	return err;
8603 }
8604 
8605 void
8606 iwm_newstate_task(void *psc)
8607 {
8608 	struct iwm_softc *sc = (struct iwm_softc *)psc;
8609 	struct ieee80211com *ic = &sc->sc_ic;
8610 	enum ieee80211_state nstate = sc->ns_nstate;
8611 	enum ieee80211_state ostate = ic->ic_state;
8612 	int arg = sc->ns_arg;
8613 	int err = 0, s = splnet();
8614 
8615 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
8616 		/* iwm_stop() is waiting for us. */
8617 		refcnt_rele_wake(&sc->task_refs);
8618 		splx(s);
8619 		return;
8620 	}
8621 
8622 	if (ostate == IEEE80211_S_SCAN) {
8623 		if (nstate == ostate) {
8624 			if (sc->sc_flags & IWM_FLAG_SCANNING) {
8625 				refcnt_rele_wake(&sc->task_refs);
8626 				splx(s);
8627 				return;
8628 			}
8629 			/* Firmware is no longer scanning. Do another scan. */
8630 			goto next_scan;
8631 		} else
8632 			iwm_led_blink_stop(sc);
8633 	}
8634 
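	/*
	 * When moving down (or sideways) in the state machine, unwind
	 * firmware state in the reverse order it was established: RUN
	 * teardown first, then disassociation, then deauthentication,
	 * as implemented by the fall-through cascade below.
	 */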
8635 	if (nstate <= ostate) {
8636 		switch (ostate) {
8637 		case IEEE80211_S_RUN:
8638 			err = iwm_run_stop(sc);
8639 			if (err)
8640 				goto out;
8641 			/* FALLTHROUGH */
8642 		case IEEE80211_S_ASSOC:
8643 			if (nstate <= IEEE80211_S_ASSOC) {
8644 				err = iwm_disassoc(sc);
8645 				if (err)
8646 					goto out;
8647 			}
8648 			/* FALLTHROUGH */
8649 		case IEEE80211_S_AUTH:
8650 			if (nstate <= IEEE80211_S_AUTH) {
8651 				err = iwm_deauth(sc);
8652 				if (err)
8653 					goto out;
8654 			}
8655 			/* FALLTHROUGH */
8656 		case IEEE80211_S_SCAN:
8657 		case IEEE80211_S_INIT:
8658 			break;
8659 		}
8660 
8661 		/* Die now if iwm_stop() was called while we were sleeping. */
8662 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
8663 			refcnt_rele_wake(&sc->task_refs);
8664 			splx(s);
8665 			return;
8666 		}
8667 	}
8668 
8669 	switch (nstate) {
8670 	case IEEE80211_S_INIT:
8671 		break;
8672 
8673 	case IEEE80211_S_SCAN:
8674 next_scan:
8675 		err = iwm_scan(sc);
8676 		if (err)
8677 			break;
8678 		refcnt_rele_wake(&sc->task_refs);
8679 		splx(s);
8680 		return;
8681 
8682 	case IEEE80211_S_AUTH:
8683 		err = iwm_auth(sc);
8684 		break;
8685 
8686 	case IEEE80211_S_ASSOC:
8687 		err = iwm_assoc(sc);
8688 		break;
8689 
8690 	case IEEE80211_S_RUN:
8691 		err = iwm_run(sc);
8692 		break;
8693 	}
8694 
8695 out:
8696 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
8697 		if (err)
8698 			task_add(systq, &sc->init_task);
8699 		else
8700 			sc->sc_newstate(ic, nstate, arg);
8701 	}
8702 	refcnt_rele_wake(&sc->task_refs);
8703 	splx(s);
8704 }
8705 
8706 int
8707 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
8708 {
8709 	struct ifnet *ifp = IC2IFP(ic);
8710 	struct iwm_softc *sc = ifp->if_softc;
8711 	int i;
8712 
8713 	if (ic->ic_state == IEEE80211_S_RUN) {
8714 		timeout_del(&sc->sc_calib_to);
8715 		iwm_del_task(sc, systq, &sc->ba_task);
8716 		iwm_del_task(sc, systq, &sc->mac_ctxt_task);
8717 		for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
8718 			struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
8719 			iwm_clear_reorder_buffer(sc, rxba);
8720 		}
8721 	}
8722 
8723 	sc->ns_nstate = nstate;
8724 	sc->ns_arg = arg;
8725 
8726 	iwm_add_task(sc, sc->sc_nswq, &sc->newstate_task);
8727 
8728 	return 0;
8729 }
8730 
8731 void
8732 iwm_endscan(struct iwm_softc *sc)
8733 {
8734 	struct ieee80211com *ic = &sc->sc_ic;
8735 
8736 	if ((sc->sc_flags & (IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN)) == 0)
8737 		return;
8738 
8739 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
8740 	ieee80211_end_scan(&ic->ic_if);
8741 }
8742 
8743 /*
8744  * Aging and idle timeouts for the different possible scenarios
8745  * in the default configuration.
8746  */
8747 static const uint32_t
8748 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
8749 	{
8750 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
8751 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
8752 	},
8753 	{
8754 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
8755 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
8756 	},
8757 	{
8758 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
8759 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
8760 	},
8761 	{
8762 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
8763 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
8764 	},
8765 	{
8766 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
8767 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
8768 	},
8769 };
8770 
8771 /*
8772  * Aging and idle timeouts for the different possible scenarios
8773  * in single BSS MAC configuration.
8774  */
8775 static const uint32_t
8776 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
8777 	{
8778 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
8779 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
8780 	},
8781 	{
8782 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
8783 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
8784 	},
8785 	{
8786 		htole32(IWM_SF_MCAST_AGING_TIMER),
8787 		htole32(IWM_SF_MCAST_IDLE_TIMER)
8788 	},
8789 	{
8790 		htole32(IWM_SF_BA_AGING_TIMER),
8791 		htole32(IWM_SF_BA_IDLE_TIMER)
8792 	},
8793 	{
8794 		htole32(IWM_SF_TX_RE_AGING_TIMER),
8795 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
8796 	},
8797 };
8798 
8799 void
8800 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
8801     struct ieee80211_node *ni)
8802 {
8803 	int i, j, watermark;
8804 
8805 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
8806 
8807 	/*
8808 	 * If we are in the association flow, check the antenna configuration
8809 	 * capabilities of the AP and choose the watermark accordingly.
8810 	 */
8811 	if (ni) {
8812 		if (ni->ni_flags & IEEE80211_NODE_HT) {
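			/*
			 * ni_rxmcs[1] covers HT MCS 8-15; a nonzero value
			 * means the peer can receive two spatial streams.
			 */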
8813 			if (ni->ni_rxmcs[1] != 0)
8814 				watermark = IWM_SF_W_MARK_MIMO2;
8815 			else
8816 				watermark = IWM_SF_W_MARK_SISO;
8817 		} else {
8818 			watermark = IWM_SF_W_MARK_LEGACY;
8819 		}
8820 	/* default watermark value for unassociated mode. */
8821 	} else {
8822 		watermark = IWM_SF_W_MARK_MIMO2;
8823 	}
8824 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
8825 
8826 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
8827 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
8828 			sf_cmd->long_delay_timeouts[i][j] =
8829 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
8830 		}
8831 	}
8832 
8833 	if (ni) {
8834 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
8835 		       sizeof(iwm_sf_full_timeout));
8836 	} else {
8837 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
8838 		       sizeof(iwm_sf_full_timeout_def));
8839 	}
8840 
8841 }
8842 
8843 int
8844 iwm_sf_config(struct iwm_softc *sc, int new_state)
8845 {
8846 	struct ieee80211com *ic = &sc->sc_ic;
8847 	struct iwm_sf_cfg_cmd sf_cmd = {
8848 		.state = htole32(new_state),
8849 	};
8850 	int err = 0;
8851 
8852 #if 0	/* only used for models with sdio interface, in iwlwifi */
8853 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
8854 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
8855 #endif
8856 
8857 	switch (new_state) {
8858 	case IWM_SF_UNINIT:
8859 	case IWM_SF_INIT_OFF:
8860 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
8861 		break;
8862 	case IWM_SF_FULL_ON:
8863 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
8864 		break;
8865 	default:
8866 		return EINVAL;
8867 	}
8868 
8869 	err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
8870 				   sizeof(sf_cmd), &sf_cmd);
8871 	return err;
8872 }
8873 
8874 int
8875 iwm_send_bt_init_conf(struct iwm_softc *sc)
8876 {
8877 	struct iwm_bt_coex_cmd bt_cmd;
8878 
8879 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
8880 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
8881 
8882 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
8883 	    &bt_cmd);
8884 }
8885 
8886 int
8887 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
8888 {
8889 	struct iwm_mcc_update_cmd mcc_cmd;
8890 	struct iwm_host_cmd hcmd = {
8891 		.id = IWM_MCC_UPDATE_CMD,
8892 		.flags = IWM_CMD_WANT_RESP,
8893 		.data = { &mcc_cmd },
8894 	};
8895 	int err;
8896 	int resp_v2 = isset(sc->sc_enabled_capa,
8897 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
8898 
8899 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000 &&
8900 	    !sc->sc_nvm.lar_enabled) {
8901 		return 0;
8902 	}
8903 
8904 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
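	/*
	 * The MCC is the two-letter regulatory (country) code packed
	 * into 16 bits; e.g. "ZZ", the world-wide domain, encodes as
	 * 0x5a5a.
	 */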
8905 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
8906 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
8907 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
8908 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
8909 	else
8910 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
8911 
8912 	if (resp_v2) {
8913 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
8914 		hcmd.resp_pkt_len = sizeof(struct iwm_rx_packet) +
8915 		    sizeof(struct iwm_mcc_update_resp);
8916 	} else {
8917 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
8918 		hcmd.resp_pkt_len = sizeof(struct iwm_rx_packet) +
8919 		    sizeof(struct iwm_mcc_update_resp_v1);
8920 	}
8921 
8922 	err = iwm_send_cmd(sc, &hcmd);
8923 	if (err)
8924 		return err;
8925 
8926 	iwm_free_resp(sc, &hcmd);
8927 
8928 	return 0;
8929 }
8930 
8931 void
8932 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
8933 {
8934 	struct iwm_host_cmd cmd = {
8935 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
8936 		.len = { sizeof(uint32_t), },
8937 		.data = { &backoff, },
8938 	};
8939 
8940 	iwm_send_cmd(sc, &cmd);
8941 }
8942 
8943 void
8944 iwm_free_fw_paging(struct iwm_softc *sc)
8945 {
8946 	int i;
8947 
8948 	if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
8949 		return;
8950 
8951 	for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
8952 		iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
8953 	}
8954 
8955 	memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
8956 }
8957 
8958 int
8959 iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
8960 {
8961 	int sec_idx, idx;
8962 	uint32_t offset = 0;
8963 
8964 	/*
8965 	 * Find the start of the paging image. If CPU2 exists and is in
8966 	 * paging format, the image is laid out as follows:
8967 	 * CPU1 sections (2 or more)
8968 	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
8969 	 * CPU2 sections (not paged)
8970 	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
8971 	 * CPU2 sections from the CPU2 paging sections
8972 	 * CPU2 paging CSS
8973 	 * CPU2 paging image (including instructions and data)
8974 	 */
8975 	for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
8976 		if (image->fw_sect[sec_idx].fws_devoff ==
8977 		    IWM_PAGING_SEPARATOR_SECTION) {
8978 			sec_idx++;
8979 			break;
8980 		}
8981 	}
8982 
8983 	/*
8984 	 * If paging is enabled, there should be at least 2 more sections
8985 	 * left (one for the CSS block and one for the paging data).
8986 	 */
8987 	if (sec_idx >= nitems(image->fw_sect) - 1) {
8988 		printf("%s: Paging: Missing CSS and/or paging sections\n",
8989 		    DEVNAME(sc));
8990 		iwm_free_fw_paging(sc);
8991 		return EINVAL;
8992 	}
8993 
8994 	/* Copy the CSS block to DRAM. */
8995 	DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n",
8996 	    DEVNAME(sc), sec_idx));
8997 
8998 	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
8999 	    image->fw_sect[sec_idx].fws_data,
9000 	    sc->fw_paging_db[0].fw_paging_size);
9001 
9002 	DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
9003 	    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
9004 
9005 	sec_idx++;
9006 
9007 	/*
9008 	 * Copy the paging blocks to DRAM. The loop index starts at 1
9009 	 * because the CSS block (index 0) has already been copied. The
9010 	 * loop stops before num_of_paging_blk because the last block,
9011 	 * which may not be full, is copied separately below.
9012 	 */
9013 	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
9014 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
9015 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
9016 		    sc->fw_paging_db[idx].fw_paging_size);
9017 
9018 		DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
9019 		    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
9020 
9021 		offset += sc->fw_paging_db[idx].fw_paging_size;
9022 	}
9023 
9024 	/* copy the last paging block */
9025 	if (sc->num_of_pages_in_last_blk > 0) {
9026 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
9027 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
9028 		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
9029 
9030 		DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
9031 		    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
9032 	}
9033 
9034 	return 0;
9035 }
9036 
9037 int
9038 iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
9039 {
9040 	int blk_idx = 0;
9041 	int error, num_of_pages;
9042 
9043 	if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
9044 		int i;
9045 		/* Device got reset; set up firmware paging again. */
9046 		bus_dmamap_sync(sc->sc_dmat,
9047 		    sc->fw_paging_db[0].fw_paging_block.map,
9048 		    0, IWM_FW_PAGING_SIZE,
9049 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
9050 		for (i = 1; i < sc->num_of_paging_blk + 1; i++) {
9051 			bus_dmamap_sync(sc->sc_dmat,
9052 			    sc->fw_paging_db[i].fw_paging_block.map,
9053 			    0, IWM_PAGING_BLOCK_SIZE,
9054 			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
9055 		}
9056 		return 0;
9057 	}
9058 
9059 	/* Ensure that (1 << IWM_BLOCK_2_EXP_SIZE) == IWM_PAGING_BLOCK_SIZE. */
9060 #if (1 << IWM_BLOCK_2_EXP_SIZE) != IWM_PAGING_BLOCK_SIZE
9061 #error IWM_BLOCK_2_EXP_SIZE must be the base-2 logarithm of IWM_PAGING_BLOCK_SIZE
9062 #endif
9063 
9064 	num_of_pages = image->paging_mem_size / IWM_FW_PAGING_SIZE;
9065 	sc->num_of_paging_blk =
9066 	    ((num_of_pages - 1) / IWM_NUM_OF_PAGE_PER_GROUP) + 1;
9067 
9068 	sc->num_of_pages_in_last_blk =
9069 		num_of_pages -
9070 		IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
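	/*
	 * Worked example: a hypothetical 344064-byte paging image holds
	 * 84 pages of IWM_FW_PAGING_SIZE (4KB) each; at 8 pages per
	 * block this requires 11 blocks, and the last block holds
	 * 84 - 8 * 10 = 4 pages.
	 */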
9071 
9072 	DPRINTF(("%s: Paging: allocating mem for %d paging blocks, each block"
9073 	    " holds 8 pages, last block holds %d pages\n", DEVNAME(sc),
9074 	    sc->num_of_paging_blk,
9075 	    sc->num_of_pages_in_last_blk));
9076 
9077 	/* Allocate a 4KB block for the paging CSS. */
9078 	error = iwm_dma_contig_alloc(sc->sc_dmat,
9079 	    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
9080 	    4096);
9081 	if (error) {
9082 		/* free all the previous pages since we failed */
9083 		iwm_free_fw_paging(sc);
9084 		return ENOMEM;
9085 	}
9086 
9087 	sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
9088 
9089 	DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
9090 	    DEVNAME(sc)));
9091 
9092 	/*
9093 	 * Allocate blocks in DRAM. Since the CSS block occupies
9094 	 * fw_paging_db[0], the loop starts at index 1.
9095 	 */
9096 	for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
9097 		/* Allocate a block of IWM_PAGING_BLOCK_SIZE (32KB). */
9099 		error = iwm_dma_contig_alloc(sc->sc_dmat,
9100 		     &sc->fw_paging_db[blk_idx].fw_paging_block,
9101 		    IWM_PAGING_BLOCK_SIZE, 4096);
9102 		if (error) {
9103 			/* free all the previous pages since we failed */
9104 			iwm_free_fw_paging(sc);
9105 			return ENOMEM;
9106 		}
9107 
9108 		sc->fw_paging_db[blk_idx].fw_paging_size =
9109 		    IWM_PAGING_BLOCK_SIZE;
9110 
9111 		DPRINTF((
9112 		    "%s: Paging: allocated 32K bytes for firmware paging.\n",
9113 		    DEVNAME(sc)));
9114 	}
9115 
9116 	return 0;
9117 }
9118 
9119 int
9120 iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
9121 {
9122 	int ret;
9123 
9124 	ret = iwm_alloc_fw_paging_mem(sc, fw);
9125 	if (ret)
9126 		return ret;
9127 
9128 	return iwm_fill_paging_mem(sc, fw);
9129 }
9130 
9131 /* Send the paging command to the firmware when CPU2 has a paging image. */
9132 int
9133 iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
9134 {
9135 	int blk_idx;
9136 	uint32_t dev_phy_addr;
9137 	struct iwm_fw_paging_cmd fw_paging_cmd = {
9138 		.flags =
9139 			htole32(IWM_PAGING_CMD_IS_SECURED |
9140 				IWM_PAGING_CMD_IS_ENABLED |
9141 				(sc->num_of_pages_in_last_blk <<
9142 				IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
9143 		.block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
9144 		.block_num = htole32(sc->num_of_paging_blk),
9145 	};
9146 
9147 	/* Loop over all paging blocks plus the CSS block. */
9148 	for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
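		/*
		 * The firmware addresses each block by its 4KB page frame
		 * number, hence the right shift by IWM_PAGE_2_EXP_SIZE.
		 */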
9149 		dev_phy_addr = htole32(
9150 		    sc->fw_paging_db[blk_idx].fw_paging_block.paddr >>
9151 		    IWM_PAGE_2_EXP_SIZE);
9152 		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
9153 		bus_dmamap_sync(sc->sc_dmat,
9154 		    sc->fw_paging_db[blk_idx].fw_paging_block.map, 0,
9155 		    blk_idx == 0 ? IWM_FW_PAGING_SIZE : IWM_PAGING_BLOCK_SIZE,
9156 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
9157 	}
9158 
9159 	return iwm_send_cmd_pdu(sc, iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD,
9160 					       IWM_LONG_GROUP, 0),
9161 	    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
9162 }
9163 
9164 int
9165 iwm_init_hw(struct iwm_softc *sc)
9166 {
9167 	struct ieee80211com *ic = &sc->sc_ic;
9168 	int err, i, ac, qid;
9169 
9170 	err = iwm_preinit(sc);
9171 	if (err)
9172 		return err;
9173 
9174 	err = iwm_start_hw(sc);
9175 	if (err) {
9176 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9177 		return err;
9178 	}
9179 
9180 	err = iwm_run_init_mvm_ucode(sc, 0);
9181 	if (err)
9182 		return err;
9183 
9184 	/* Stop and restart the HW since the INIT image has just been loaded. */
9185 	iwm_stop_device(sc);
9186 	err = iwm_start_hw(sc);
9187 	if (err) {
9188 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9189 		return err;
9190 	}
9191 
9192 	/* Restart, this time with the regular firmware */
9193 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
9194 	if (err) {
9195 		printf("%s: could not load firmware\n", DEVNAME(sc));
9196 		goto err;
9197 	}
9198 
9199 	if (!iwm_nic_lock(sc))
9200 		return EBUSY;
9201 
9202 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
9203 	if (err) {
9204 		printf("%s: could not init tx ant config (error %d)\n",
9205 		    DEVNAME(sc), err);
9206 		goto err;
9207 	}
9208 
9209 	err = iwm_send_phy_db_data(sc);
9210 	if (err) {
9211 		printf("%s: could not init phy db (error %d)\n",
9212 		    DEVNAME(sc), err);
9213 		goto err;
9214 	}
9215 
9216 	err = iwm_send_phy_cfg_cmd(sc);
9217 	if (err) {
9218 		printf("%s: could not send phy config (error %d)\n",
9219 		    DEVNAME(sc), err);
9220 		goto err;
9221 	}
9222 
9223 	err = iwm_send_bt_init_conf(sc);
9224 	if (err) {
9225 		printf("%s: could not init bt coex (error %d)\n",
9226 		    DEVNAME(sc), err);
9227 		goto err;
9228 	}
9229 
9230 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
9231 		err = iwm_send_dqa_cmd(sc);
9232 		if (err)
9233 			return err;
9234 	}
9235 
9236 	/* Add auxiliary station for scanning */
9237 	err = iwm_add_aux_sta(sc);
9238 	if (err) {
9239 		printf("%s: could not add aux station (error %d)\n",
9240 		    DEVNAME(sc), err);
9241 		goto err;
9242 	}
9243 
9244 	for (i = 0; i < 1; i++) {
9245 		/*
9246 		 * The channel used here isn't relevant as it's
9247 		 * going to be overwritten in the other flows.
9248 		 * For now use the first channel we have.
9249 		 */
9250 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
9251 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
9252 		    IWM_FW_CTXT_ACTION_ADD, 0);
9253 		if (err) {
9254 			printf("%s: could not add phy context %d (error %d)\n",
9255 			    DEVNAME(sc), i, err);
9256 			goto err;
9257 		}
9258 	}
9259 
9260 	/* Initialize tx backoffs to the minimum. */
9261 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
9262 		iwm_tt_tx_backoff(sc, 0);
9263
9265 	err = iwm_config_ltr(sc);
9266 	if (err) {
9267 		printf("%s: PCIe LTR configuration failed (error %d)\n",
9268 		    DEVNAME(sc), err);
9269 	}
9270 
9271 	err = iwm_power_update_device(sc);
9272 	if (err) {
9273 		printf("%s: could not send power command (error %d)\n",
9274 		    DEVNAME(sc), err);
9275 		goto err;
9276 	}
9277 
9278 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
9279 		err = iwm_send_update_mcc_cmd(sc, "ZZ");
9280 		if (err) {
9281 			printf("%s: could not init LAR (error %d)\n",
9282 			    DEVNAME(sc), err);
9283 			goto err;
9284 		}
9285 	}
9286 
9287 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
9288 		err = iwm_config_umac_scan(sc);
9289 		if (err) {
9290 			printf("%s: could not configure scan (error %d)\n",
9291 			    DEVNAME(sc), err);
9292 			goto err;
9293 		}
9294 	}
9295 
9296 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
9297 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
9298 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
9299 		else
9300 			qid = IWM_AUX_QUEUE;
9301 		err = iwm_enable_txq(sc, IWM_MONITOR_STA_ID, qid,
9302 		    iwm_ac_to_tx_fifo[EDCA_AC_BE], 0, IWM_MAX_TID_COUNT, 0);
9303 		if (err) {
9304 			printf("%s: could not enable monitor inject Tx queue "
9305 			    "(error %d)\n", DEVNAME(sc), err);
9306 			goto err;
9307 		}
9308 	} else {
9309 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
9310 			if (isset(sc->sc_enabled_capa,
9311 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
9312 				qid = ac + IWM_DQA_MIN_MGMT_QUEUE;
9313 			else
9314 				qid = ac;
9315 			err = iwm_enable_txq(sc, IWM_STATION_ID, qid,
9316 			    iwm_ac_to_tx_fifo[ac], 0, IWM_TID_NON_QOS, 0);
9317 			if (err) {
9318 				printf("%s: could not enable Tx queue %d "
9319 				    "(error %d)\n", DEVNAME(sc), ac, err);
9320 				goto err;
9321 			}
9322 		}
9323 	}
9324 
9325 	err = iwm_disable_beacon_filter(sc);
9326 	if (err) {
9327 		printf("%s: could not disable beacon filter (error %d)\n",
9328 		    DEVNAME(sc), err);
9329 		goto err;
9330 	}
9331 
9332 err:
9333 	iwm_nic_unlock(sc);
9334 	return err;
9335 }
9336 
9337 /* Allow multicast from our BSSID. */
9338 int
9339 iwm_allow_mcast(struct iwm_softc *sc)
9340 {
9341 	struct ieee80211com *ic = &sc->sc_ic;
9342 	struct ieee80211_node *ni = ic->ic_bss;
9343 	struct iwm_mcast_filter_cmd *cmd;
9344 	size_t size;
9345 	int err;
9346 
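	/* Round the command size up to a multiple of 4 bytes. */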
9347 	size = roundup(sizeof(*cmd), 4);
9348 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
9349 	if (cmd == NULL)
9350 		return ENOMEM;
9351 	cmd->filter_own = 1;
9352 	cmd->port_id = 0;
9353 	cmd->count = 0;
9354 	cmd->pass_all = 1;
9355 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
9356 
9357 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
9358 	    0, size, cmd);
9359 	free(cmd, M_DEVBUF, size);
9360 	return err;
9361 }
9362 
9363 int
9364 iwm_init(struct ifnet *ifp)
9365 {
9366 	struct iwm_softc *sc = ifp->if_softc;
9367 	struct ieee80211com *ic = &sc->sc_ic;
9368 	int err, generation;
9369 
9370 	rw_assert_wrlock(&sc->ioctl_rwl);
9371 
9372 	generation = ++sc->sc_generation;
9373 
9374 	KASSERT(sc->task_refs.refs == 0);
9375 	refcnt_init(&sc->task_refs);
9376 
9377 	err = iwm_init_hw(sc);
9378 	if (err) {
9379 		if (generation == sc->sc_generation)
9380 			iwm_stop(ifp);
9381 		return err;
9382 	}
9383 
9384 	if (sc->sc_nvm.sku_cap_11n_enable)
9385 		iwm_setup_ht_rates(sc);
9386 
9387 	ifq_clr_oactive(&ifp->if_snd);
9388 	ifp->if_flags |= IFF_RUNNING;
9389 
9390 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
9391 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
9392 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
9393 		return 0;
9394 	}
9395 
9396 	ieee80211_begin_scan(ifp);
9397 
9398 	/*
9399 	 * ieee80211_begin_scan() ends up scheduling iwm_newstate_task().
9400 	 * Wait until the transition to SCAN state has completed.
9401 	 */
9402 	do {
9403 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwminit",
9404 		    SEC_TO_NSEC(1));
9405 		if (generation != sc->sc_generation)
9406 			return ENXIO;
9407 		if (err)
9408 			return err;
9409 	} while (ic->ic_state != IEEE80211_S_SCAN);
9410 
9411 	return 0;
9412 }
9413 
9414 void
9415 iwm_start(struct ifnet *ifp)
9416 {
9417 	struct iwm_softc *sc = ifp->if_softc;
9418 	struct ieee80211com *ic = &sc->sc_ic;
9419 	struct ieee80211_node *ni;
9420 	struct ether_header *eh;
9421 	struct mbuf *m;
9422 	int ac = EDCA_AC_BE; /* XXX */
9423 
9424 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
9425 		return;
9426 
9427 	for (;;) {
9428 		/* why isn't this done per-queue? */
9429 		if (sc->qfullmsk != 0) {
9430 			ifq_set_oactive(&ifp->if_snd);
9431 			break;
9432 		}
9433 
9434 		/* Don't queue additional frames while flushing Tx queues. */
9435 		if (sc->sc_flags & IWM_FLAG_TXFLUSH)
9436 			break;
9437 
9438 		/* need to send management frames even if we're not RUNning */
9439 		m = mq_dequeue(&ic->ic_mgtq);
9440 		if (m) {
9441 			ni = m->m_pkthdr.ph_cookie;
9442 			goto sendit;
9443 		}
9444 
9445 		if (ic->ic_state != IEEE80211_S_RUN ||
9446 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
9447 			break;
9448 
9449 		m = ifq_dequeue(&ifp->if_snd);
9450 		if (!m)
9451 			break;
9452 		if (m->m_len < sizeof (*eh) &&
9453 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
9454 			ifp->if_oerrors++;
9455 			continue;
9456 		}
9457 #if NBPFILTER > 0
9458 		if (ifp->if_bpf != NULL)
9459 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
9460 #endif
9461 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
9462 			ifp->if_oerrors++;
9463 			continue;
9464 		}
9465 
9466  sendit:
9467 #if NBPFILTER > 0
9468 		if (ic->ic_rawbpf != NULL)
9469 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
9470 #endif
9471 		if (iwm_tx(sc, m, ni, ac) != 0) {
9472 			ieee80211_release_node(ic, ni);
9473 			ifp->if_oerrors++;
9474 			continue;
9475 		}
9476 
9477 		if (ifp->if_flags & IFF_UP) {
9478 			sc->sc_tx_timer = 15;
9479 			ifp->if_timer = 1;
9480 		}
9481 	}
9482 
9483 	return;
9484 }
9485 
9486 void
9487 iwm_stop(struct ifnet *ifp)
9488 {
9489 	struct iwm_softc *sc = ifp->if_softc;
9490 	struct ieee80211com *ic = &sc->sc_ic;
9491 	struct iwm_node *in = (void *)ic->ic_bss;
9492 	int i, s = splnet();
9493 
9494 	rw_assert_wrlock(&sc->ioctl_rwl);
9495 
9496 	sc->sc_flags |= IWM_FLAG_SHUTDOWN; /* Disallow new tasks. */
9497 
9498 	/* Cancel scheduled tasks and let any stale tasks finish up. */
9499 	task_del(systq, &sc->init_task);
9500 	iwm_del_task(sc, sc->sc_nswq, &sc->newstate_task);
9501 	iwm_del_task(sc, systq, &sc->ba_task);
9502 	iwm_del_task(sc, systq, &sc->mac_ctxt_task);
9503 	KASSERT(sc->task_refs.refs >= 1);
9504 	refcnt_finalize(&sc->task_refs, "iwmstop");
9505 
9506 	iwm_stop_device(sc);
9507 
9508 	/* Reset soft state. */
9509 
9510 	sc->sc_generation++;
9511 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
9512 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
9513 		sc->sc_cmd_resp_pkt[i] = NULL;
9514 		sc->sc_cmd_resp_len[i] = 0;
9515 	}
9516 	ifp->if_flags &= ~IFF_RUNNING;
9517 	ifq_clr_oactive(&ifp->if_snd);
9518 
9519 	in->in_phyctxt = NULL;
9520 	in->tid_disable_ampdu = 0xffff;
9521 	in->tfd_queue_msk = 0;
9522 
9523 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
9524 	sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
9525 	sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
9526 	sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
9527 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
9528 	sc->sc_flags &= ~IWM_FLAG_HW_ERR;
9529 	sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;
9530 	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
9531 
9532 	sc->sc_rx_ba_sessions = 0;
9533 	sc->ba_rx.start_tidmask = 0;
9534 	sc->ba_rx.stop_tidmask = 0;
9535 	sc->tx_ba_queue_mask = 0;
9536 	sc->ba_tx.start_tidmask = 0;
9537 	sc->ba_tx.stop_tidmask = 0;
9538 
9539 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
9540 
9541 	timeout_del(&sc->sc_calib_to); /* XXX refcount? */
9542 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
9543 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
9544 		iwm_clear_reorder_buffer(sc, rxba);
9545 	}
9546 	iwm_led_blink_stop(sc);
9547 	ifp->if_timer = sc->sc_tx_timer = 0;
9548 
9549 	splx(s);
9550 }
9551 
9552 void
9553 iwm_watchdog(struct ifnet *ifp)
9554 {
9555 	struct iwm_softc *sc = ifp->if_softc;
9556 
9557 	ifp->if_timer = 0;
9558 	if (sc->sc_tx_timer > 0) {
9559 		if (--sc->sc_tx_timer == 0) {
9560 			printf("%s: device timeout\n", DEVNAME(sc));
9561 #if 1
9562 			iwm_nic_error(sc);
9563 			iwm_dump_driver_status(sc);
9564 #endif
9565 			if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
9566 				task_add(systq, &sc->init_task);
9567 			ifp->if_oerrors++;
9568 			return;
9569 		}
9570 		ifp->if_timer = 1;
9571 	}
9572 
9573 	ieee80211_watchdog(ifp);
9574 }
9575 
9576 int
9577 iwm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
9578 {
9579 	struct iwm_softc *sc = ifp->if_softc;
9580 	int s, err = 0, generation = sc->sc_generation;
9581 
9582 	/*
9583 	 * Prevent processes from entering this function while another
9584 	 * process is tsleep'ing in it.
9585 	 */
9586 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
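	/* The device may have been stopped and restarted while we slept. */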
9587 	if (err == 0 && generation != sc->sc_generation) {
9588 		rw_exit(&sc->ioctl_rwl);
9589 		return ENXIO;
9590 	}
9591 	if (err)
9592 		return err;
9593 	s = splnet();
9594 
9595 	switch (cmd) {
9596 	case SIOCSIFADDR:
9597 		ifp->if_flags |= IFF_UP;
9598 		/* FALLTHROUGH */
9599 	case SIOCSIFFLAGS:
9600 		if (ifp->if_flags & IFF_UP) {
9601 			if (!(ifp->if_flags & IFF_RUNNING)) {
9602 				err = iwm_init(ifp);
9603 			}
9604 		} else {
9605 			if (ifp->if_flags & IFF_RUNNING)
9606 				iwm_stop(ifp);
9607 		}
9608 		break;
9609 
9610 	default:
9611 		err = ieee80211_ioctl(ifp, cmd, data);
9612 	}
9613 
9614 	if (err == ENETRESET) {
9615 		err = 0;
9616 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
9617 		    (IFF_UP | IFF_RUNNING)) {
9618 			iwm_stop(ifp);
9619 			err = iwm_init(ifp);
9620 		}
9621 	}
9622 
9623 	splx(s);
9624 	rw_exit(&sc->ioctl_rwl);
9625 
9626 	return err;
9627 }
9628 
9629 #if 1
9630 /*
9631  * Note: This structure is read from the device with IO accesses,
9632  * and the reading already does the endian conversion. As it is
9633  * read with uint32_t-sized accesses, any members with a different size
9634  * need to be ordered correctly though!
9635  */
9636 struct iwm_error_event_table {
9637 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
9638 	uint32_t error_id;		/* type of error */
9639 	uint32_t trm_hw_status0;	/* TRM HW status */
9640 	uint32_t trm_hw_status1;	/* TRM HW status */
9641 	uint32_t blink2;		/* branch link */
9642 	uint32_t ilink1;		/* interrupt link */
9643 	uint32_t ilink2;		/* interrupt link */
9644 	uint32_t data1;		/* error-specific data */
9645 	uint32_t data2;		/* error-specific data */
9646 	uint32_t data3;		/* error-specific data */
9647 	uint32_t bcon_time;		/* beacon timer */
9648 	uint32_t tsf_low;		/* network timestamp function timer */
9649 	uint32_t tsf_hi;		/* network timestamp function timer */
9650 	uint32_t gp1;		/* GP1 timer register */
9651 	uint32_t gp2;		/* GP2 timer register */
9652 	uint32_t fw_rev_type;	/* firmware revision type */
9653 	uint32_t major;		/* uCode version major */
9654 	uint32_t minor;		/* uCode version minor */
9655 	uint32_t hw_ver;		/* HW Silicon version */
9656 	uint32_t brd_ver;		/* HW board version */
9657 	uint32_t log_pc;		/* log program counter */
9658 	uint32_t frame_ptr;		/* frame pointer */
9659 	uint32_t stack_ptr;		/* stack pointer */
9660 	uint32_t hcmd;		/* last host command header */
9661 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
9662 				 * rxtx_flag */
9663 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
9664 				 * host_flag */
9665 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
9666 				 * enc_flag */
9667 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
9668 				 * time_flag */
9669 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
9670 				 * wico interrupt */
9671 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
9672 	uint32_t wait_event;		/* wait event() caller address */
9673 	uint32_t l2p_control;	/* L2pControlField */
9674 	uint32_t l2p_duration;	/* L2pDurationField */
9675 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
9676 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
9677 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
9678 				 * (LMPM_PMG_SEL) */
9679 	uint32_t u_timestamp;	/* date and time of the
9680 				 * compilation */
9681 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
9682 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
9683 
9684 /*
9685  * UMAC error struct - relevant starting from family 8000 chip.
9686  * Note: This structure is read from the device with IO accesses,
9687  * and the reading already does the endian conversion. As it is
9688  * read with u32-sized accesses, any members with a different size
9689  * need to be ordered correctly though!
9690  */
9691 struct iwm_umac_error_event_table {
9692 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
9693 	uint32_t error_id;	/* type of error */
9694 	uint32_t blink1;	/* branch link */
9695 	uint32_t blink2;	/* branch link */
9696 	uint32_t ilink1;	/* interrupt link */
9697 	uint32_t ilink2;	/* interrupt link */
9698 	uint32_t data1;		/* error-specific data */
9699 	uint32_t data2;		/* error-specific data */
9700 	uint32_t data3;		/* error-specific data */
9701 	uint32_t umac_major;
9702 	uint32_t umac_minor;
9703 	uint32_t frame_pointer;	/* core register 27 */
9704 	uint32_t stack_pointer;	/* core register 28 */
9705 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
9706 	uint32_t nic_isr_pref;	/* ISR status register */
9707 } __packed;
9708 
9709 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
9710 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
9711 
9712 void
9713 iwm_nic_umac_error(struct iwm_softc *sc)
9714 {
9715 	struct iwm_umac_error_event_table table;
9716 	uint32_t base;
9717 
9718 	base = sc->sc_uc.uc_umac_error_event_table;
9719 
9720 	if (base < 0x800000) {
9721 		printf("%s: Invalid error log pointer 0x%08x\n",
9722 		    DEVNAME(sc), base);
9723 		return;
9724 	}
9725 
9726 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
9727 		printf("%s: reading errlog failed\n", DEVNAME(sc));
9728 		return;
9729 	}
9730 
9731 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
9732 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
9733 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
9734 			sc->sc_flags, table.valid);
9735 	}
9736 
9737 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
9738 		iwm_desc_lookup(table.error_id));
9739 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
9740 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
9741 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
9742 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
9743 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
9744 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
9745 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
9746 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
9747 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
9748 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
9749 	    table.frame_pointer);
9750 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
9751 	    table.stack_pointer);
9752 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
9753 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
9754 	    table.nic_isr_pref);
9755 }
9756 
9757 #define IWM_FW_SYSASSERT_CPU_MASK 0xf0000000
9758 static struct {
9759 	const char *name;
9760 	uint8_t num;
9761 } advanced_lookup[] = {
9762 	{ "NMI_INTERRUPT_WDG", 0x34 },
9763 	{ "SYSASSERT", 0x35 },
9764 	{ "UCODE_VERSION_MISMATCH", 0x37 },
9765 	{ "BAD_COMMAND", 0x38 },
9766 	{ "BAD_COMMAND", 0x39 },
9767 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
9768 	{ "FATAL_ERROR", 0x3D },
9769 	{ "NMI_TRM_HW_ERR", 0x46 },
9770 	{ "NMI_INTERRUPT_TRM", 0x4C },
9771 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
9772 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
9773 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
9774 	{ "NMI_INTERRUPT_HOST", 0x66 },
9775 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
9776 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
9777 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
9778 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
9779 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
9780 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
9781 	{ "ADVANCED_SYSASSERT", 0 },
9782 };
9783 
9784 const char *
9785 iwm_desc_lookup(uint32_t num)
9786 {
9787 	int i;
9788 
9789 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
9790 		if (advanced_lookup[i].num ==
9791 		    (num & ~IWM_FW_SYSASSERT_CPU_MASK))
9792 			return advanced_lookup[i].name;
9793 
9794 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
9795 	return advanced_lookup[i].name;
9796 }
9797 
9798 /*
9799  * Support for dumping the error log seemed like a good idea ...
9800  * but it's mostly hex junk and the only sensible thing is the
9801  * hw/ucode revision (which we know anyway).  Since it's here,
9802  * I'll just leave it in, just in case e.g. the Intel guys want to
9803  * help us decipher some "ADVANCED_SYSASSERT" later.
9804  */
9805 void
9806 iwm_nic_error(struct iwm_softc *sc)
9807 {
9808 	struct iwm_error_event_table table;
9809 	uint32_t base;
9810 
9811 	printf("%s: dumping device error log\n", DEVNAME(sc));
9812 	base = sc->sc_uc.uc_error_event_table;
9813 	if (base < 0x800000) {
9814 		printf("%s: Invalid error log pointer 0x%08x\n",
9815 		    DEVNAME(sc), base);
9816 		return;
9817 	}
9818 
9819 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
9820 		printf("%s: reading errlog failed\n", DEVNAME(sc));
9821 		return;
9822 	}
9823 
9824 	if (!table.valid) {
9825 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
9826 		return;
9827 	}
9828 
9829 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
9830 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
9831 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
9832 		    sc->sc_flags, table.valid);
9833 	}
9834 
9835 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
9836 	    iwm_desc_lookup(table.error_id));
9837 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
9838 	    table.trm_hw_status0);
9839 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
9840 	    table.trm_hw_status1);
9841 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
9842 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
9843 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
9844 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
9845 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
9846 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
9847 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
9848 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
9849 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
9850 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
9851 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
9852 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
9853 	    table.fw_rev_type);
9854 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
9855 	    table.major);
9856 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
9857 	    table.minor);
9858 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
9859 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
9860 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
9861 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
9862 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
9863 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
9864 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
9865 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
9866 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
9867 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
9868 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
9869 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
9870 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
9871 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
9872 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
9873 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
9874 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
9875 
9876 	if (sc->sc_uc.uc_umac_error_event_table)
9877 		iwm_nic_umac_error(sc);
9878 }
9879 
9880 void
9881 iwm_dump_driver_status(struct iwm_softc *sc)
9882 {
9883 	int i;
9884 
9885 	printf("driver status:\n");
9886 	for (i = 0; i < IWM_MAX_QUEUES; i++) {
9887 		struct iwm_tx_ring *ring = &sc->txq[i];
9888 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
9889 		    "queued=%-3d\n",
9890 		    i, ring->qid, ring->cur, ring->queued);
9891 	}
9892 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
9893 	printf("  802.11 state %s\n",
9894 	    ieee80211_state_name[sc->sc_ic.ic_state]);
9895 }
9896 #endif
9897 
9898 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
9899 do {									\
9900 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
9901 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
9902 	_var_ = (void *)((_pkt_)+1);					\
9903 } while (/*CONSTCOND*/0)
9904 
9905 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
9906 do {									\
9907 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
9908 	    (_len_), BUS_DMASYNC_POSTREAD);				\
9909 	_ptr_ = (void *)((_pkt_)+1);					\
9910 } while (/*CONSTCOND*/0)
9911 
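/*
 * Note: ADVANCE_RXQ() relies on a local variable named "count" (the
 * RX ring size) being in scope at the call site.
 */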
9912 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % count)
9913 
9914 int
9915 iwm_rx_pkt_valid(struct iwm_rx_packet *pkt)
9916 {
9917 	int qid, idx, code;
9918 
9919 	qid = pkt->hdr.qid & ~0x80;
9920 	idx = pkt->hdr.idx;
9921 	code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
9922 
9923 	return (!(qid == 0 && idx == 0 && code == 0) &&
9924 	    pkt->len_n_flags != htole32(IWM_FH_RSCSR_FRAME_INVALID));
9925 }
9926 
9927 void
9928 iwm_rx_pkt(struct iwm_softc *sc, struct iwm_rx_data *data, struct mbuf_list *ml)
9929 {
9930 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
9931 	struct iwm_rx_packet *pkt, *nextpkt;
9932 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
9933 	struct mbuf *m0, *m;
9934 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
9935 	int qid, idx, code, handled = 1;
9936 
9937 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
9938 	    BUS_DMASYNC_POSTREAD);
9939 
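	/*
	 * Several firmware responses may be packed into a single receive
	 * buffer; walk them front to back, each frame aligned to
	 * IWM_FH_RSCSR_FRAME_ALIGN.
	 */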
9940 	m0 = data->m;
9941 	while (m0 && offset + minsz < IWM_RBUF_SIZE) {
9942 		pkt = (struct iwm_rx_packet *)(m0->m_data + offset);
9943 		qid = pkt->hdr.qid;
9944 		idx = pkt->hdr.idx;
9945 
9946 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
9947 
9948 		if (!iwm_rx_pkt_valid(pkt))
9949 			break;
9950 
9951 		len = sizeof(pkt->len_n_flags) + iwm_rx_packet_len(pkt);
9952 		if (len < sizeof(pkt->hdr) ||
9953 		    len > (IWM_RBUF_SIZE - offset - minsz))
9954 			break;
9955 
9956 		if (code == IWM_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
9957 			/* Take mbuf m0 off the RX ring. */
9958 			if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur)) {
9959 				ifp->if_ierrors++;
9960 				break;
9961 			}
9962 			KASSERT(data->m != m0);
9963 		}
9964 
9965 		switch (code) {
9966 		case IWM_REPLY_RX_PHY_CMD:
9967 			iwm_rx_rx_phy_cmd(sc, pkt, data);
9968 			break;
9969 
9970 		case IWM_REPLY_RX_MPDU_CMD: {
9971 			size_t maxlen = IWM_RBUF_SIZE - offset - minsz;
9972 			nextoff = offset +
9973 			    roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
9974 			nextpkt = (struct iwm_rx_packet *)
9975 			    (m0->m_data + nextoff);
9976 			if (nextoff + minsz >= IWM_RBUF_SIZE ||
9977 			    !iwm_rx_pkt_valid(nextpkt)) {
9978 				/* No need to copy last frame in buffer. */
9979 				if (offset > 0)
9980 					m_adj(m0, offset);
9981 				if (sc->sc_mqrx_supported)
9982 					iwm_rx_mpdu_mq(sc, m0, pkt->data,
9983 					    maxlen, ml);
9984 				else
9985 					iwm_rx_mpdu(sc, m0, pkt->data,
9986 					    maxlen, ml);
9987 				m0 = NULL; /* stack owns m0 now; abort loop */
9988 			} else {
9989 				/*
9990 				 * Create an mbuf which points to the current
9991 				 * packet. Always copy from offset zero to
9992 				 * preserve m_pkthdr.
9993 				 */
9994 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
9995 				if (m == NULL) {
9996 					ifp->if_ierrors++;
9997 					m_freem(m0);
9998 					m0 = NULL;
9999 					break;
10000 				}
10001 				m_adj(m, offset);
10002 				if (sc->sc_mqrx_supported)
10003 					iwm_rx_mpdu_mq(sc, m, pkt->data,
10004 					    maxlen, ml);
10005 				else
10006 					iwm_rx_mpdu(sc, m, pkt->data,
10007 					    maxlen, ml);
10008 			}
10009 			break;
10010 		}
10011 
10012 		case IWM_TX_CMD:
10013 			iwm_rx_tx_cmd(sc, pkt, data);
10014 			break;
10015 
10016 		case IWM_BA_NOTIF:
10017 			iwm_rx_compressed_ba(sc, pkt, data);
10018 			break;
10019 
10020 		case IWM_MISSED_BEACONS_NOTIFICATION:
10021 			iwm_rx_bmiss(sc, pkt, data);
10022 			break;
10023 
10024 		case IWM_MFUART_LOAD_NOTIFICATION:
10025 			break;
10026 
10027 		case IWM_ALIVE: {
10028 			struct iwm_alive_resp_v1 *resp1;
10029 			struct iwm_alive_resp_v2 *resp2;
10030 			struct iwm_alive_resp_v3 *resp3;
10031 
10032 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
10033 				SYNC_RESP_STRUCT(resp1, pkt);
10034 				sc->sc_uc.uc_error_event_table
10035 				    = le32toh(resp1->error_event_table_ptr);
10036 				sc->sc_uc.uc_log_event_table
10037 				    = le32toh(resp1->log_event_table_ptr);
10038 				sc->sched_base = le32toh(resp1->scd_base_ptr);
10039 				if (resp1->status == IWM_ALIVE_STATUS_OK)
10040 					sc->sc_uc.uc_ok = 1;
10041 				else
10042 					sc->sc_uc.uc_ok = 0;
10043 			}
10044 
10045 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
10046 				SYNC_RESP_STRUCT(resp2, pkt);
10047 				sc->sc_uc.uc_error_event_table
10048 				    = le32toh(resp2->error_event_table_ptr);
10049 				sc->sc_uc.uc_log_event_table
10050 				    = le32toh(resp2->log_event_table_ptr);
10051 				sc->sched_base = le32toh(resp2->scd_base_ptr);
10052 				sc->sc_uc.uc_umac_error_event_table
10053 				    = le32toh(resp2->error_info_addr);
10054 				if (resp2->status == IWM_ALIVE_STATUS_OK)
10055 					sc->sc_uc.uc_ok = 1;
10056 				else
10057 					sc->sc_uc.uc_ok = 0;
10058 			}
10059 
10060 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
10061 				SYNC_RESP_STRUCT(resp3, pkt);
10062 				sc->sc_uc.uc_error_event_table
10063 				    = le32toh(resp3->error_event_table_ptr);
10064 				sc->sc_uc.uc_log_event_table
10065 				    = le32toh(resp3->log_event_table_ptr);
10066 				sc->sched_base = le32toh(resp3->scd_base_ptr);
10067 				sc->sc_uc.uc_umac_error_event_table
10068 				    = le32toh(resp3->error_info_addr);
10069 				if (resp3->status == IWM_ALIVE_STATUS_OK)
10070 					sc->sc_uc.uc_ok = 1;
10071 				else
10072 					sc->sc_uc.uc_ok = 0;
10073 			}
10074 
10075 			sc->sc_uc.uc_intr = 1;
10076 			wakeup(&sc->sc_uc);
10077 			break;
10078 		}
10079 
10080 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
10081 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
10082 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
10083 			iwm_phy_db_set_section(sc, phy_db_notif);
10084 			sc->sc_init_complete |= IWM_CALIB_COMPLETE;
10085 			wakeup(&sc->sc_init_complete);
10086 			break;
10087 		}
10088 
10089 		case IWM_STATISTICS_NOTIFICATION: {
10090 			struct iwm_notif_statistics *stats;
10091 			SYNC_RESP_STRUCT(stats, pkt);
10092 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
10093 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
10094 			break;
10095 		}
10096 
10097 		case IWM_MCC_CHUB_UPDATE_CMD: {
10098 			struct iwm_mcc_chub_notif *notif;
10099 			SYNC_RESP_STRUCT(notif, pkt);
10100 
10101 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
10102 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
10103 			sc->sc_fw_mcc[2] = '\0';
			break;
10104 		}
10105 
10106 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
10107 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
10108 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE):
10109 			break;
10110 
10111 		case IWM_ADD_STA_KEY:
10112 		case IWM_PHY_CONFIGURATION_CMD:
10113 		case IWM_TX_ANT_CONFIGURATION_CMD:
10114 		case IWM_ADD_STA:
10115 		case IWM_MAC_CONTEXT_CMD:
10116 		case IWM_REPLY_SF_CFG_CMD:
10117 		case IWM_POWER_TABLE_CMD:
10118 		case IWM_LTR_CONFIG:
10119 		case IWM_PHY_CONTEXT_CMD:
10120 		case IWM_BINDING_CONTEXT_CMD:
10121 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_CFG_CMD):
10122 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_REQ_UMAC):
10123 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
10124 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
10125 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
10126 		case IWM_REPLY_BEACON_FILTERING_CMD:
10127 		case IWM_MAC_PM_POWER_TABLE:
10128 		case IWM_TIME_QUOTA_CMD:
10129 		case IWM_REMOVE_STA:
10130 		case IWM_TXPATH_FLUSH:
10131 		case IWM_LQ_CMD:
10132 		case IWM_WIDE_ID(IWM_LONG_GROUP,
10133 				 IWM_FW_PAGING_BLOCK_CMD):
10134 		case IWM_BT_CONFIG:
10135 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
10136 		case IWM_NVM_ACCESS_CMD:
10137 		case IWM_MCC_UPDATE_CMD:
10138 		case IWM_TIME_EVENT_CMD: {
10139 			size_t pkt_len;
10140 
10141 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
10142 				break;
10143 
10144 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
10145 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
10146 
10147 			pkt_len = sizeof(pkt->len_n_flags) +
10148 			    iwm_rx_packet_len(pkt);
10149 
10150 			if ((pkt->hdr.flags & IWM_CMD_FAILED_MSK) ||
10151 			    pkt_len < sizeof(*pkt) ||
10152 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
10153 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
10154 				    sc->sc_cmd_resp_len[idx]);
10155 				sc->sc_cmd_resp_pkt[idx] = NULL;
10156 				break;
10157 			}
10158 
10159 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
10160 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
10161 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
10162 			break;
10163 		}
10164 
10165 		/* ignore */
10166 		case IWM_PHY_DB_CMD:
10167 			break;
10168 
10169 		case IWM_INIT_COMPLETE_NOTIF:
10170 			sc->sc_init_complete |= IWM_INIT_COMPLETE;
10171 			wakeup(&sc->sc_init_complete);
10172 			break;
10173 
10174 		case IWM_SCAN_OFFLOAD_COMPLETE: {
10175 			struct iwm_periodic_scan_complete *notif;
10176 			SYNC_RESP_STRUCT(notif, pkt);
10177 			break;
10178 		}
10179 
10180 		case IWM_SCAN_ITERATION_COMPLETE: {
10181 			struct iwm_lmac_scan_complete_notif *notif;
10182 			SYNC_RESP_STRUCT(notif, pkt);
10183 			iwm_endscan(sc);
10184 			break;
10185 		}
10186 
10187 		case IWM_SCAN_COMPLETE_UMAC: {
10188 			struct iwm_umac_scan_complete *notif;
10189 			SYNC_RESP_STRUCT(notif, pkt);
10190 			iwm_endscan(sc);
10191 			break;
10192 		}
10193 
10194 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
10195 			struct iwm_umac_scan_iter_complete_notif *notif;
10196 			SYNC_RESP_STRUCT(notif, pkt);
10197 			iwm_endscan(sc);
10198 			break;
10199 		}
10200 
10201 		case IWM_REPLY_ERROR: {
10202 			struct iwm_error_resp *resp;
10203 			SYNC_RESP_STRUCT(resp, pkt);
10204 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
10205 				DEVNAME(sc), le32toh(resp->error_type),
10206 				resp->cmd_id);
10207 			break;
10208 		}
10209 
10210 		case IWM_TIME_EVENT_NOTIFICATION: {
10211 			struct iwm_time_event_notif *notif;
10212 			uint32_t action;
10213 			SYNC_RESP_STRUCT(notif, pkt);
10214 
10215 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
10216 				break;
10217 			action = le32toh(notif->action);
10218 			if (action & IWM_TE_V2_NOTIF_HOST_EVENT_END)
10219 				sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
10220 			break;
10221 		}
10222 
10223 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP,
10224 		    IWM_FSEQ_VER_MISMATCH_NOTIFICATION):
10225 		    break;
10226 
10227 		/*
10228 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
10229 		 * messages. Just ignore them for now.
10230 		 */
10231 		case IWM_DEBUG_LOG_MSG:
10232 			break;
10233 
10234 		case IWM_MCAST_FILTER_CMD:
10235 			break;
10236 
10237 		case IWM_SCD_QUEUE_CFG: {
10238 			struct iwm_scd_txq_cfg_rsp *rsp;
10239 			SYNC_RESP_STRUCT(rsp, pkt);
10240 
10241 			break;
10242 		}
10243 
10244 		case IWM_WIDE_ID(IWM_DATA_PATH_GROUP, IWM_DQA_ENABLE_CMD):
10245 			break;
10246 
10247 		default:
10248 			handled = 0;
10249 			printf("%s: unhandled firmware response 0x%x/0x%x "
10250 			    "rx ring %d[%d]\n",
10251 			    DEVNAME(sc), code, pkt->len_n_flags,
10252 			    (qid & ~0x80), idx);
10253 			break;
10254 		}
10255 
10256 		/*
10257 		 * uCode sets bit 0x80 when it originates the notification,
10258 		 * i.e. when the notification is not a direct response to a
10259 		 * command sent by the driver.
10260 		 * For example, uCode issues IWM_REPLY_RX when it sends a
10261 		 * received frame to the driver.
10262 		 */
10263 		if (handled && !(qid & (1 << 7))) {
10264 			iwm_cmd_done(sc, qid, idx, code);
10265 		}
10266 
10267 		offset += roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
10268 	}
10269 
10270 	if (m0 && m0 != data->m)
10271 		m_freem(m0);
10272 }
10273 
10274 void
10275 iwm_notif_intr(struct iwm_softc *sc)
10276 {
10277 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
10278 	uint32_t wreg;
10279 	uint16_t hw;
10280 	int count;
10281 
10282 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
10283 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
10284 
10285 	if (sc->sc_mqrx_supported) {
10286 		count = IWM_RX_MQ_RING_COUNT;
10287 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
10288 	} else {
10289 		count = IWM_RX_RING_COUNT;
10290 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
10291 	}
10292 
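	/*
	 * closed_rb_num is the 12-bit index of the most recently closed
	 * receive buffer; reduce it modulo the ring size (a power of
	 * two) to obtain our ring index.
	 */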
10293 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
10294 	hw &= (count - 1);
10295 	while (sc->rxq.cur != hw) {
10296 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
10297 		iwm_rx_pkt(sc, data, &ml);
10298 		ADVANCE_RXQ(sc);
10299 	}
10300 	if_input(&sc->sc_ic.ic_if, &ml);
10301 
10302 	/*
10303 	 * Tell the firmware what we have processed.
10304 	 * Seems like the hardware gets upset unless we align the write by 8??
10305 	 */
10306 	hw = (hw == 0) ? count - 1 : hw - 1;
10307 	IWM_WRITE(sc, wreg, hw & ~7);
10308 }
10309 
10310 int
10311 iwm_intr(void *arg)
10312 {
10313 	struct iwm_softc *sc = arg;
10314 	int handled = 0;
10315 	int rv = 0;
10316 	uint32_t r1, r2;
10317 
10318 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
10319 
10320 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
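		/*
		 * Read the interrupt causes from the ICT table, which the
		 * device updates via DMA, instead of from CSR registers.
		 */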
10321 		uint32_t *ict = sc->ict_dma.vaddr;
10322 		int tmp;
10323 
10324 		tmp = htole32(ict[sc->ict_cur]);
10325 		if (!tmp)
10326 			goto out_ena;
10327 
10328 		/*
10329 		 * ok, there was something.  keep plowing until we have all.
10330 		 */
10331 		r1 = r2 = 0;
10332 		while (tmp) {
10333 			r1 |= tmp;
10334 			ict[sc->ict_cur] = 0;
10335 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
10336 			tmp = htole32(ict[sc->ict_cur]);
10337 		}
10338 
10339 		/* this is where the fun begins.  don't ask */
10340 		if (r1 == 0xffffffff)
10341 			r1 = 0;
10342 
10343 		/*
10344 		 * Workaround for hardware bug where bits are falsely cleared
10345 		 * when using interrupt coalescing.  Bit 15 should be set if
10346 		 * bits 18 and 19 are set.
10347 		 */
10348 		if (r1 & 0xc0000)
10349 			r1 |= 0x8000;
10350 
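		/*
		 * The ICT entry is a compressed interrupt vector: spread
		 * its low and high bytes back out to the bit positions
		 * used by the IWM_CSR_INT register.
		 */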
10351 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
10352 	} else {
10353 		r1 = IWM_READ(sc, IWM_CSR_INT);
10354 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
10355 	}
10356 	if (r1 == 0 && r2 == 0) {
10357 		goto out_ena;
10358 	}
10359 	if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
10360 		goto out;
10361 
10362 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
10363 
10364 	/* ignored */
10365 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
10366 
10367 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
10368 		handled |= IWM_CSR_INT_BIT_RF_KILL;
10369 		iwm_check_rfkill(sc);
10370 		task_add(systq, &sc->init_task);
10371 		rv = 1;
10372 		goto out_ena;
10373 	}
10374 
10375 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
10376 #if 1
10377 		iwm_nic_error(sc);
10378 		iwm_dump_driver_status(sc);
10379 #endif
10380 
10381 		printf("%s: fatal firmware error\n", DEVNAME(sc));
10382 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
10383 			task_add(systq, &sc->init_task);
10384 		rv = 1;
10385 		goto out;
10386 
10387 	}
10388 
10389 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
10390 		handled |= IWM_CSR_INT_BIT_HW_ERR;
10391 		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
10392 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
10393 			sc->sc_flags |= IWM_FLAG_HW_ERR;
10394 			task_add(systq, &sc->init_task);
10395 		}
10396 		rv = 1;
10397 		goto out;
10398 	}
10399 
10400 	/* firmware chunk loaded */
10401 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
10402 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
10403 		handled |= IWM_CSR_INT_BIT_FH_TX;
10404 
10405 		sc->sc_fw_chunk_done = 1;
10406 		wakeup(&sc->sc_fw);
10407 	}
10408 
10409 	if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX |
10410 	    IWM_CSR_INT_BIT_RX_PERIODIC)) {
10411 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) {
10412 			handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
10413 			IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
10414 		}
10415 		if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
10416 			handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
10417 			IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
10418 		}
10419 
10420 		/* Disable periodic interrupt; we use it as just a one-shot. */
10421 		IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
10422 
10423 		/*
10424 		 * Enable periodic interrupt in 8 msec only if we received
10425 		 * real RX interrupt (instead of just periodic int), to catch
10426 		 * any dangling Rx interrupt.  If it was just the periodic
10427 		 * interrupt, there was no dangling Rx activity, and no need
10428 		 * to extend the periodic interrupt; one-shot is enough.
10429 		 */
10430 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX))
10431 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
10432 			    IWM_CSR_INT_PERIODIC_ENA);
10433 
10434 		iwm_notif_intr(sc);
10435 	}
10436 
10437 	rv = 1;
10438 
10439  out_ena:
10440 	iwm_restore_interrupts(sc);
10441  out:
10442 	return rv;
10443 }
10444 
10445 int
10446 iwm_intr_msix(void *arg)
10447 {
10448 	struct iwm_softc *sc = arg;
10449 	uint32_t inta_fh, inta_hw;
10450 	int vector = 0;
10451 
10452 	inta_fh = IWM_READ(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD);
10453 	inta_hw = IWM_READ(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD);
10454 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
10455 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
10456 	inta_fh &= sc->sc_fh_mask;
10457 	inta_hw &= sc->sc_hw_mask;
10458 
10459 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_Q0 ||
10460 	    inta_fh & IWM_MSIX_FH_INT_CAUSES_Q1) {
10461 		iwm_notif_intr(sc);
10462 	}
10463 
10464 	/* firmware chunk loaded */
10465 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
10466 		sc->sc_fw_chunk_done = 1;
10467 		wakeup(&sc->sc_fw);
10468 	}
10469 
10470 	if ((inta_fh & IWM_MSIX_FH_INT_CAUSES_FH_ERR) ||
10471 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
10472 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
10473 #if 1
10474 		iwm_nic_error(sc);
10475 		iwm_dump_driver_status(sc);
10476 #endif
10477 
10478 		printf("%s: fatal firmware error\n", DEVNAME(sc));
10479 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
10480 			task_add(systq, &sc->init_task);
10481 		return 1;
10482 	}
10483 
10484 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
10485 		iwm_check_rfkill(sc);
10486 		task_add(systq, &sc->init_task);
10487 	}
10488 
10489 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
10490 		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
10491 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
10492 			sc->sc_flags |= IWM_FLAG_HW_ERR;
10493 			task_add(systq, &sc->init_task);
10494 		}
10495 		return 1;
10496 	}
10497 
10498 	/*
10499 	 * Before delivering an interrupt the HW masks it to prevent a
10500 	 * nested interrupt. This is done by setting the corresponding bit
10501 	 * in the mask register. After the interrupt has been handled, it
10502 	 * should be re-enabled by clearing this bit. The register is
10503 	 * defined as a write-1-clear (W1C) register, meaning that the
10504 	 * bit is cleared by writing 1 to it.
10505 	 */
10506 	IWM_WRITE(sc, IWM_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
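	/* With a single vector in use (vector == 0) this writes 0x1. */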
10507 	return 1;
10508 }
10509 
10510 typedef void *iwm_match_t;
10511 
10512 static const struct pci_matchid iwm_devices[] = {
10513 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
10514 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
10515 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_1 },
10516 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_2 },
10517 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3168_1 },
10518 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
10519 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
10520 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
10521 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
10522 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 },
10523 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 },
10524 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8265_1 },
10525 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9260_1 },
10526 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_1 },
10527 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_2 },
10528 };
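/* This table is consumed by pci_matchbyid() in iwm_match() below. */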
10529 
10530 int
10531 iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
10532 {
10533 	return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
10534 	    nitems(iwm_devices));
10535 }
10536 
10537 int
10538 iwm_preinit(struct iwm_softc *sc)
10539 {
10540 	struct ieee80211com *ic = &sc->sc_ic;
10541 	struct ifnet *ifp = IC2IFP(ic);
10542 	int err;
10543 	static int attached;
10544 
10545 	err = iwm_prepare_card_hw(sc);
10546 	if (err) {
10547 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10548 		return err;
10549 	}
10550 
10551 	if (attached) {
10552 		/* Update MAC in case the upper layers changed it. */
10553 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
10554 		    ((struct arpcom *)ifp)->ac_enaddr);
10555 		return 0;
10556 	}
10557 
10558 	err = iwm_start_hw(sc);
10559 	if (err) {
10560 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10561 		return err;
10562 	}
10563 
10564 	err = iwm_run_init_mvm_ucode(sc, 1);
10565 	iwm_stop_device(sc);
10566 	if (err)
10567 		return err;
10568 
10569 	/* Print version info and MAC address on first successful fw load. */
10570 	attached = 1;
10571 	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
10572 	    DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
10573 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
10574 
10575 	if (sc->sc_nvm.sku_cap_11n_enable)
10576 		iwm_setup_ht_rates(sc);
10577 
10578 	/* not all hardware can do 5GHz band */
10579 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
10580 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
10581 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
10582 
10583 	/* Configure channel information obtained from firmware. */
10584 	ieee80211_channel_init(ifp);
10585 
10586 	/* Configure MAC address. */
10587 	err = if_setlladdr(ifp, ic->ic_myaddr);
10588 	if (err)
10589 		printf("%s: could not set MAC address (error %d)\n",
10590 		    DEVNAME(sc), err);
10591 
10592 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
10593 
10594 	return 0;
10595 }
10596 
10597 void
10598 iwm_attach_hook(struct device *self)
10599 {
10600 	struct iwm_softc *sc = (void *)self;
10601 
10602 	KASSERT(!cold);
10603 
10604 	iwm_preinit(sc);
10605 }
10606 
10607 void
10608 iwm_attach(struct device *parent, struct device *self, void *aux)
10609 {
10610 	struct iwm_softc *sc = (void *)self;
10611 	struct pci_attach_args *pa = aux;
10612 	pci_intr_handle_t ih;
10613 	pcireg_t reg, memtype;
10614 	struct ieee80211com *ic = &sc->sc_ic;
10615 	struct ifnet *ifp = &ic->ic_if;
10616 	const char *intrstr;
10617 	int err;
10618 	int txq_i, i, j;
10619 
10620 	sc->sc_pct = pa->pa_pc;
10621 	sc->sc_pcitag = pa->pa_tag;
10622 	sc->sc_dmat = pa->pa_dmat;
10623 
10624 	rw_init(&sc->ioctl_rwl, "iwmioctl");
10625 
10626 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
10627 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
10628 	if (err == 0) {
10629 		printf("%s: PCIe capability structure not found!\n",
10630 		    DEVNAME(sc));
10631 		return;
10632 	}
10633 
10634 	/* Clear device-specific "PCI retry timeout" register (41h). */
10635 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
10636 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
10637 
10638 	/* Enable bus-mastering and hardware bug workaround. */
10639 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
10640 	reg |= PCI_COMMAND_MASTER_ENABLE;
10641 	/* Clear the interrupt-disable bit; hardware bug workaround if !MSI. */
10642 	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
10643 		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
10644 	}
10645 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
10646 
10647 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
10648 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
10649 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
10650 	if (err) {
10651 		printf("%s: can't map mem space\n", DEVNAME(sc));
10652 		return;
10653 	}
10654 
10655 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
10656 		sc->sc_msix = 1;
10657 	} else if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
10658 		printf("%s: can't map interrupt\n", DEVNAME(sc));
10659 		return;
10660 	}
10661 
10662 	intrstr = pci_intr_string(sc->sc_pct, ih);
10663 	if (sc->sc_msix)
10664 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
10665 		    iwm_intr_msix, sc, DEVNAME(sc));
10666 	else
10667 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
10668 		    iwm_intr, sc, DEVNAME(sc));
10669 
10670 	if (sc->sc_ih == NULL) {
10671 		printf("\n");
10672 		printf("%s: can't establish interrupt", DEVNAME(sc));
10673 		if (intrstr != NULL)
10674 			printf(" at %s", intrstr);
10675 		printf("\n");
10676 		return;
10677 	}
10678 	printf(", %s\n", intrstr);
10679 
10680 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
10681 	switch (PCI_PRODUCT(pa->pa_id)) {
10682 	case PCI_PRODUCT_INTEL_WL_3160_1:
10683 	case PCI_PRODUCT_INTEL_WL_3160_2:
10684 		sc->sc_fwname = "iwm-3160-17";
10685 		sc->host_interrupt_operation_mode = 1;
10686 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
10687 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
10688 		sc->sc_nvm_max_section_size = 16384;
10689 		sc->nvm_type = IWM_NVM;
10690 		break;
10691 	case PCI_PRODUCT_INTEL_WL_3165_1:
10692 	case PCI_PRODUCT_INTEL_WL_3165_2:
10693 		sc->sc_fwname = "iwm-7265-17";
10694 		sc->host_interrupt_operation_mode = 0;
10695 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
10696 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
10697 		sc->sc_nvm_max_section_size = 16384;
10698 		sc->nvm_type = IWM_NVM;
10699 		break;
10700 	case PCI_PRODUCT_INTEL_WL_3168_1:
10701 		sc->sc_fwname = "iwm-3168-29";
10702 		sc->host_interrupt_operation_mode = 0;
10703 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
10704 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
10705 		sc->sc_nvm_max_section_size = 16384;
10706 		sc->nvm_type = IWM_NVM_SDP;
10707 		break;
10708 	case PCI_PRODUCT_INTEL_WL_7260_1:
10709 	case PCI_PRODUCT_INTEL_WL_7260_2:
10710 		sc->sc_fwname = "iwm-7260-17";
10711 		sc->host_interrupt_operation_mode = 1;
10712 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
10713 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
10714 		sc->sc_nvm_max_section_size = 16384;
10715 		sc->nvm_type = IWM_NVM;
10716 		break;
10717 	case PCI_PRODUCT_INTEL_WL_7265_1:
10718 	case PCI_PRODUCT_INTEL_WL_7265_2:
10719 		sc->sc_fwname = "iwm-7265-17";
10720 		sc->host_interrupt_operation_mode = 0;
10721 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
10722 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
10723 		sc->sc_nvm_max_section_size = 16384;
10724 		sc->nvm_type = IWM_NVM;
10725 		break;
10726 	case PCI_PRODUCT_INTEL_WL_8260_1:
10727 	case PCI_PRODUCT_INTEL_WL_8260_2:
10728 		sc->sc_fwname = "iwm-8000C-34";
10729 		sc->host_interrupt_operation_mode = 0;
10730 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
10731 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
10732 		sc->sc_nvm_max_section_size = 32768;
10733 		sc->nvm_type = IWM_NVM_EXT;
10734 		break;
10735 	case PCI_PRODUCT_INTEL_WL_8265_1:
10736 		sc->sc_fwname = "iwm-8265-34";
10737 		sc->host_interrupt_operation_mode = 0;
10738 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
10739 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
10740 		sc->sc_nvm_max_section_size = 32768;
10741 		sc->nvm_type = IWM_NVM_EXT;
10742 		break;
10743 	case PCI_PRODUCT_INTEL_WL_9260_1:
10744 		sc->sc_fwname = "iwm-9260-34";
10745 		sc->host_interrupt_operation_mode = 0;
10746 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
10747 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
10748 		sc->sc_nvm_max_section_size = 32768;
10749 		sc->sc_mqrx_supported = 1;
10750 		break;
10751 	case PCI_PRODUCT_INTEL_WL_9560_1:
10752 	case PCI_PRODUCT_INTEL_WL_9560_2:
10753 		sc->sc_fwname = "iwm-9000-34";
10754 		sc->host_interrupt_operation_mode = 0;
10755 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
10756 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
10757 		sc->sc_nvm_max_section_size = 32768;
10758 		sc->sc_mqrx_supported = 1;
10759 		sc->sc_integrated = 1;
10760 		break;
10761 	default:
10762 		printf("%s: unknown adapter type\n", DEVNAME(sc));
10763 		return;
10764 	}
10765 
10766 	/*
10767 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
10768 	 * changed, and the revision step now also includes bits 0-1 (there
10769 	 * is no more "dash" value). To keep hw_rev backwards compatible,
10770 	 * we store it in the old format.
10771 	 */
10772 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
10773 		uint32_t hw_step;
10774 
10775 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
10776 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
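		/*
		 * Worked example (assuming IWM_CSR_HW_REV_STEP() extracts
		 * bits 2-3 of its argument): a raw value of 0x321 carries
		 * the step in bits 0-1 (here: 1). The expression above
		 * clears the low nibble (0x320) and re-inserts the step in
		 * bits 2-3, yielding 0x324, i.e. the old "dash" layout.
		 */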
10777 
10778 		if (iwm_prepare_card_hw(sc) != 0) {
10779 			printf("%s: could not initialize hardware\n",
10780 			    DEVNAME(sc));
10781 			return;
10782 		}
10783 
10784 		/*
10785 		 * In order to recognize the C step, the driver should read
10786 		 * the chip version ID located at the AUX bus MISC address.
10787 		 */
10788 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
10789 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
10790 		DELAY(2);
10791 
10792 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
10793 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
10794 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
10795 				   25000);
10796 		if (!err) {
10797 			printf("%s: failed to wake up the NIC\n", DEVNAME(sc));
10798 			return;
10799 		}
10800 
10801 		if (iwm_nic_lock(sc)) {
10802 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
10803 			hw_step |= IWM_ENABLE_WFPM;
10804 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
10805 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
10806 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
10807 			if (hw_step == 0x3)
10808 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
10809 						(IWM_SILICON_C_STEP << 2);
10810 			iwm_nic_unlock(sc);
10811 		} else {
10812 			printf("%s: failed to lock the NIC\n", DEVNAME(sc));
10813 			return;
10814 		}
10815 	}
10816 
10817 	/*
10818 	 * Allocate DMA memory for firmware transfers.
10819 	 * Must be aligned on a 16-byte boundary.
10820 	 */
10821 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
10822 	    sc->sc_fwdmasegsz, 16);
10823 	if (err) {
10824 		printf("%s: could not allocate memory for firmware\n",
10825 		    DEVNAME(sc));
10826 		return;
10827 	}
10828 
10829 	/* Allocate "Keep Warm" page, used internally by the card. */
10830 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
10831 	if (err) {
10832 		printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
10833 		goto fail1;
10834 	}
10835 
10836 	/* Allocate the interrupt cause table (ICT). */
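	/*
	 * The ICT base register is assumed to hold the table's physical
	 * address shifted right by IWM_ICT_PADDR_SHIFT, hence the
	 * (1 << IWM_ICT_PADDR_SHIFT)-byte alignment requested below.
	 */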
10837 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
10838 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
10839 	if (err) {
10840 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
10841 		goto fail2;
10842 	}
10843 
10844 	/* TX scheduler rings must be aligned on a 1KB boundary. */
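	/*
	 * One struct iwm_agn_scd_bc_tbl (byte count table) is allocated
	 * per TX queue; the scheduler reads per-frame byte counts from it.
	 */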
10845 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
10846 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
10847 	if (err) {
10848 		printf("%s: could not allocate TX scheduler rings\n",
10849 		    DEVNAME(sc));
10850 		goto fail3;
10851 	}
10852 
10853 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
10854 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
10855 		if (err) {
10856 			printf("%s: could not allocate TX ring %d\n",
10857 			    DEVNAME(sc), txq_i);
10858 			goto fail4;
10859 		}
10860 	}
10861 
10862 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
10863 	if (err) {
10864 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
10865 		goto fail4;
10866 	}
10867 
10868 	sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
10869 	if (sc->sc_nswq == NULL)
10870 		goto fail4;
10871 
10872 	/* Clear pending interrupts. */
10873 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
10874 
10875 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
10876 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
10877 	ic->ic_state = IEEE80211_S_INIT;
10878 
10879 	/* Set device capabilities. */
10880 	ic->ic_caps =
10881 	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
10882 	    IEEE80211_C_WEP |		/* WEP */
10883 	    IEEE80211_C_RSN |		/* WPA/RSN */
10884 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
10885 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
10886 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
10887 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
10888 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
10889 
10890 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
10891 	ic->ic_htcaps |=
10892 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
10893 	ic->ic_htxcaps = 0;
10894 	ic->ic_txbfcaps = 0;
10895 	ic->ic_aselcaps = 0;
10896 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
10897 
10898 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
10899 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
10900 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
10901 
10902 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
10903 		sc->sc_phyctxt[i].id = i;
10904 	}
10905 
10906 	sc->sc_amrr.amrr_min_success_threshold =  1;
10907 	sc->sc_amrr.amrr_max_success_threshold = 15;
10908 
10909 	/* IBSS channel undefined for now. */
10910 	ic->ic_ibss_chan = &ic->ic_channels[1];
10911 
10912 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
10913 
10914 	ifp->if_softc = sc;
10915 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
10916 	ifp->if_ioctl = iwm_ioctl;
10917 	ifp->if_start = iwm_start;
10918 	ifp->if_watchdog = iwm_watchdog;
10919 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
10920 
10921 	if_attach(ifp);
10922 	ieee80211_ifattach(ifp);
10923 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
10924 
10925 #if NBPFILTER > 0
10926 	iwm_radiotap_attach(sc);
10927 #endif
10928 	timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
10929 	timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
10930 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10931 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
10932 		rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
10933 		rxba->sc = sc;
10934 		timeout_set(&rxba->session_timer, iwm_rx_ba_session_expired,
10935 		    rxba);
10936 		timeout_set(&rxba->reorder_buf.reorder_timer,
10937 		    iwm_reorder_timer_expired, &rxba->reorder_buf);
10938 		for (j = 0; j < nitems(rxba->entries); j++)
10939 			ml_init(&rxba->entries[j].frames);
10940 	}
10941 	task_set(&sc->init_task, iwm_init_task, sc);
10942 	task_set(&sc->newstate_task, iwm_newstate_task, sc);
10943 	task_set(&sc->ba_task, iwm_ba_task, sc);
10944 	task_set(&sc->mac_ctxt_task, iwm_mac_ctxt_task, sc);
10945 
10946 	ic->ic_node_alloc = iwm_node_alloc;
10947 	ic->ic_bgscan_start = iwm_bgscan;
10948 	ic->ic_set_key = iwm_set_key;
10949 	ic->ic_delete_key = iwm_delete_key;
10950 
10951 	/* Override 802.11 state transition machine. */
10952 	sc->sc_newstate = ic->ic_newstate;
10953 	ic->ic_newstate = iwm_newstate;
10954 	ic->ic_updateprot = iwm_updateprot;
10955 	ic->ic_updateslot = iwm_updateslot;
10956 	ic->ic_updateedca = iwm_updateedca;
10957 	ic->ic_ampdu_rx_start = iwm_ampdu_rx_start;
10958 	ic->ic_ampdu_rx_stop = iwm_ampdu_rx_stop;
10959 	ic->ic_ampdu_tx_start = iwm_ampdu_tx_start;
10960 	ic->ic_ampdu_tx_stop = iwm_ampdu_tx_stop;
10961 	/*
10962 	 * We cannot read the MAC address without loading the
10963 	 * firmware from disk. Postpone until mountroot is done.
10964 	 */
10965 	config_mountroot(self, iwm_attach_hook);
10966 
10967 	return;
10968 
10969 fail4:	while (--txq_i >= 0)
10970 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
10971 	iwm_free_rx_ring(sc, &sc->rxq);
10972 	iwm_dma_contig_free(&sc->sched_dma);
10973 fail3:	if (sc->ict_dma.vaddr != NULL)
10974 		iwm_dma_contig_free(&sc->ict_dma);
10975 
10976 fail2:	iwm_dma_contig_free(&sc->kw_dma);
10977 fail1:	iwm_dma_contig_free(&sc->fw_dma);
10978 	return;
10979 }
10980 
10981 #if NBPFILTER > 0
10982 void
10983 iwm_radiotap_attach(struct iwm_softc *sc)
10984 {
10985 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
10986 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
10987 
10988 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
10989 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
10990 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
10991 
10992 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
10993 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
10994 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
10995 }
10996 #endif
10997 
10998 void
10999 iwm_init_task(void *arg1)
11000 {
11001 	struct iwm_softc *sc = arg1;
11002 	struct ifnet *ifp = &sc->sc_ic.ic_if;
11003 	int s = splnet();
11004 	int generation = sc->sc_generation;
11005 	int fatal = (sc->sc_flags & (IWM_FLAG_HW_ERR | IWM_FLAG_RFKILL));
11006 
11007 	rw_enter_write(&sc->ioctl_rwl);
11008 	if (generation != sc->sc_generation) {
11009 		rw_exit(&sc->ioctl_rwl);
11010 		splx(s);
11011 		return;
11012 	}
11013 
11014 	if (ifp->if_flags & IFF_RUNNING)
11015 		iwm_stop(ifp);
11016 	else
11017 		sc->sc_flags &= ~IWM_FLAG_HW_ERR;
11018 
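	/*
	 * If a fatal condition (hardware error or rfkill, latched into
	 * "fatal" above) was flagged when this task started, leave the
	 * device down; otherwise restart it if it is still marked UP.
	 */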
11019 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
11020 		iwm_init(ifp);
11021 
11022 	rw_exit(&sc->ioctl_rwl);
11023 	splx(s);
11024 }
11025 
11026 int
11027 iwm_resume(struct iwm_softc *sc)
11028 {
11029 	pcireg_t reg;
11030 
11031 	/* Clear device-specific "PCI retry timeout" register (41h). */
11032 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
11033 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
11034 
11035 	/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
11036 	iwm_conf_msix_hw(sc, 0);
11037 
11038 	iwm_enable_rfkill_int(sc);
11039 	iwm_check_rfkill(sc);
11040 
11041 	return iwm_prepare_card_hw(sc);
11042 }
11043 
11044 int
11045 iwm_activate(struct device *self, int act)
11046 {
11047 	struct iwm_softc *sc = (struct iwm_softc *)self;
11048 	struct ifnet *ifp = &sc->sc_ic.ic_if;
11049 	int err = 0;
11050 
11051 	switch (act) {
11052 	case DVACT_QUIESCE:
11053 		if (ifp->if_flags & IFF_RUNNING) {
11054 			rw_enter_write(&sc->ioctl_rwl);
11055 			iwm_stop(ifp);
11056 			rw_exit(&sc->ioctl_rwl);
11057 		}
11058 		break;
11059 	case DVACT_RESUME:
11060 		err = iwm_resume(sc);
11061 		if (err)
11062 			printf("%s: could not initialize hardware\n",
11063 			    DEVNAME(sc));
11064 		break;
11065 	case DVACT_WAKEUP:
11066 		/* Hardware should be up at this point. */
11067 		if (iwm_set_hw_ready(sc))
11068 			task_add(systq, &sc->init_task);
11069 		break;
11070 	}
11071 
11072 	return 0;
11073 }
11074 
11075 struct cfdriver iwm_cd = {
11076 	NULL, "iwm", DV_IFNET
11077 };
11078 
11079 struct cfattach iwm_ca = {
11080 	sizeof(struct iwm_softc), iwm_match, iwm_attach,
11081 	NULL, iwm_activate
11082 };
11083