/*	$OpenBSD: if_iwx.c,v 1.171 2023/05/11 16:55:46 stsp Exp $	*/

/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/endian.h>

#include <sys/refcnt.h>
#include <sys/task.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
#undef DPRINTF /* defined in ieee80211_priv.h */

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

#define IC2IFP(_ic_) (&(_ic_)->ic_if)

#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))

#ifdef IWX_DEBUG
#define DPRINTF(x)	do { if (iwx_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwx_debug >= (n)) printf x; } while (0)
int iwx_debug = 1;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#include <dev/pci/if_iwxreg.h>
#include <dev/pci/if_iwxvar.h>

const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};

#define IWX_NUM_2GHZ_CHANNELS	14
#define IWX_NUM_5GHZ_CHANNELS	37

const struct iwx_rate {
	uint16_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWX_RIDX_CCK	0
#define IWX_RIDX_OFDM	4
#define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)

/* Convert an MCS index into an iwx_rates[] index. */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};

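/*
 * Illustrative sketch (not part of the driver): how the tables above
 * are meant to be combined. For HT MCS 7, iwx_mcs2ridx[] selects the
 * iwx_rates[] entry whose ht_plcp value is IWX_RATE_HT_SISO_MCS_7_PLCP:
 *
 *	int ridx = iwx_mcs2ridx[7];
 *	const struct iwx_rate *rate = &iwx_rates[ridx];
 *	uint8_t plcp = rate->ht_plcp;
 *
 * The 'rate' field is in units of 500 kbit/s (e.g. 12 == 6 Mbit/s).
 * Entries reachable only via HT MIMO2 MCS have no legacy equivalent
 * and use IWX_RATE_INVM_PLCP in their 'plcp' field.
 */
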
uint8_t	iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
uint8_t	iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
int	iwx_is_mimo_ht_plcp(uint8_t);
int	iwx_store_cscheme(struct iwx_softc *, uint8_t *, size_t);
int	iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
int	iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
int	iwx_apply_debug_destination(struct iwx_softc *);
void	iwx_set_ltr(struct iwx_softc *);
int	iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
int	iwx_ctxt_info_gen3_init(struct iwx_softc *,
	    const struct iwx_fw_sects *);
void	iwx_ctxt_info_free_fw_img(struct iwx_softc *);
void	iwx_ctxt_info_free_paging(struct iwx_softc *);
int	iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
	    struct iwx_context_info_dram *);
void	iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
int	iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
	    uint8_t *, size_t);
int	iwx_set_default_calib(struct iwx_softc *, const void *);
void	iwx_fw_info_free(struct iwx_fw_info *);
int	iwx_read_firmware(struct iwx_softc *);
uint32_t iwx_prph_addr_mask(struct iwx_softc *);
uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
void	iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
void	iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
uint32_t iwx_read_umac_prph_unlocked(struct iwx_softc *, uint32_t);
uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
void	iwx_write_umac_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
void	iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
int	iwx_write_mem(struct iwx_softc *, uint32_t, const void *, int);
int	iwx_write_mem32(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
int	iwx_nic_lock(struct iwx_softc *);
void	iwx_nic_assert_locked(struct iwx_softc *);
void	iwx_nic_unlock(struct iwx_softc *);
int	iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
	    uint32_t);
int	iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *, bus_size_t,
	    bus_size_t);
void	iwx_dma_contig_free(struct iwx_dma_info *);
int	iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
void	iwx_disable_rx_dma(struct iwx_softc *);
void	iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
void	iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
int	iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
void	iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_enable_rfkill_int(struct iwx_softc *);
int	iwx_check_rfkill(struct iwx_softc *);
void	iwx_enable_interrupts(struct iwx_softc *);
void	iwx_enable_fwload_interrupt(struct iwx_softc *);
void	iwx_restore_interrupts(struct iwx_softc *);
void	iwx_disable_interrupts(struct iwx_softc *);
void	iwx_ict_reset(struct iwx_softc *);
int	iwx_set_hw_ready(struct iwx_softc *);
int	iwx_prepare_card_hw(struct iwx_softc *);
int	iwx_force_power_gating(struct iwx_softc *);
void	iwx_apm_config(struct iwx_softc *);
int	iwx_apm_init(struct iwx_softc *);
void	iwx_apm_stop(struct iwx_softc *);
int	iwx_allow_mcast(struct iwx_softc *);
void	iwx_init_msix_hw(struct iwx_softc *);
void	iwx_conf_msix_hw(struct iwx_softc *, int);
int	iwx_clear_persistence_bit(struct iwx_softc *);
int	iwx_start_hw(struct iwx_softc *);
void	iwx_stop_device(struct iwx_softc *);
void	iwx_nic_config(struct iwx_softc *);
int	iwx_nic_rx_init(struct iwx_softc *);
int	iwx_nic_init(struct iwx_softc *);
int	iwx_enable_txq(struct iwx_softc *, int, int, int, int);
int	iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
void	iwx_post_alive(struct iwx_softc *);
int	iwx_schedule_session_protection(struct iwx_softc *, struct iwx_node *,
	    uint32_t);
void	iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
void	iwx_init_channel_map(struct iwx_softc *, uint16_t *, uint32_t *, int);
void	iwx_setup_ht_rates(struct iwx_softc *);
void	iwx_setup_vht_rates(struct iwx_softc *);
int	iwx_mimo_enabled(struct iwx_softc *);
void	iwx_mac_ctxt_task(void *);
void	iwx_phy_ctxt_task(void *);
void	iwx_updatechan(struct ieee80211com *);
void	iwx_updateprot(struct ieee80211com *);
void	iwx_updateslot(struct ieee80211com *);
void	iwx_updateedca(struct ieee80211com *);
void	iwx_updatedtim(struct ieee80211com *);
void	iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
	    uint16_t);
void	iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
int	iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
int	iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwx_rx_ba_session_expired(void *);
void	iwx_rx_bar_frame_release(struct iwx_softc *, struct iwx_rx_packet *,
	    struct mbuf_list *);
void	iwx_reorder_timer_expired(void *);
void	iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
	    uint16_t, uint16_t, int, int);
void	iwx_sta_tx_agg_start(struct iwx_softc *, struct ieee80211_node *,
	    uint8_t);
void	iwx_ba_task(void *);

void	iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
int	iwx_is_valid_mac_addr(const uint8_t *);
void	iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
int	iwx_nvm_get(struct iwx_softc *);
int	iwx_load_firmware(struct iwx_softc *);
int	iwx_start_fw(struct iwx_softc *);
int	iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
int	iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
void	iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
int	iwx_load_pnvm(struct iwx_softc *);
int	iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
int	iwx_send_phy_cfg_cmd(struct iwx_softc *);
int	iwx_load_ucode_wait_alive(struct iwx_softc *);
int	iwx_send_dqa_cmd(struct iwx_softc *);
int	iwx_run_init_mvm_ucode(struct iwx_softc *, int);
int	iwx_config_ltr(struct iwx_softc *);
void	iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int);
int	iwx_rx_addbuf(struct iwx_softc *, int, int);
int	iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
void	iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
int	iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
int	iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t,
	    struct ieee80211_rxinfo *);
int	iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
	    struct ieee80211_node *, struct ieee80211_rxinfo *);
void	iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t, int, int,
	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
void	iwx_txd_done(struct iwx_softc *, struct iwx_tx_data *);
void	iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
void	iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
void	iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
int	iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
uint8_t	iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
int	iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
	    uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
int	iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
	    uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
int	iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
int	iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
int	iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
	    const void *);
int	iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
	    uint32_t *);
int	iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
	    const void *, uint32_t *);
void	iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
void	iwx_cmd_done(struct iwx_softc *, int, int, int);
uint32_t iwx_fw_rateidx_ofdm(uint8_t);
uint32_t iwx_fw_rateidx_cck(uint8_t);
const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
	    struct ieee80211_frame *, uint16_t *, uint32_t *);
void	iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *, int,
	    uint16_t, uint16_t);
int	iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *);
int	iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
int	iwx_wait_tx_queues_empty(struct iwx_softc *);
int	iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
int	iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
int	iwx_beacon_filter_send_cmd(struct iwx_softc *,
	    struct iwx_beacon_filter_cmd *);
int	iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
void	iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_power_cmd *);
int	iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
int	iwx_power_update_device(struct iwx_softc *);
int	iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
int	iwx_disable_beacon_filter(struct iwx_softc *);
int	iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
int	iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
int	iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
int	iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
int	iwx_config_umac_scan_reduced(struct iwx_softc *);
uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
void	iwx_scan_umac_dwell_v10(struct iwx_softc *,
	    struct iwx_scan_general_params_v10 *, int);
void	iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
	    struct iwx_scan_general_params_v10 *, uint16_t, int);
void	iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
	    struct iwx_scan_channel_params_v6 *, uint32_t, int, int);
int	iwx_umac_scan_v14(struct iwx_softc *, int);
void	iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
uint8_t	iwx_ridx2rate(struct ieee80211_rateset *, int);
int	iwx_rval2ridx(int);
void	iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *, int *);
void	iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_ctx_cmd *, uint32_t);
void	iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_data_sta *, int);
int	iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t, int);
int	iwx_clear_statistics(struct iwx_softc *);
void	iwx_add_task(struct iwx_softc *, struct taskq *, struct task *);
void	iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
int	iwx_scan(struct iwx_softc *);
int	iwx_bgscan(struct ieee80211com *);
void	iwx_bgscan_done(struct ieee80211com *,
	    struct ieee80211_node_switch_bss_arg *, size_t);
void	iwx_bgscan_done_task(void *);
int	iwx_umac_scan_abort(struct iwx_softc *);
int	iwx_scan_abort(struct iwx_softc *);
int	iwx_enable_mgmt_queue(struct iwx_softc *);
int	iwx_disable_mgmt_queue(struct iwx_softc *);
int	iwx_rs_rval2idx(uint8_t);
uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *, int);
uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
int	iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
int	iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
int	iwx_rs_init(struct iwx_softc *, struct iwx_node *);
int	iwx_enable_data_tx_queues(struct iwx_softc *);
int	iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
	    uint8_t, uint8_t);
int	iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
	    uint8_t);
int	iwx_auth(struct iwx_softc *);
int	iwx_deauth(struct iwx_softc *);
int	iwx_run(struct iwx_softc *);
int	iwx_run_stop(struct iwx_softc *);
struct ieee80211_node *iwx_node_alloc(struct ieee80211com *);
int	iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
	    struct ieee80211_key *);
void	iwx_setkey_task(void *);
void	iwx_delete_key(struct ieee80211com *,
	    struct ieee80211_node *, struct ieee80211_key *);
int	iwx_media_change(struct ifnet *);
void	iwx_newstate_task(void *);
int	iwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
void	iwx_endscan(struct iwx_softc *);
void	iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
	    struct ieee80211_node *);
int	iwx_sf_config(struct iwx_softc *, int);
int	iwx_send_bt_init_conf(struct iwx_softc *);
int	iwx_send_soc_conf(struct iwx_softc *);
int	iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
int	iwx_send_temp_report_ths_cmd(struct iwx_softc *);
int	iwx_init_hw(struct iwx_softc *);
int	iwx_init(struct ifnet *);
void	iwx_start(struct ifnet *);
void	iwx_stop(struct ifnet *);
void	iwx_watchdog(struct ifnet *);
int	iwx_ioctl(struct ifnet *, u_long, caddr_t);
const char *iwx_desc_lookup(uint32_t);
void	iwx_nic_error(struct iwx_softc *);
void	iwx_dump_driver_status(struct iwx_softc *);
void	iwx_nic_umac_error(struct iwx_softc *);
int	iwx_detect_duplicate(struct iwx_softc *, struct mbuf *,
	    struct iwx_rx_mpdu_desc *, struct ieee80211_rxinfo *);
int	iwx_is_sn_less(uint16_t, uint16_t, uint16_t);
void	iwx_release_frames(struct iwx_softc *, struct ieee80211_node *,
	    struct iwx_rxba_data *, struct iwx_reorder_buffer *, uint16_t,
	    struct mbuf_list *);
int	iwx_oldsn_workaround(struct iwx_softc *, struct ieee80211_node *,
	    int, struct iwx_reorder_buffer *, uint32_t, uint32_t);
int	iwx_rx_reorder(struct iwx_softc *, struct mbuf *, int,
	    struct iwx_rx_mpdu_desc *, int, int, uint32_t,
	    struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t,
	    struct mbuf_list *);
int	iwx_rx_pkt_valid(struct iwx_rx_packet *);
void	iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
	    struct mbuf_list *);
void	iwx_notif_intr(struct iwx_softc *);
int	iwx_intr(void *);
int	iwx_intr_msix(void *);
int	iwx_match(struct device *, void *, void *);
int	iwx_preinit(struct iwx_softc *);
void	iwx_attach_hook(struct device *);
const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
void	iwx_attach(struct device *, struct device *, void *);
void	iwx_init_task(void *);
int	iwx_activate(struct device *, int);
void	iwx_resume(struct iwx_softc *);
int	iwx_wakeup(struct iwx_softc *);

#if NBPFILTER > 0
void	iwx_radiotap_attach(struct iwx_softc *);
#endif

uint8_t
iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwx_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->cmd_ver;
	}

	return IWX_FW_CMD_VER_UNKNOWN;
}

uint8_t
iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwx_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->notif_ver;
	}

	return IWX_FW_CMD_VER_UNKNOWN;
}

int
iwx_is_mimo_ht_plcp(uint8_t ht_plcp)
{
	switch (ht_plcp) {
	case IWX_RATE_HT_MIMO2_MCS_8_PLCP:
	case IWX_RATE_HT_MIMO2_MCS_9_PLCP:
	case IWX_RATE_HT_MIMO2_MCS_10_PLCP:
	case IWX_RATE_HT_MIMO2_MCS_11_PLCP:
	case IWX_RATE_HT_MIMO2_MCS_12_PLCP:
	case IWX_RATE_HT_MIMO2_MCS_13_PLCP:
	case IWX_RATE_HT_MIMO2_MCS_14_PLCP:
	case IWX_RATE_HT_MIMO2_MCS_15_PLCP:
		return 1;
	default:
		break;
	}

	return 0;
}

int
iwx_store_cscheme(struct iwx_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwx_fw_cscheme_list *l = (void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

int
iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
    const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
{
	int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 0);
	if (err) {
		printf("%s: could not allocate context info DMA memory\n",
		    DEVNAME(sc));
		return err;
	}

	memcpy(dram->vaddr, sec->fws_data, sec->fws_len);

	return 0;
}

void
iwx_ctxt_info_free_paging(struct iwx_softc *sc)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i;

	if (!dram->paging)
		return;

	/* free paging */
	for (i = 0; i < dram->paging_cnt; i++)
		iwx_dma_contig_free(&dram->paging[i]);

	free(dram->paging, M_DEVBUF, dram->paging_cnt * sizeof(*dram->paging));
	dram->paging_cnt = 0;
	dram->paging = NULL;
}

int
iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
{
	int i = 0;

	while (start < fws->fw_count &&
	       fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
	       fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

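/*
 * Illustrative layout of fws->fw_sect[] as consumed below (a sketch
 * derived from iwx_init_fw_sec(), not authoritative documentation):
 *
 *	[ lmac sections ... ]
 *	[ IWX_CPU1_CPU2_SEPARATOR_SECTION ]
 *	[ umac sections ... ]
 *	[ IWX_PAGING_SEPARATOR_SECTION ]
 *	[ paging sections ... ]
 *
 * iwx_get_num_sections() counts the sections of one such group,
 * stopping at the next separator.
 */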
int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, fw_cnt = 0;

	KASSERT(dram->paging == NULL);

	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
	/* add 2 due to separators */
	dram->paging_cnt = iwx_get_num_sections(fws,
	    dram->lmac_cnt + dram->umac_cnt + 2);

	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
	    sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->fw) {
		printf("%s: could not allocate memory for firmware sections\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->paging) {
		printf("%s: could not allocate memory for firmware paging\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	/* initialize lmac sections */
	for (i = 0; i < dram->lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
		    &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
		    htole64(dram->fw[fw_cnt].paddr);
		DPRINTF(("%s: firmware LMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size));
		fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < dram->umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
		    htole64(dram->fw[fw_cnt].paddr);
		DPRINTF(("%s: firmware UMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size));
		fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory is kept separate from dram->fw (unlike the umac and
	 * lmac sections) because it has a different release time: firmware
	 * section memory may be released once the firmware is alive, while
	 * paging memory can only be freed when the device goes down.
	 * The firmware image is therefore accessed a bit differently here:
	 * fw_cnt no longer changes, so the loop counter is added to it.
	 */
	for (i = 0; i < dram->paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		DPRINTF(("%s: firmware paging section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size));
	}

	return 0;
}

void
iwx_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	/*
	 * Starting with major version 35 the Linux driver prints the minor
	 * version in hexadecimal.
	 */
	if (major >= 35)
		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
	else
		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
}

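/*
 * Example (a sketch, not driver code): major=46, minor=0x6b, api=73
 * yields "46.0000006b.73", while an older major=34, minor=3, api=73
 * firmware yields "34.3.73".
 */
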
int
iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
    uint8_t min_power)
{
	struct iwx_dma_info *fw_mon = &sc->fw_mon;
	uint32_t size = 0;
	uint8_t power;
	int err;

	if (fw_mon->size)
		return 0;

	for (power = max_power; power >= min_power; power--) {
		size = (1 << power);

		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
		if (err)
			continue;

		DPRINTF(("%s: allocated 0x%08x bytes for firmware monitor.\n",
		    DEVNAME(sc), size));
		break;
	}

	if (err) {
		fw_mon->size = 0;
		return err;
	}

	if (power != max_power)
		DPRINTF(("%s: debug buffer is only %luK while %luK was requested\n",
		    DEVNAME(sc), (unsigned long)(1 << (power - 10)),
		    (unsigned long)(1 << (max_power - 10))));

	return 0;
}

int
iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (max_power > 26) {
		DPRINTF(("%s: external buffer size for monitor is too big %d, "
		    "check the FW TLV\n", DEVNAME(sc), max_power));
		return 0;
	}

	if (sc->fw_mon.size)
		return 0;

	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
}

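/*
 * Note on sizing, derived from the two functions above: 'power' is the
 * log2 of the monitor buffer size in bytes, so allocations range from
 * 2^11 (2 KB) to 2^26 (64 MB), falling back to the next smaller power
 * of two until contiguous DMA memory can be found.
 */
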
int
iwx_apply_debug_destination(struct iwx_softc *sc)
{
	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
	int i, err;
	uint8_t mon_mode, size_power, base_shift, end_shift;
	uint32_t base_reg, end_reg;

	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
	mon_mode = dest_v1->monitor_mode;
	size_power = dest_v1->size_power;
	base_reg = le32toh(dest_v1->base_reg);
	end_reg = le32toh(dest_v1->end_reg);
	base_shift = dest_v1->base_shift;
	end_shift = dest_v1->end_shift;

	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));

	if (mon_mode == EXTERNAL_MODE) {
		err = iwx_alloc_fw_monitor(sc, size_power);
		if (err)
			return err;
	}

	if (!iwx_nic_lock(sc))
		return EBUSY;

	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
		uint32_t addr, val;
		uint8_t op;

		addr = le32toh(dest_v1->reg_ops[i].addr);
		val = le32toh(dest_v1->reg_ops[i].val);
		op = dest_v1->reg_ops[i].op;

		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
		switch (op) {
		case CSR_ASSIGN:
			IWX_WRITE(sc, addr, val);
			break;
		case CSR_SETBIT:
			IWX_SETBITS(sc, addr, (1 << val));
			break;
		case CSR_CLEARBIT:
			IWX_CLRBITS(sc, addr, (1 << val));
			break;
		case PRPH_ASSIGN:
			iwx_write_prph(sc, addr, val);
			break;
		case PRPH_SETBIT:
			err = iwx_set_bits_prph(sc, addr, (1 << val));
			if (err) {
				/* Don't leak the NIC lock on error. */
				iwx_nic_unlock(sc);
				return err;
			}
			break;
		case PRPH_CLEARBIT:
			err = iwx_clear_bits_prph(sc, addr, (1 << val));
			if (err) {
				/* Don't leak the NIC lock on error. */
				iwx_nic_unlock(sc);
				return err;
			}
			break;
		case PRPH_BLOCKBIT:
			if (iwx_read_prph(sc, addr) & (1 << val))
				goto monitor;
			break;
		default:
			DPRINTF(("%s: FW debug - unknown OP %d\n",
			    DEVNAME(sc), op));
			break;
		}
	}

monitor:
	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
		/* base_reg and end_reg are already in host byte order */
		iwx_write_prph(sc, base_reg,
		    sc->fw_mon.paddr >> base_shift);
		iwx_write_prph(sc, end_reg,
		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
		    >> end_shift);
	}

	iwx_nic_unlock(sc);
	return 0;
}

void
iwx_set_ltr(struct iwx_softc *sc)
{
	uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
	    ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
	    (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);

	/*
	 * To work around hardware latency issues during the boot process,
	 * initialize the LTR to ~250 usec (see ltr_val above).
	 * The firmware initializes this again later (to a smaller value).
	 */
	if (!sc->sc_integrated) {
		IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
	} else if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
		iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
		    IWX_HPM_MAC_LRT_ENABLE_ALL);
		iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
	}
}

int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info, 0, sizeof(*ctxt_info));

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
	    htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF);
	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
	    (IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
	    IWX_CTXT_INFO_RB_CB_SIZE_POS) |
	    (IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
	    IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure debug, if it exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}

	iwx_set_ltr(sc);
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}

int
iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info_gen3 *ctxt_info_gen3;
	struct iwx_prph_scratch *prph_scratch;
	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	uint16_t cb_size;
	uint32_t control_flags, scratch_size;
	uint64_t paddr;
	int err;

	if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
		printf("%s: no image loader found in firmware file\n",
		    DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return EINVAL;
	}

	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
	    sc->sc_fw.iml_len, 0);
	if (err) {
		printf("%s: could not allocate DMA memory for "
		    "firmware image loader\n", DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return ENOMEM;
	}

	prph_scratch = sc->prph_scratch_dma.vaddr;
	memset(prph_scratch, 0, sizeof(*prph_scratch));
	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
	prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);

	control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
	    IWX_PRPH_SCRATCH_MTR_MODE |
	    (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
	if (sc->sc_imr_enabled)
		control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
	prph_sc_ctrl->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
	    htole64(sc->rxq.free_desc_dma.paddr);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
	if (err) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
	ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
	ctxt_info_gen3->prph_scratch_base_addr =
	    htole64(sc->prph_scratch_dma.paddr);
	scratch_size = sizeof(*prph_scratch);
	ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
	    htole64(sc->rxq.stat_dma.paddr);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
	cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
	ctxt_info_gen3->mtr_size = htole16(cb_size);
	cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
	ctxt_info_gen3->mcr_size = htole16(cb_size);

	memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);

	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);

	paddr = sc->iml_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
	IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);

	IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
	    IWX_CSR_AUTO_FUNC_BOOT_ENA);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}
	iwx_set_ltr(sc);
	iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */
	return 0;
}

void
iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i;

	if (!dram->fw)
		return;

	for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
		iwx_dma_contig_free(&dram->fw[i]);

	free(dram->fw, M_DEVBUF,
	    (dram->lmac_cnt + dram->umac_cnt) * sizeof(*dram->fw));
	dram->lmac_cnt = 0;
	dram->umac_cnt = 0;
	dram->fw = NULL;
}

int
iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwx_fw_sects *fws;
	struct iwx_fw_onesect *fwone;

	if (type >= IWX_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	DPRINTF(("%s: ucode type %d section %d\n", DEVNAME(sc), type,
	    fws->fw_count));
	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32 bits are the device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

#define IWX_DEFAULT_SCAN_CHANNELS	40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWX_MAX_SCAN_CHANNELS		67 /* as of iwx-cc-a0-62 firmware */

struct iwx_tlv_calib_data {
	uint32_t ucode_type;
	struct iwx_tlv_calib_ctrl calib;
} __packed;

int
iwx_set_default_calib(struct iwx_softc *sc, const void *data)
{
	const struct iwx_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWX_UCODE_TYPE_MAX)
		return EINVAL;

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

void
iwx_fw_info_free(struct iwx_fw_info *fw)
{
	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
	fw->fw_rawdata = NULL;
	fw->fw_rawsize = 0;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
	free(fw->iml, M_DEVBUF, fw->iml_len);
	fw->iml = NULL;
	fw->iml_len = 0;
}

#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000

int
iwx_read_firmware(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_fw_info *fw = &sc->sc_fw;
	struct iwx_tlv_ucode_header *uhdr;
	struct iwx_ucode_tlv tlv;
	uint32_t tlv_type;
	uint8_t *data;
	int err;
	size_t len;

	if (fw->fw_status == IWX_FW_STATUS_DONE)
		return 0;

	while (fw->fw_status == IWX_FW_STATUS_INPROGRESS)
		tsleep_nsec(&sc->sc_fw, 0, "iwxfwp", INFSLP);
	fw->fw_status = IWX_FW_STATUS_INPROGRESS;

	if (fw->fw_rawdata != NULL)
		iwx_fw_info_free(fw);

	err = loadfirmware(sc->sc_fwname,
	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
	if (err) {
		printf("%s: could not read firmware %s (error %d)\n",
		    DEVNAME(sc), sc->sc_fwname, err);
		goto out;
	}

	if (ic->ic_if.if_flags & IFF_DEBUG)
		printf("%s: using firmware %s\n", DEVNAME(sc), sc->sc_fwname);

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
	sc->n_cmd_versions = 0;

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWX_UCODE_API(le32toh(uhdr->ver)));

	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

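	/*
	 * What follows is a sequence of TLV records: each starts with a
	 * struct iwx_ucode_tlv header carrying a 32-bit type and a 32-bit
	 * payload length, followed by the payload itself, padded to a
	 * 4-byte boundary (see the roundup() at the bottom of the loop).
	 */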
	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWX_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWX_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
			break;
		case IWX_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but the Linux
			 * driver parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWX_UCODE_TLV_PAN?
			 * Intentional or a bug? Observations from the
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself...
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWX_UCODE_TLV_CSCHEME:
			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWX_UCODE_TLV_SEC_RT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_INIT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_WOWLAN:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwx_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWX_UCODE_TLV_API_CHANGES_SET: {
			struct iwx_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwx_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwx_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwx_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWX_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			    le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}

			iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		case IWX_UCODE_TLV_FW_DBG_DEST: {
			struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;

			fw->dbg_dest_ver = (uint8_t *)tlv_data;
			if (*fw->dbg_dest_ver != 0) {
				err = EINVAL;
				goto parse_out;
			}

			if (fw->dbg_dest_tlv_init)
				break;
			fw->dbg_dest_tlv_init = true;

			dest_v1 = (void *)tlv_data;
			fw->dbg_dest_tlv_v1 = dest_v1;
			fw->n_dest_reg = tlv_len -
			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
			DPRINTF(("%s: found debug dest; n_dest_reg=%d\n",
			    __func__, fw->n_dest_reg));
			break;
		}

		case IWX_UCODE_TLV_FW_DBG_CONF: {
			struct iwx_fw_dbg_conf_tlv *conf = (void *)tlv_data;

			if (!fw->dbg_dest_tlv_init ||
			    conf->id >= nitems(fw->dbg_conf_tlv) ||
			    fw->dbg_conf_tlv[conf->id] != NULL)
				break;

			DPRINTF(("Found debug configuration: %d\n", conf->id));
			fw->dbg_conf_tlv[conf->id] = conf;
			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
			break;
		}

		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
			struct iwx_umac_debug_addrs *dbg_ptrs =
			    (void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_umac_error_event_table =
			    le32toh(dbg_ptrs->error_info_addr) &
			    ~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
			    IWX_ERROR_EVENT_TABLE_UMAC;
			break;
		}

		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
			struct iwx_lmac_debug_addrs *dbg_ptrs =
			    (void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_lmac_error_event_table[0] =
			    le32toh(dbg_ptrs->error_event_table_ptr) &
			    ~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
			    IWX_ERROR_EVENT_TABLE_LMAC1;
			break;
		}

		case IWX_UCODE_TLV_FW_MEM_SEG:
			break;

		case IWX_UCODE_TLV_IML:
			if (sc->sc_fw.iml != NULL) {
				free(fw->iml, M_DEVBUF, fw->iml_len);
				fw->iml_len = 0;
			}
			sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF,
			    M_WAIT | M_CANFAIL | M_ZERO);
			if (sc->sc_fw.iml == NULL) {
				err = ENOMEM;
				goto parse_out;
			}
			memcpy(sc->sc_fw.iml, tlv_data, tlv_len);
			sc->sc_fw.iml_len = tlv_len;
			break;

		case IWX_UCODE_TLV_CMD_VERSIONS:
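			/*
			 * Only accept a whole number of version entries;
			 * a trailing partial entry is truncated here.
			 */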
1529 			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
1530 				tlv_len /= sizeof(struct iwx_fw_cmd_version);
1531 				tlv_len *= sizeof(struct iwx_fw_cmd_version);
1532 			}
1533 			if (sc->n_cmd_versions != 0) {
1534 				err = EINVAL;
1535 				goto parse_out;
1536 			}
1537 			if (tlv_len > sizeof(sc->cmd_versions)) {
1538 				err = EINVAL;
1539 				goto parse_out;
1540 			}
1541 			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
1542 			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
1543 			break;
1544 
1545 		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
1546 			break;
1547 
1548 		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
1549 		case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
1550 		case IWX_UCODE_TLV_FW_NUM_STATIONS:
1551 		case IWX_UCODE_TLV_FW_NUM_BEACONS:
1552 			break;
1553 
1554 		/* undocumented TLVs found in iwx-cc-a0-46 image */
1555 		case 58:
1556 		case 0x1000003:
1557 		case 0x1000004:
1558 			break;
1559 
1560 		/* undocumented TLVs found in iwx-cc-a0-48 image */
1561 		case 0x1000000:
1562 		case 0x1000002:
1563 			break;
1564 
1565 		case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
1566 		case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
1567 		case IWX_UCODE_TLV_TYPE_HCMD:
1568 		case IWX_UCODE_TLV_TYPE_REGIONS:
1569 		case IWX_UCODE_TLV_TYPE_TRIGGERS:
1570 		case IWX_UCODE_TLV_TYPE_CONF_SET:
1571 		case IWX_UCODE_TLV_SEC_TABLE_ADDR:
1572 		case IWX_UCODE_TLV_D3_KEK_KCK_ADDR:
1573 		case IWX_UCODE_TLV_CURRENT_PC:
1574 			break;
1575 
1576 		/* undocumented TLV found in iwx-cc-a0-67 image */
1577 		case 0x100000b:
1578 			break;
1579 
1580 		/* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */
1581 		case 0x101:
1582 			break;
1583 
1584 		/* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */
1585 		case 0x100000c:
1586 			break;
1587 
1588 		default:
1589 			err = EINVAL;
1590 			goto parse_out;
1591 		}
1592 
1593 		/*
1594 		 * Check for size_t overflow and ignore missing padding at
1595 		 * end of firmware file.
1596 		 */
1597 		if (roundup(tlv_len, 4) > len)
1598 			break;
1599 
1600 		len -= roundup(tlv_len, 4);
1601 		data += roundup(tlv_len, 4);
1602 	}
1603 
1604 	KASSERT(err == 0);
1605 
1606  parse_out:
1607 	if (err) {
1608 		printf("%s: firmware parse error %d, "
1609 		    "section type %d\n", DEVNAME(sc), err, tlv_type);
1610 	}
1611 
1612  out:
1613 	if (err) {
1614 		fw->fw_status = IWX_FW_STATUS_NONE;
1615 		if (fw->fw_rawdata != NULL)
1616 			iwx_fw_info_free(fw);
1617 	} else
1618 		fw->fw_status = IWX_FW_STATUS_DONE;
1619 	wakeup(&sc->sc_fw);
1620 
1621 	return err;
1622 }
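
/*
 * Illustrative sketch, not driver code: each firmware TLV parsed above is
 * a 4-byte type and a 4-byte length, both little-endian, followed by a
 * payload that is padded to a 4-byte boundary, which is why the loop
 * advances by roundup(tlv_len, 4). The struct and helper below merely
 * restate that layout under hypothetical names; the real definitions
 * live in if_iwxreg.h.
 */
struct iwx_example_tlv {
	uint32_t type;		/* IWX_UCODE_TLV_* */
	uint32_t len;		/* payload length, excluding padding */
	uint8_t data[];		/* payload, padded to roundup(len, 4) */
};

static size_t
iwx_example_tlv_record_size(const struct iwx_example_tlv *tlv)
{
	/* Header plus payload, with the payload padded to 4 bytes. */
	return sizeof(*tlv) + roundup(le32toh(tlv->len), 4);
}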
1623 
1624 uint32_t
1625 iwx_prph_addr_mask(struct iwx_softc *sc)
1626 {
1627 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1628 		return 0x00ffffff;
1629 	else
1630 		return 0x000fffff;
1631 }
1632 
1633 uint32_t
1634 iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1635 {
1636 	uint32_t mask = iwx_prph_addr_mask(sc);
1637 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24)));
1638 	IWX_BARRIER_READ_WRITE(sc);
1639 	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
1640 }
1641 
1642 uint32_t
1643 iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
1644 {
1645 	iwx_nic_assert_locked(sc);
1646 	return iwx_read_prph_unlocked(sc, addr);
1647 }
1648 
1649 void
1650 iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1651 {
1652 	uint32_t mask = iwx_prph_addr_mask(sc);
1653 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
1654 	IWX_BARRIER_WRITE(sc);
1655 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
1656 }
1657 
1658 void
1659 iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1660 {
1661 	iwx_nic_assert_locked(sc);
1662 	iwx_write_prph_unlocked(sc, addr, val);
1663 }
1664 
1665 void
1666 iwx_write_prph64(struct iwx_softc *sc, uint64_t addr, uint64_t val)
1667 {
1668 	iwx_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1669 	iwx_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1670 }
1671 
1672 uint32_t
1673 iwx_read_umac_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1674 {
1675 	return iwx_read_prph_unlocked(sc, addr + sc->sc_umac_prph_offset);
1676 }
1677 
1678 uint32_t
1679 iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr)
1680 {
1681 	return iwx_read_prph(sc, addr + sc->sc_umac_prph_offset);
1682 }
1683 
1684 void
1685 iwx_write_umac_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1686 {
1687 	iwx_write_prph_unlocked(sc, addr + sc->sc_umac_prph_offset, val);
1688 }
1689 
1690 void
1691 iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1692 {
1693 	iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val);
1694 }
1695 
1696 int
1697 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1698 {
1699 	int offs, err = 0;
1700 	uint32_t *vals = buf;
1701 
1702 	if (iwx_nic_lock(sc)) {
1703 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1704 		for (offs = 0; offs < dwords; offs++)
1705 			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1706 		iwx_nic_unlock(sc);
1707 	} else {
1708 		err = EBUSY;
1709 	}
1710 	return err;
1711 }
1712 
1713 int
1714 iwx_write_mem(struct iwx_softc *sc, uint32_t addr, const void *buf, int dwords)
1715 {
1716 	int offs;
1717 	const uint32_t *vals = buf;
1718 
1719 	if (iwx_nic_lock(sc)) {
1720 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WADDR, addr);
1721 		/* WADDR auto-increments */
1722 		for (offs = 0; offs < dwords; offs++) {
1723 			uint32_t val = vals ? vals[offs] : 0;
1724 			IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WDAT, val);
1725 		}
1726 		iwx_nic_unlock(sc);
1727 	} else {
1728 		return EBUSY;
1729 	}
1730 	return 0;
1731 }
1732 
1733 int
1734 iwx_write_mem32(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1735 {
1736 	return iwx_write_mem(sc, addr, &val, 1);
1737 }
1738 
1739 int
1740 iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1741     int timo)
1742 {
1743 	for (;;) {
1744 		if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1745 			return 1;
1746 		}
1747 		if (timo < 10) {
1748 			return 0;
1749 		}
1750 		timo -= 10;
1751 		DELAY(10);
1752 	}
1753 }
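
/*
 * Usage sketch (hypothetical helper, for illustration only): iwx_poll_bit()
 * takes its timeout in microseconds and polls in 10-microsecond steps until
 * (reg & mask) == (bits & mask). A typical caller waits for a status bit
 * like this:
 */
static int
iwx_example_wait_clock_ready(struct iwx_softc *sc)
{
	/* Wait up to 25ms for the MAC clock to stabilize. */
	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000))
		return ETIMEDOUT;
	return 0;
}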
1754 
1755 int
1756 iwx_nic_lock(struct iwx_softc *sc)
1757 {
1758 	if (sc->sc_nic_locks > 0) {
1759 		iwx_nic_assert_locked(sc);
1760 		sc->sc_nic_locks++;
1761 		return 1; /* already locked */
1762 	}
1763 
1764 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1765 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1766 
1767 	DELAY(2);
1768 
1769 	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
1770 	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1771 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1772 	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1773 		sc->sc_nic_locks++;
1774 		return 1;
1775 	}
1776 
1777 	printf("%s: acquiring device failed\n", DEVNAME(sc));
1778 	return 0;
1779 }
1780 
1781 void
1782 iwx_nic_assert_locked(struct iwx_softc *sc)
1783 {
1784 	if (sc->sc_nic_locks <= 0)
1785 		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1786 }
1787 
1788 void
1789 iwx_nic_unlock(struct iwx_softc *sc)
1790 {
1791 	if (sc->sc_nic_locks > 0) {
1792 		if (--sc->sc_nic_locks == 0)
1793 			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1794 			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1795 	} else
1796 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1797 }
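
/*
 * Usage sketch (hypothetical helper, for illustration only): the NIC lock
 * is reference-counted, so a helper may take it around a group of PRPH
 * accesses even when its caller already holds it; only the final unlock
 * clears the MAC_ACCESS_REQ bit.
 */
static int
iwx_example_read_prph_pair(struct iwx_softc *sc, uint32_t addr,
    uint32_t *lo, uint32_t *hi)
{
	if (!iwx_nic_lock(sc))
		return EBUSY;
	*lo = iwx_read_prph(sc, addr);	/* asserts the lock is held */
	*hi = iwx_read_prph(sc, addr + 4);
	iwx_nic_unlock(sc);
	return 0;
}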
1798 
1799 int
1800 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1801     uint32_t mask)
1802 {
1803 	uint32_t val;
1804 
1805 	if (iwx_nic_lock(sc)) {
1806 		val = iwx_read_prph(sc, reg) & mask;
1807 		val |= bits;
1808 		iwx_write_prph(sc, reg, val);
1809 		iwx_nic_unlock(sc);
1810 		return 0;
1811 	}
1812 	return EBUSY;
1813 }
1814 
1815 int
1816 iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1817 {
1818 	return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
1819 }
1820 
1821 int
1822 iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1823 {
1824 	return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
1825 }
1826 
1827 int
1828 iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
1829     bus_size_t size, bus_size_t alignment)
1830 {
1831 	int nsegs, err;
1832 	caddr_t va;
1833 
1834 	dma->tag = tag;
1835 	dma->size = size;
1836 
1837 	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1838 	    &dma->map);
1839 	if (err)
1840 		goto fail;
1841 
1842 	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1843 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1844 	if (err)
1845 		goto fail;
1846 
1847 	if (nsegs > 1) {
1848 		err = ENOMEM;
1849 		goto fail;
1850 	}
1851 
1852 	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1853 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1854 	if (err)
1855 		goto fail;
1856 	dma->vaddr = va;
1857 
1858 	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1859 	    BUS_DMA_NOWAIT);
1860 	if (err)
1861 		goto fail;
1862 
1863 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1864 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1865 
1866 	return 0;
1867 
1868 fail:	iwx_dma_contig_free(dma);
1869 	return err;
1870 }
1871 
1872 void
1873 iwx_dma_contig_free(struct iwx_dma_info *dma)
1874 {
1875 	if (dma->map != NULL) {
1876 		if (dma->vaddr != NULL) {
1877 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1878 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1879 			bus_dmamap_unload(dma->tag, dma->map);
1880 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1881 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1882 			dma->vaddr = NULL;
1883 		}
1884 		bus_dmamap_destroy(dma->tag, dma->map);
1885 		dma->map = NULL;
1886 	}
1887 }
1888 
1889 int
1890 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1891 {
1892 	bus_size_t size;
1893 	int i, err;
1894 
1895 	ring->cur = 0;
1896 
1897 	/* Allocate RX descriptors (256-byte aligned). */
1898 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1899 		size = sizeof(struct iwx_rx_transfer_desc);
1900 	else
1901 		size = sizeof(uint64_t);
1902 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
1903 	    size * IWX_RX_MQ_RING_COUNT, 256);
1904 	if (err) {
1905 		printf("%s: could not allocate RX ring DMA memory\n",
1906 		    DEVNAME(sc));
1907 		goto fail;
1908 	}
1909 	ring->desc = ring->free_desc_dma.vaddr;
1910 
1911 	/* Allocate RX status area (16-byte aligned). */
1912 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1913 		size = sizeof(uint16_t);
1914 	else
1915 		size = sizeof(*ring->stat);
1916 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
1917 	if (err) {
1918 		printf("%s: could not allocate RX status DMA memory\n",
1919 		    DEVNAME(sc));
1920 		goto fail;
1921 	}
1922 	ring->stat = ring->stat_dma.vaddr;
1923 
1924 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1925 		size = sizeof(struct iwx_rx_completion_desc);
1926 	else
1927 		size = sizeof(uint32_t);
1928 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1929 	    size * IWX_RX_MQ_RING_COUNT, 256);
1930 	if (err) {
1931 		printf("%s: could not allocate RX ring DMA memory\n",
1932 		    DEVNAME(sc));
1933 		goto fail;
1934 	}
1935 
1936 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1937 		struct iwx_rx_data *data = &ring->data[i];
1938 
1939 		memset(data, 0, sizeof(*data));
1940 		err = bus_dmamap_create(sc->sc_dmat, IWX_RBUF_SIZE, 1,
1941 		    IWX_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1942 		    &data->map);
1943 		if (err) {
1944 			printf("%s: could not create RX buf DMA map\n",
1945 			    DEVNAME(sc));
1946 			goto fail;
1947 		}
1948 
1949 		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
1950 		if (err)
1951 			goto fail;
1952 	}
1953 	return 0;
1954 
1955 fail:	iwx_free_rx_ring(sc, ring);
1956 	return err;
1957 }
1958 
1959 void
1960 iwx_disable_rx_dma(struct iwx_softc *sc)
1961 {
1962 	int ntries;
1963 
1964 	if (iwx_nic_lock(sc)) {
1965 		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1966 			iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
1967 		else
1968 			iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
1969 		for (ntries = 0; ntries < 1000; ntries++) {
1970 			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
1971 				if (iwx_read_umac_prph(sc,
1972 				    IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE)
1973 					break;
1974 			} else {
1975 				if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
1976 				    IWX_RXF_DMA_IDLE)
1977 					break;
1978 			}
1979 			DELAY(10);
1980 		}
1981 		iwx_nic_unlock(sc);
1982 	}
1983 }
1984 
1985 void
1986 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1987 {
1988 	ring->cur = 0;
1989 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1990 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1991 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
1992 		uint16_t *status = sc->rxq.stat_dma.vaddr;
1993 		*status = 0;
1994 	} else
1995 		memset(ring->stat, 0, sizeof(*ring->stat));
1996 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1997 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1999 }
2000 
2001 void
2002 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2003 {
2004 	int i;
2005 
2006 	iwx_dma_contig_free(&ring->free_desc_dma);
2007 	iwx_dma_contig_free(&ring->stat_dma);
2008 	iwx_dma_contig_free(&ring->used_desc_dma);
2009 
2010 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2011 		struct iwx_rx_data *data = &ring->data[i];
2012 
2013 		if (data->m != NULL) {
2014 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2015 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2016 			bus_dmamap_unload(sc->sc_dmat, data->map);
2017 			m_freem(data->m);
2018 			data->m = NULL;
2019 		}
2020 		if (data->map != NULL)
2021 			bus_dmamap_destroy(sc->sc_dmat, data->map);
2022 	}
2023 }
2024 
2025 int
2026 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
2027 {
2028 	bus_addr_t paddr;
2029 	bus_size_t size;
2030 	int i, err;
2031 	size_t bc_tbl_size;
2032 	bus_size_t bc_align;
2033 
2034 	ring->qid = qid;
2035 	ring->queued = 0;
2036 	ring->cur = 0;
2037 	ring->cur_hw = 0;
2038 	ring->tail = 0;
2039 	ring->tail_hw = 0;
2040 
2041 	/* Allocate TX descriptors (256-byte aligned). */
2042 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
2043 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
2044 	if (err) {
2045 		printf("%s: could not allocate TX ring DMA memory\n",
2046 		    DEVNAME(sc));
2047 		goto fail;
2048 	}
2049 	ring->desc = ring->desc_dma.vaddr;
2050 
2051 	/*
2052 	 * The hardware supports up to 512 Tx rings which is more
2053 	 * than we currently need.
2054 	 *
2055 	 * In DQA mode we use 1 command queue + 1 default queue for
2056 	 * management, control, and non-QoS data frames.
2057 	 * The command queue is sc->txq[0]; our default queue is sc->txq[1].
2058 	 *
2059 	 * Tx aggregation requires additional queues, one queue per TID for which
2060 	 * aggregation is enabled; TID 0-7 map to sc->txq[2:9] (sketch below).
2061 	 * Firmware may assign its own internal IDs for these queues
2062 	 * depending on which TID gets aggregation enabled first.
2063 	 * The driver maintains a table mapping driver-side queue IDs
2064 	 * to firmware-side queue IDs.
2065 	 */
2066 
2067 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2068 		bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
2069 		    IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
2070 		bc_align = 128;
2071 	} else {
2072 		bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
2073 		bc_align = 64;
2074 	}
2075 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
2076 	    bc_align);
2077 	if (err) {
2078 		printf("%s: could not allocate byte count table DMA memory\n",
2079 		    DEVNAME(sc));
2080 		goto fail;
2081 	}
2082 
2083 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
2084 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
2085 	    IWX_FIRST_TB_SIZE_ALIGN);
2086 	if (err) {
2087 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
2088 		goto fail;
2089 	}
2090 	ring->cmd = ring->cmd_dma.vaddr;
2091 
2092 	paddr = ring->cmd_dma.paddr;
2093 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2094 		struct iwx_tx_data *data = &ring->data[i];
2095 		size_t mapsize;
2096 
2097 		data->cmd_paddr = paddr;
2098 		paddr += sizeof(struct iwx_device_cmd);
2099 
2100 		/* FW commands may require more mapped space than packets. */
2101 		if (qid == IWX_DQA_CMD_QUEUE)
2102 			mapsize = (sizeof(struct iwx_cmd_header) +
2103 			    IWX_MAX_CMD_PAYLOAD_SIZE);
2104 		else
2105 			mapsize = MCLBYTES;
2106 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
2107 		    IWX_TFH_NUM_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
2108 		    &data->map);
2109 		if (err) {
2110 			printf("%s: could not create TX buf DMA map\n",
2111 			    DEVNAME(sc));
2112 			goto fail;
2113 		}
2114 	}
2115 	KASSERT(paddr == ring->cmd_dma.paddr + size);
2116 	return 0;
2117 
2118 fail:	iwx_free_tx_ring(sc, ring);
2119 	return err;
2120 }
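
/*
 * Illustrative sketch of the queue layout described in the comment above
 * (hypothetical helper, not used by the driver): sc->txq[0] is the command
 * queue, sc->txq[1] the default data queue, and the aggregation queues for
 * TIDs 0-7 start at sc->txq[2].
 */
static int
iwx_example_agg_qid_for_tid(uint8_t tid)
{
	/* TID 0 -> sc->txq[2], TID 1 -> sc->txq[3], ..., TID 7 -> sc->txq[9] */
	return 2 + tid;
}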
2121 
2122 void
2123 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2124 {
2125 	int i;
2126 
2127 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2128 		struct iwx_tx_data *data = &ring->data[i];
2129 
2130 		if (data->m != NULL) {
2131 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2132 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2133 			bus_dmamap_unload(sc->sc_dmat, data->map);
2134 			m_freem(data->m);
2135 			data->m = NULL;
2136 		}
2137 	}
2138 
2139 	/* Clear byte count table. */
2140 	memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
2141 
2142 	/* Clear TX descriptors. */
2143 	memset(ring->desc, 0, ring->desc_dma.size);
2144 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
2145 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
2146 	sc->qfullmsk &= ~(1 << ring->qid);
2147 	sc->qenablemsk &= ~(1 << ring->qid);
2148 	for (i = 0; i < nitems(sc->aggqid); i++) {
2149 		if (sc->aggqid[i] == ring->qid) {
2150 			sc->aggqid[i] = 0;
2151 			break;
2152 		}
2153 	}
2154 	ring->queued = 0;
2155 	ring->cur = 0;
2156 	ring->cur_hw = 0;
2157 	ring->tail = 0;
2158 	ring->tail_hw = 0;
2159 	ring->tid = 0;
2160 }
2161 
2162 void
2163 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2164 {
2165 	int i;
2166 
2167 	iwx_dma_contig_free(&ring->desc_dma);
2168 	iwx_dma_contig_free(&ring->cmd_dma);
2169 	iwx_dma_contig_free(&ring->bc_tbl);
2170 
2171 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2172 		struct iwx_tx_data *data = &ring->data[i];
2173 
2174 		if (data->m != NULL) {
2175 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2176 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2177 			bus_dmamap_unload(sc->sc_dmat, data->map);
2178 			m_freem(data->m);
2179 			data->m = NULL;
2180 		}
2181 		if (data->map != NULL)
2182 			bus_dmamap_destroy(sc->sc_dmat, data->map);
2183 	}
2184 }
2185 
2186 void
2187 iwx_enable_rfkill_int(struct iwx_softc *sc)
2188 {
2189 	if (!sc->sc_msix) {
2190 		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
2191 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2192 	} else {
2193 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2194 		    sc->sc_fh_init_mask);
2195 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2196 		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
2197 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
2198 	}
2199 
2200 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2201 	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
2202 }
2203 
2204 int
2205 iwx_check_rfkill(struct iwx_softc *sc)
2206 {
2207 	uint32_t v;
2208 	int rv;
2209 
2210 	/*
2211 	 * "documentation" is not really helpful here:
2212 	 *  27:	HW_RF_KILL_SW
2213 	 *	Indicates state of (platform's) hardware RF-Kill switch
2214 	 *
2215 	 * But apparently when it's off, it's on ...
2216 	 */
2217 	v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
2218 	rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
2219 	if (rv) {
2220 		sc->sc_flags |= IWX_FLAG_RFKILL;
2221 	} else {
2222 		sc->sc_flags &= ~IWX_FLAG_RFKILL;
2223 	}
2224 
2225 	return rv;
2226 }
2227 
2228 void
2229 iwx_enable_interrupts(struct iwx_softc *sc)
2230 {
2231 	if (!sc->sc_msix) {
2232 		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
2233 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2234 	} else {
2235 		/*
2236 		 * sc_fh_mask/sc_hw_mask track all currently unmasked causes.
2237 		 * Unlike MSI, an MSI-X cause is enabled while its bit is unset.
2238 		 */
2239 		sc->sc_hw_mask = sc->sc_hw_init_mask;
2240 		sc->sc_fh_mask = sc->sc_fh_init_mask;
2241 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2242 		    ~sc->sc_fh_mask);
2243 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2244 		    ~sc->sc_hw_mask);
2245 	}
2246 }
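
/*
 * Illustrative sketch (hypothetical helpers): because the MSI-X mask
 * registers are inverted, a cause fires while its bit is clear. Enabling
 * a single cause therefore clears its bit and disabling sets it, with
 * sc_hw_mask shadowing whatever is currently enabled.
 */
static void
iwx_example_enable_hw_cause(struct iwx_softc *sc, uint32_t cause)
{
	sc->sc_hw_mask |= cause;
	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, cause);
}

static void
iwx_example_disable_hw_cause(struct iwx_softc *sc, uint32_t cause)
{
	sc->sc_hw_mask &= ~cause;
	IWX_SETBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, cause);
}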
2247 
2248 void
2249 iwx_enable_fwload_interrupt(struct iwx_softc *sc)
2250 {
2251 	if (!sc->sc_msix) {
2252 		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
2253 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2254 	} else {
2255 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2256 		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
2257 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
2258 		/*
2259 		 * Leave all the FH causes enabled to get the ALIVE
2260 		 * notification.
2261 		 */
2262 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2263 		    ~sc->sc_fh_init_mask);
2264 		sc->sc_fh_mask = sc->sc_fh_init_mask;
2265 	}
2266 }
2267 
2268 void
2269 iwx_restore_interrupts(struct iwx_softc *sc)
2270 {
2271 	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2272 }
2273 
2274 void
2275 iwx_disable_interrupts(struct iwx_softc *sc)
2276 {
2277 	if (!sc->sc_msix) {
2278 		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
2279 
2280 		/* acknowledge all interrupts */
2281 		IWX_WRITE(sc, IWX_CSR_INT, ~0);
2282 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
2283 	} else {
2284 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2285 		    sc->sc_fh_init_mask);
2286 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2287 		    sc->sc_hw_init_mask);
2288 	}
2289 }
2290 
2291 void
2292 iwx_ict_reset(struct iwx_softc *sc)
2293 {
2294 	iwx_disable_interrupts(sc);
2295 
2296 	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
2297 	sc->ict_cur = 0;
2298 
2299 	/* Set physical address of ICT (4KB aligned). */
2300 	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
2301 	    IWX_CSR_DRAM_INT_TBL_ENABLE
2302 	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
2303 	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
2304 	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);
2305 
2306 	/* Switch to ICT interrupt mode in driver. */
2307 	sc->sc_flags |= IWX_FLAG_USE_ICT;
2308 
2309 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
2310 	iwx_enable_interrupts(sc);
2311 }
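
/*
 * Illustrative sketch (hypothetical helper): the ICT table register takes
 * the table's physical address shifted right by IWX_ICT_PADDR_SHIFT; since
 * the table is 4KB aligned, the low register bits are free to carry the
 * enable, wrap-check and write-pointer flags OR'd in above.
 */
static uint32_t
iwx_example_ict_reg_val(bus_addr_t paddr)
{
	return (uint32_t)(IWX_CSR_DRAM_INT_TBL_ENABLE
	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | (paddr >> IWX_ICT_PADDR_SHIFT));
}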
2312 
2313 #define IWX_HW_READY_TIMEOUT 50
2314 int
2315 iwx_set_hw_ready(struct iwx_softc *sc)
2316 {
2317 	int ready;
2318 
2319 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2320 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2321 
2322 	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
2323 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2324 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2325 	    IWX_HW_READY_TIMEOUT);
2326 	if (ready)
2327 		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
2328 		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);
2329 
2330 	return ready;
2331 }
2332 #undef IWX_HW_READY_TIMEOUT
2333 
2334 int
2335 iwx_prepare_card_hw(struct iwx_softc *sc)
2336 {
2337 	int t = 0;
2338 	int ntries;
2339 
2340 	if (iwx_set_hw_ready(sc))
2341 		return 0;
2342 
2343 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2344 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2345 	DELAY(1000);
2346 
2347 	for (ntries = 0; ntries < 10; ntries++) {
2348 		/* If HW is not ready, prepare the conditions to check again */
2349 		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2350 		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);
2351 
2352 		do {
2353 			if (iwx_set_hw_ready(sc))
2354 				return 0;
2355 			DELAY(200);
2356 			t += 200;
2357 		} while (t < 150000);
2358 		DELAY(25000);
2359 	}
2360 
2361 	return ETIMEDOUT;
2362 }
2363 
2364 int
2365 iwx_force_power_gating(struct iwx_softc *sc)
2366 {
2367 	int err;
2368 
2369 	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2370 	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2371 	if (err)
2372 		return err;
2373 	DELAY(20);
2374 	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2375 	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
2376 	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
2377 	if (err)
2378 		return err;
2379 	DELAY(20);
2380 	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2381 	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2382 	return err;
2383 }
2384 
2385 void
2386 iwx_apm_config(struct iwx_softc *sc)
2387 {
2388 	pcireg_t lctl, cap;
2389 
2390 	/*
2391 	 * L0S states have been found to be unstable with our devices
2392 	 * and in newer hardware they are not officially supported at
2393 	 * all, so we must always set the L0S_DISABLED bit.
2394 	 */
2395 	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
2396 
2397 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2398 	    sc->sc_cap_off + PCI_PCIE_LCSR);
2399 	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
2400 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2401 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
2402 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
2403 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
2404 	    DEVNAME(sc),
2405 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
2406 	    sc->sc_ltr_enabled ? "En" : "Dis"));
2407 }
2408 
2409 /*
2410  * Start up NIC's basic functionality after it has been reset
2411  * e.g. after platform boot or shutdown.
2412  * NOTE:  This does not load uCode nor start the embedded processor
2413  */
2414 int
2415 iwx_apm_init(struct iwx_softc *sc)
2416 {
2417 	int err = 0;
2418 
2419 	/*
2420 	 * Disable L0s without affecting L1;
2421 	 *  don't wait for ICH L0s (ICH bug W/A)
2422 	 */
2423 	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
2424 	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
2425 
2426 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
2427 	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
2428 
2429 	/*
2430 	 * Enable HAP INTA (interrupt from management bus) to
2431 	 * wake device's PCI Express link L1a -> L0s
2432 	 */
2433 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2434 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
2435 
2436 	iwx_apm_config(sc);
2437 
2438 	/*
2439 	 * Set "initialization complete" bit to move adapter from
2440 	 * D0U* --> D0A* (powered-up active) state.
2441 	 */
2442 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2443 
2444 	/*
2445 	 * Wait for clock stabilization; once stabilized, access to
2446 	 * device-internal resources is supported, e.g. iwx_write_prph()
2447 	 * and accesses to uCode SRAM.
2448 	 */
2449 	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2450 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2451 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2452 		printf("%s: timeout waiting for clock stabilization\n",
2453 		    DEVNAME(sc));
2454 		err = ETIMEDOUT;
2455 		goto out;
2456 	}
2457  out:
2458 	if (err)
2459 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
2460 	return err;
2461 }
2462 
2463 void
2464 iwx_apm_stop(struct iwx_softc *sc)
2465 {
2466 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2467 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2468 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2469 	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
2470 	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
2471 	DELAY(1000);
2472 	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2473 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2474 	DELAY(5000);
2475 
2476 	/* stop device's busmaster DMA activity */
2477 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);
2478 
2479 	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
2480 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
2481 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
2482 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
2483 
2484 	/*
2485 	 * Clear "initialization complete" bit to move adapter from
2486 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
2487 	 */
2488 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2489 	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2490 }
2491 
2492 void
2493 iwx_init_msix_hw(struct iwx_softc *sc)
2494 {
2495 	iwx_conf_msix_hw(sc, 0);
2496 
2497 	if (!sc->sc_msix)
2498 		return;
2499 
2500 	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
2501 	sc->sc_fh_mask = sc->sc_fh_init_mask;
2502 	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
2503 	sc->sc_hw_mask = sc->sc_hw_init_mask;
2504 }
2505 
2506 void
2507 iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
2508 {
2509 	int vector = 0;
2510 
2511 	if (!sc->sc_msix) {
2512 		/* Newer chips default to MSIX. */
2513 		if (!stopped && iwx_nic_lock(sc)) {
2514 			iwx_write_umac_prph(sc, IWX_UREG_CHICK,
2515 			    IWX_UREG_CHICK_MSI_ENABLE);
2516 			iwx_nic_unlock(sc);
2517 		}
2518 		return;
2519 	}
2520 
2521 	if (!stopped && iwx_nic_lock(sc)) {
2522 		iwx_write_umac_prph(sc, IWX_UREG_CHICK,
2523 		    IWX_UREG_CHICK_MSIX_ENABLE);
2524 		iwx_nic_unlock(sc);
2525 	}
2526 
2527 	/* Disable all interrupts */
2528 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
2529 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
2530 
2531 	/* Map fallback-queue (command/mgmt) to a single vector */
2532 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
2533 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2534 	/* Map RSS queue (data) to the same vector */
2535 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
2536 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2537 
2538 	/* Enable interrupts from the RX queue causes */
2539 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2540 	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
2541 
2542 	/* Map non-RX causes to the same vector */
2543 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
2544 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2545 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
2546 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2547 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
2548 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2549 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
2550 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2551 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
2552 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2553 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
2554 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2555 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE),
2556 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2557 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
2558 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2559 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
2560 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2561 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
2562 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2563 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
2564 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2565 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
2566 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2567 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
2568 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2569 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
2570 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2571 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
2572 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2573 
2574 	/* Enable interrupts from the non-RX causes */
2575 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2576 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2577 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2578 	    IWX_MSIX_FH_INT_CAUSES_S2D |
2579 	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
2580 	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2581 	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
2582 	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2583 	    IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE |
2584 	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2585 	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2586 	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2587 	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2588 	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
2589 	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
2590 	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2591 	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
2592 }
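
/*
 * The long run of IVAR writes above could equivalently be table-driven.
 * A minimal sketch (hypothetical helper; the driver keeps the explicit
 * writes):
 */
static void
iwx_example_map_ivar_causes(struct iwx_softc *sc, int vector)
{
	static const int causes[] = {
		IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM,
		IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM,
		IWX_MSIX_IVAR_CAUSE_S2D,
		IWX_MSIX_IVAR_CAUSE_FH_ERR,
		IWX_MSIX_IVAR_CAUSE_REG_ALIVE,
		IWX_MSIX_IVAR_CAUSE_REG_WAKEUP,
		IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE,
		IWX_MSIX_IVAR_CAUSE_REG_CT_KILL,
		IWX_MSIX_IVAR_CAUSE_REG_RF_KILL,
		IWX_MSIX_IVAR_CAUSE_REG_PERIODIC,
		IWX_MSIX_IVAR_CAUSE_REG_SW_ERR,
		IWX_MSIX_IVAR_CAUSE_REG_SCD,
		IWX_MSIX_IVAR_CAUSE_REG_FH_TX,
		IWX_MSIX_IVAR_CAUSE_REG_HW_ERR,
		IWX_MSIX_IVAR_CAUSE_REG_HAP,
	};
	int i;

	for (i = 0; i < nitems(causes); i++)
		IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(causes[i]),
		    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
}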
2593 
2594 int
2595 iwx_clear_persistence_bit(struct iwx_softc *sc)
2596 {
2597 	uint32_t hpm, wprot;
2598 
2599 	hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
2600 	if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
2601 		wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
2602 		if (wprot & IWX_PREG_WFPM_ACCESS) {
2603 			printf("%s: cannot clear persistence bit\n",
2604 			    DEVNAME(sc));
2605 			return EPERM;
2606 		}
2607 		iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
2608 		    hpm & ~IWX_PERSISTENCE_BIT);
2609 	}
2610 
2611 	return 0;
2612 }
2613 
2614 int
2615 iwx_start_hw(struct iwx_softc *sc)
2616 {
2617 	int err;
2618 
2619 	err = iwx_prepare_card_hw(sc);
2620 	if (err)
2621 		return err;
2622 
2623 	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
2624 		err = iwx_clear_persistence_bit(sc);
2625 		if (err)
2626 			return err;
2627 	}
2628 
2629 	/* Reset the entire device */
2630 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2631 	DELAY(5000);
2632 
2633 	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 &&
2634 	    sc->sc_integrated) {
2635 		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2636 		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2637 		DELAY(20);
2638 		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2639 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2640 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2641 			printf("%s: timeout waiting for clock stabilization\n",
2642 			    DEVNAME(sc));
2643 			return ETIMEDOUT;
2644 		}
2645 
2646 		err = iwx_force_power_gating(sc);
2647 		if (err)
2648 			return err;
2649 
2650 		/* Reset the entire device */
2651 		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2652 		DELAY(5000);
2653 	}
2654 
2655 	err = iwx_apm_init(sc);
2656 	if (err)
2657 		return err;
2658 
2659 	iwx_init_msix_hw(sc);
2660 
2661 	iwx_enable_rfkill_int(sc);
2662 	iwx_check_rfkill(sc);
2663 
2664 	return 0;
2665 }
2666 
2667 void
2668 iwx_stop_device(struct iwx_softc *sc)
2669 {
2670 	struct ieee80211com *ic = &sc->sc_ic;
2671 	struct ieee80211_node *ni = ic->ic_bss;
2672 	int i;
2673 
2674 	iwx_disable_interrupts(sc);
2675 	sc->sc_flags &= ~IWX_FLAG_USE_ICT;
2676 
2677 	iwx_disable_rx_dma(sc);
2678 	iwx_reset_rx_ring(sc, &sc->rxq);
2679 	for (i = 0; i < nitems(sc->txq); i++)
2680 		iwx_reset_tx_ring(sc, &sc->txq[i]);
2681 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
2682 		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
2683 		if (ba->ba_state != IEEE80211_BA_AGREED)
2684 			continue;
2685 		ieee80211_delba_request(ic, ni, 0, 1, i);
2686 	}
2687 
2688 	/* Make sure (redundant) we've released our request to stay awake */
2689 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2690 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2691 	if (sc->sc_nic_locks > 0)
2692 		printf("%s: %d active NIC locks forcefully cleared\n",
2693 		    DEVNAME(sc), sc->sc_nic_locks);
2694 	sc->sc_nic_locks = 0;
2695 
2696 	/* Stop the device, and put it in low power state */
2697 	iwx_apm_stop(sc);
2698 
2699 	/* Reset the on-board processor. */
2700 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2701 	DELAY(5000);
2702 
2703 	/*
2704 	 * Upon stop, the IVAR table gets erased, so msi-x won't
2705 	 * work. This causes a bug in RF-KILL flows, since the interrupt
2706 	 * that enables radio won't fire on the correct irq, and the
2707 	 * driver won't be able to handle the interrupt.
2708 	 * Configure the IVAR table again after reset.
2709 	 */
2710 	iwx_conf_msix_hw(sc, 1);
2711 
2712 	/*
2713 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2714 	 * Clear the interrupt again.
2715 	 */
2716 	iwx_disable_interrupts(sc);
2717 
2718 	/* Even though we stop the HW we still want the RF kill interrupt. */
2719 	iwx_enable_rfkill_int(sc);
2720 	iwx_check_rfkill(sc);
2721 
2722 	iwx_prepare_card_hw(sc);
2723 
2724 	iwx_ctxt_info_free_paging(sc);
2725 	iwx_dma_contig_free(&sc->pnvm_dma);
2726 }
2727 
2728 void
2729 iwx_nic_config(struct iwx_softc *sc)
2730 {
2731 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2732 	uint32_t mask, val, reg_val = 0;
2733 
2734 	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2735 	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2736 	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2737 	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
2738 	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2739 	    IWX_FW_PHY_CFG_RADIO_DASH_POS;
2740 
2741 	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2742 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2743 	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2744 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2745 
2746 	/* radio configuration */
2747 	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2748 	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2749 	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2750 
2751 	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2752 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2753 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2754 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2755 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2756 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2757 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2758 
2759 	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2760 	val &= ~mask;
2761 	val |= reg_val;
2762 	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2763 }
2764 
2765 int
2766 iwx_nic_rx_init(struct iwx_softc *sc)
2767 {
2768 	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
2769 
2770 	/*
2771 	 * We don't configure the RFH; the firmware will do that.
2772 	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
2773 	 */
2774 	return 0;
2775 }
2776 
2777 int
2778 iwx_nic_init(struct iwx_softc *sc)
2779 {
2780 	int err;
2781 
2782 	iwx_apm_init(sc);
2783 	if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
2784 		iwx_nic_config(sc);
2785 
2786 	err = iwx_nic_rx_init(sc);
2787 	if (err)
2788 		return err;
2789 
2790 	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2791 
2792 	return 0;
2793 }
2794 
2795 /* Map a TID to an ieee80211_edca_ac category. */
2796 const uint8_t iwx_tid_to_ac[IWX_MAX_TID_COUNT] = {
2797 	EDCA_AC_BE,
2798 	EDCA_AC_BK,
2799 	EDCA_AC_BK,
2800 	EDCA_AC_BE,
2801 	EDCA_AC_VI,
2802 	EDCA_AC_VI,
2803 	EDCA_AC_VO,
2804 	EDCA_AC_VO,
2805 };
2806 
2807 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2808 const uint8_t iwx_ac_to_tx_fifo[] = {
2809 	IWX_GEN2_EDCA_TX_FIFO_BE,
2810 	IWX_GEN2_EDCA_TX_FIFO_BK,
2811 	IWX_GEN2_EDCA_TX_FIFO_VI,
2812 	IWX_GEN2_EDCA_TX_FIFO_VO,
2813 };
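
/*
 * Usage sketch (hypothetical helper): the two tables above compose, so the
 * firmware Tx FIFO for a TID can be looked up in one step. The caller must
 * pass tid < IWX_MAX_TID_COUNT.
 */
static uint8_t
iwx_example_tid_to_tx_fifo(uint8_t tid)
{
	/* e.g. TID 6 -> EDCA_AC_VO -> IWX_GEN2_EDCA_TX_FIFO_VO */
	return iwx_ac_to_tx_fifo[iwx_tid_to_ac[tid]];
}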
2814 
2815 int
2816 iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
2817     int num_slots)
2818 {
2819 	struct iwx_rx_packet *pkt;
2820 	struct iwx_tx_queue_cfg_rsp *resp;
2821 	struct iwx_tx_queue_cfg_cmd cmd_v0;
2822 	struct iwx_scd_queue_cfg_cmd cmd_v3;
2823 	struct iwx_host_cmd hcmd = {
2824 		.flags = IWX_CMD_WANT_RESP,
2825 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2826 	};
2827 	struct iwx_tx_ring *ring = &sc->txq[qid];
2828 	int err, fwqid, cmd_ver;
2829 	uint32_t wr_idx;
2830 	size_t resp_len;
2831 
2832 	iwx_reset_tx_ring(sc, ring);
2833 
2834 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
2835 	    IWX_SCD_QUEUE_CONFIG_CMD);
2836 	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
2837 		memset(&cmd_v0, 0, sizeof(cmd_v0));
2838 		cmd_v0.sta_id = sta_id;
2839 		cmd_v0.tid = tid;
2840 		cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
2841 		cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2842 		cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
2843 		cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr);
2844 		hcmd.id = IWX_SCD_QUEUE_CFG;
2845 		hcmd.data[0] = &cmd_v0;
2846 		hcmd.len[0] = sizeof(cmd_v0);
2847 	} else if (cmd_ver == 3) {
2848 		memset(&cmd_v3, 0, sizeof(cmd_v3));
2849 		cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD);
2850 		cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr);
2851 		cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr);
2852 		cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2853 		cmd_v3.u.add.flags = htole32(0);
2854 		cmd_v3.u.add.sta_mask = htole32(1 << sta_id);
2855 		cmd_v3.u.add.tid = tid;
2856 		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
2857 		    IWX_SCD_QUEUE_CONFIG_CMD);
2858 		hcmd.data[0] = &cmd_v3;
2859 		hcmd.len[0] = sizeof(cmd_v3);
2860 	} else {
2861 		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
2862 		    DEVNAME(sc), cmd_ver);
2863 		return ENOTSUP;
2864 	}
2865 
2866 	err = iwx_send_cmd(sc, &hcmd);
2867 	if (err)
2868 		return err;
2869 
2870 	pkt = hcmd.resp_pkt;
2871 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2872 		err = EIO;
2873 		goto out;
2874 	}
2875 
2876 	resp_len = iwx_rx_packet_payload_len(pkt);
2877 	if (resp_len != sizeof(*resp)) {
2878 		err = EIO;
2879 		goto out;
2880 	}
2881 
2882 	resp = (void *)pkt->data;
2883 	fwqid = le16toh(resp->queue_number);
2884 	wr_idx = le16toh(resp->write_pointer);
2885 
2886 	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
2887 	if (fwqid != qid) {
2888 		err = EIO;
2889 		goto out;
2890 	}
2891 
2892 	if (wr_idx != ring->cur_hw) {
2893 		err = EIO;
2894 		goto out;
2895 	}
2896 
2897 	sc->qenablemsk |= (1 << qid);
2898 	ring->tid = tid;
2899 out:
2900 	iwx_free_resp(sc, &hcmd);
2901 	return err;
2902 }
2903 
2904 int
2905 iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
2906 {
2907 	struct iwx_rx_packet *pkt;
2908 	struct iwx_tx_queue_cfg_rsp *resp;
2909 	struct iwx_tx_queue_cfg_cmd cmd_v0;
2910 	struct iwx_scd_queue_cfg_cmd cmd_v3;
2911 	struct iwx_host_cmd hcmd = {
2912 		.flags = IWX_CMD_WANT_RESP,
2913 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2914 	};
2915 	struct iwx_tx_ring *ring = &sc->txq[qid];
2916 	int err, cmd_ver;
2917 
2918 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
2919 	    IWX_SCD_QUEUE_CONFIG_CMD);
2920 	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
2921 		memset(&cmd_v0, 0, sizeof(cmd_v0));
2922 		cmd_v0.sta_id = sta_id;
2923 		cmd_v0.tid = tid;
2924 		cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */
2925 		cmd_v0.cb_size = htole32(0);
2926 		cmd_v0.byte_cnt_addr = htole64(0);
2927 		cmd_v0.tfdq_addr = htole64(0);
2928 		hcmd.id = IWX_SCD_QUEUE_CFG;
2929 		hcmd.data[0] = &cmd_v0;
2930 		hcmd.len[0] = sizeof(cmd_v0);
2931 	} else if (cmd_ver == 3) {
2932 		memset(&cmd_v3, 0, sizeof(cmd_v3));
2933 		cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE);
2934 		cmd_v3.u.remove.sta_mask = htole32(1 << sta_id);
2935 		cmd_v3.u.remove.tid = tid;
2936 		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
2937 		    IWX_SCD_QUEUE_CONFIG_CMD);
2938 		hcmd.data[0] = &cmd_v3;
2939 		hcmd.len[0] = sizeof(cmd_v3);
2940 	} else {
2941 		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
2942 		    DEVNAME(sc), cmd_ver);
2943 		return ENOTSUP;
2944 	}
2945 
2946 	err = iwx_send_cmd(sc, &hcmd);
2947 	if (err)
2948 		return err;
2949 
2950 	pkt = hcmd.resp_pkt;
2951 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2952 		err = EIO;
2953 		goto out;
2954 	}
2955 
2956 	sc->qenablemsk &= ~(1 << qid);
2957 	iwx_reset_tx_ring(sc, ring);
2958 out:
2959 	iwx_free_resp(sc, &hcmd);
2960 	return err;
2961 }
2962 
2963 void
2964 iwx_post_alive(struct iwx_softc *sc)
2965 {
2966 	int txcmd_ver;
2967 
2968 	iwx_ict_reset(sc);
2969 
2970 	txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
2971 	if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6)
2972 		sc->sc_rate_n_flags_version = 2;
2973 	else
2974 		sc->sc_rate_n_flags_version = 1;
2977 }
2978 
2979 int
2980 iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
2981     uint32_t duration_tu)
2982 {
2983 	struct iwx_session_prot_cmd cmd = {
2984 		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
2985 		    in->in_color)),
2986 		.action = htole32(IWX_FW_CTXT_ACTION_ADD),
2987 		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
2988 		.duration_tu = htole32(duration_tu),
2989 	};
2990 	uint32_t cmd_id;
2991 	int err;
2992 
2993 	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
2994 	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
2995 	if (!err)
2996 		sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
2997 	return err;
2998 }
2999 
3000 void
3001 iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
3002 {
3003 	struct iwx_session_prot_cmd cmd = {
3004 		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3005 		    in->in_color)),
3006 		.action = htole32(IWX_FW_CTXT_ACTION_REMOVE),
3007 		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3008 		.duration_tu = 0,
3009 	};
3010 	uint32_t cmd_id;
3011 
3012 	/* Do nothing if the time event has already ended. */
3013 	if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
3014 		return;
3015 
3016 	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3017 	if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
3018 		sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
3019 }
3020 
3021 /*
3022  * NVM read access and content parsing.  We do not support
3023  * external NVM or writing NVM.
3024  */
3025 
3026 uint8_t
3027 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
3028 {
3029 	uint8_t tx_ant;
3030 
3031 	tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
3032 	    >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
3033 
3034 	if (sc->sc_nvm.valid_tx_ant)
3035 		tx_ant &= sc->sc_nvm.valid_tx_ant;
3036 
3037 	return tx_ant;
3038 }
3039 
3040 uint8_t
3041 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
3042 {
3043 	uint8_t rx_ant;
3044 
3045 	rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
3046 	    >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
3047 
3048 	if (sc->sc_nvm.valid_rx_ant)
3049 		rx_ant &= sc->sc_nvm.valid_rx_ant;
3050 
3051 	return rx_ant;
3052 }
3053 
3054 void
3055 iwx_init_channel_map(struct iwx_softc *sc, uint16_t *channel_profile_v3,
3056     uint32_t *channel_profile_v4, int nchan_profile)
3057 {
3058 	struct ieee80211com *ic = &sc->sc_ic;
3059 	struct iwx_nvm_data *data = &sc->sc_nvm;
3060 	int ch_idx;
3061 	struct ieee80211_channel *channel;
3062 	uint32_t ch_flags;
3063 	int is_5ghz;
3064 	int flags, hw_value;
3065 	int nchan;
3066 	const uint8_t *nvm_channels;
3067 
3068 	if (sc->sc_uhb_supported) {
3069 		nchan = nitems(iwx_nvm_channels_uhb);
3070 		nvm_channels = iwx_nvm_channels_uhb;
3071 	} else {
3072 		nchan = nitems(iwx_nvm_channels_8000);
3073 		nvm_channels = iwx_nvm_channels_8000;
3074 	}
3075 
3076 	for (ch_idx = 0; ch_idx < nchan && ch_idx < nchan_profile; ch_idx++) {
3077 		if (channel_profile_v4)
3078 			ch_flags = le32_to_cpup(channel_profile_v4 + ch_idx);
3079 		else
3080 			ch_flags = le16_to_cpup(channel_profile_v3 + ch_idx);
3081 
3082 		/* net80211 cannot handle 6 GHz channel numbers yet */
3083 		if (ch_idx >= IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS)
3084 			break;
3085 
3086 		is_5ghz = ch_idx >= IWX_NUM_2GHZ_CHANNELS;
3087 		if (is_5ghz && !data->sku_cap_band_52GHz_enable)
3088 			ch_flags &= ~IWX_NVM_CHANNEL_VALID;
3089 
3090 		hw_value = nvm_channels[ch_idx];
3091 		channel = &ic->ic_channels[hw_value];
3092 
3093 		if (!(ch_flags & IWX_NVM_CHANNEL_VALID)) {
3094 			channel->ic_freq = 0;
3095 			channel->ic_flags = 0;
3096 			continue;
3097 		}
3098 
3099 		if (!is_5ghz) {
3100 			flags = IEEE80211_CHAN_2GHZ;
3101 			channel->ic_flags
3102 			    = IEEE80211_CHAN_CCK
3103 			    | IEEE80211_CHAN_OFDM
3104 			    | IEEE80211_CHAN_DYN
3105 			    | IEEE80211_CHAN_2GHZ;
3106 		} else {
3107 			flags = IEEE80211_CHAN_5GHZ;
3108 			channel->ic_flags =
3109 			    IEEE80211_CHAN_A;
3110 		}
3111 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
3112 
3113 		if (!(ch_flags & IWX_NVM_CHANNEL_ACTIVE))
3114 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
3115 
3116 		if (data->sku_cap_11n_enable) {
3117 			channel->ic_flags |= IEEE80211_CHAN_HT;
3118 			if (ch_flags & IWX_NVM_CHANNEL_40MHZ)
3119 				channel->ic_flags |= IEEE80211_CHAN_40MHZ;
3120 		}
3121 
3122 		if (is_5ghz && data->sku_cap_11ac_enable) {
3123 			channel->ic_flags |= IEEE80211_CHAN_VHT;
3124 			if (ch_flags & IWX_NVM_CHANNEL_80MHZ)
3125 				channel->ic_xflags |= IEEE80211_CHANX_80MHZ;
3126 		}
3127 	}
3128 }
3129 
3130 int
3131 iwx_mimo_enabled(struct iwx_softc *sc)
3132 {
3133 	struct ieee80211com *ic = &sc->sc_ic;
3134 
3135 	return !sc->sc_nvm.sku_cap_mimo_disable &&
3136 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
3137 }
3138 
3139 void
3140 iwx_setup_ht_rates(struct iwx_softc *sc)
3141 {
3142 	struct ieee80211com *ic = &sc->sc_ic;
3143 	uint8_t rx_ant;
3144 
3145 	/* TX is supported with the same MCS as RX. */
3146 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
3147 
3148 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
3149 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
3150 
3151 	if (!iwx_mimo_enabled(sc))
3152 		return;
3153 
3154 	rx_ant = iwx_fw_valid_rx_ant(sc);
3155 	if ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
3156 	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)
3157 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
3158 }
3159 
3160 void
3161 iwx_setup_vht_rates(struct iwx_softc *sc)
3162 {
3163 	struct ieee80211com *ic = &sc->sc_ic;
3164 	uint8_t rx_ant = iwx_fw_valid_rx_ant(sc);
3165 	int n;
3166 
3167 	ic->ic_vht_rxmcs = (IEEE80211_VHT_MCS_0_9 <<
3168 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(1));
3169 
3170 	if (iwx_mimo_enabled(sc) &&
3171 	    ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
3172 	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)) {
3173 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_0_9 <<
3174 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3175 	} else {
3176 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3177 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3178 	}
3179 
3180 	for (n = 3; n <= IEEE80211_VHT_NUM_SS; n++) {
3181 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3182 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(n));
3183 	}
3184 
3185 	ic->ic_vht_txmcs = ic->ic_vht_rxmcs;
3186 }
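
/*
 * Illustrative sketch (hypothetical helper): the VHT MCS maps built above
 * pack two bits per spatial stream, so the support value for stream n
 * (1-based), e.g. IEEE80211_VHT_MCS_0_9 or IEEE80211_VHT_MCS_SS_NOT_SUPP,
 * can be recovered like this:
 */
static int
iwx_example_vht_mcs_for_ss(uint16_t mcs_map, int n)
{
	return (mcs_map >> IEEE80211_VHT_MCS_FOR_SS_SHIFT(n)) & 0x3;
}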
3187 
3188 void
3189 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
3190     uint16_t ssn, uint16_t buf_size)
3191 {
3192 	reorder_buf->head_sn = ssn;
3193 	reorder_buf->num_stored = 0;
3194 	reorder_buf->buf_size = buf_size;
3195 	reorder_buf->last_amsdu = 0;
3196 	reorder_buf->last_sub_index = 0;
3197 	reorder_buf->removed = 0;
3198 	reorder_buf->valid = 0;
3199 	reorder_buf->consec_oldsn_drops = 0;
3200 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3201 	reorder_buf->consec_oldsn_prev_drop = 0;
3202 }
3203 
3204 void
3205 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
3206 {
3207 	int i;
3208 	struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3209 	struct iwx_reorder_buf_entry *entry;
3210 
3211 	for (i = 0; i < reorder_buf->buf_size; i++) {
3212 		entry = &rxba->entries[i];
3213 		ml_purge(&entry->frames);
3214 		timerclear(&entry->reorder_time);
3215 	}
3216 
3217 	reorder_buf->removed = 1;
3218 	timeout_del(&reorder_buf->reorder_timer);
3219 	timerclear(&rxba->last_rx);
3220 	timeout_del(&rxba->session_timer);
3221 	rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
3222 }
3223 
3224 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
3225 
3226 void
3227 iwx_rx_ba_session_expired(void *arg)
3228 {
3229 	struct iwx_rxba_data *rxba = arg;
3230 	struct iwx_softc *sc = rxba->sc;
3231 	struct ieee80211com *ic = &sc->sc_ic;
3232 	struct ieee80211_node *ni = ic->ic_bss;
3233 	struct timeval now, timeout, expiry;
3234 	int s;
3235 
3236 	s = splnet();
3237 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0 &&
3238 	    ic->ic_state == IEEE80211_S_RUN &&
3239 	    rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3240 		getmicrouptime(&now);
3241 		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3242 		timeradd(&rxba->last_rx, &timeout, &expiry);
3243 		if (timercmp(&now, &expiry, <)) {
3244 			timeout_add_usec(&rxba->session_timer, rxba->timeout);
3245 		} else {
3246 			ic->ic_stats.is_ht_rx_ba_timeout++;
3247 			ieee80211_delba_request(ic, ni,
3248 			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
3249 		}
3250 	}
3251 	splx(s);
3252 }
3253 
3254 void
3255 iwx_rx_bar_frame_release(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3256     struct mbuf_list *ml)
3257 {
3258 	struct ieee80211com *ic = &sc->sc_ic;
3259 	struct ieee80211_node *ni = ic->ic_bss;
3260 	struct iwx_bar_frame_release *release = (void *)pkt->data;
3261 	struct iwx_reorder_buffer *buf;
3262 	struct iwx_rxba_data *rxba;
3263 	unsigned int baid, nssn, sta_id, tid;
3264 
3265 	if (iwx_rx_packet_payload_len(pkt) < sizeof(*release))
3266 		return;
3267 
3268 	baid = (le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_BAID_MASK) >>
3269 	    IWX_BAR_FRAME_RELEASE_BAID_SHIFT;
3270 	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3271 	    baid >= nitems(sc->sc_rxba_data))
3272 		return;
3273 
3274 	rxba = &sc->sc_rxba_data[baid];
3275 	if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
3276 		return;
3277 
3278 	tid = le32toh(release->sta_tid) & IWX_BAR_FRAME_RELEASE_TID_MASK;
3279 	sta_id = (le32toh(release->sta_tid) &
3280 	    IWX_BAR_FRAME_RELEASE_STA_MASK) >> IWX_BAR_FRAME_RELEASE_STA_SHIFT;
3281 	if (tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
3282 		return;
3283 
3284 	nssn = le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_NSSN_MASK;
3285 	buf = &rxba->reorder_buf;
3286 	iwx_release_frames(sc, ni, rxba, buf, nssn, ml);
3287 }
3288 
3289 void
3290 iwx_reorder_timer_expired(void *arg)
3291 {
3292 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3293 	struct iwx_reorder_buffer *buf = arg;
3294 	struct iwx_rxba_data *rxba = iwx_rxba_data_from_reorder_buf(buf);
3295 	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
3296 	struct iwx_softc *sc = rxba->sc;
3297 	struct ieee80211com *ic = &sc->sc_ic;
3298 	struct ieee80211_node *ni = ic->ic_bss;
3299 	int i, s;
3300 	uint16_t sn = 0, index = 0;
3301 	int expired = 0;
3302 	int cont = 0;
3303 	struct timeval now, timeout, expiry;
3304 
3305 	if (!buf->num_stored || buf->removed)
3306 		return;
3307 
3308 	s = splnet();
3309 	getmicrouptime(&now);
3310 	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3311 
3312 	for (i = 0; i < buf->buf_size; i++) {
3313 		index = (buf->head_sn + i) % buf->buf_size;
3314 
3315 		if (ml_empty(&entries[index].frames)) {
3316 			/*
3317 			 * If there is a hole and the next frame didn't expire
3318 			 * we want to break and not advance SN.
3319 			 */
3320 			cont = 0;
3321 			continue;
3322 		}
3323 		timeradd(&entries[index].reorder_time, &timeout, &expiry);
3324 		if (!cont && timercmp(&now, &expiry, <))
3325 			break;
3326 
3327 		expired = 1;
3328 		/* continue until next hole after this expired frame */
3329 		cont = 1;
3330 		sn = (buf->head_sn + (i + 1)) & 0xfff;
3331 	}
3332 
3333 	if (expired) {
3334 		/* SN is set to the last expired frame + 1 */
3335 		iwx_release_frames(sc, ni, rxba, buf, sn, &ml);
3336 		if_input(&sc->sc_ic.ic_if, &ml);
3337 		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3338 	} else {
3339 		/*
3340 		 * If no frame expired and there are stored frames, index is now
3341 		 * pointing to the first unexpired frame - modify reorder timeout
3342 		 * accordingly.
3343 		 */
3344 		timeout_add_usec(&buf->reorder_timer,
3345 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3346 	}
3347 
3348 	splx(s);
3349 }
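
/*
 * Illustrative sketch (hypothetical helper): 802.11 sequence numbers are
 * 12 bits wide, which is why the loop above advances the head sequence
 * number with "& 0xfff"; all reorder-window arithmetic wraps modulo 4096.
 */
static uint16_t
iwx_example_sn_advance(uint16_t sn, uint16_t n)
{
	return (sn + n) & 0xfff;
}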
3350 
3351 #define IWX_MAX_RX_BA_SESSIONS 16
3352 
3353 struct iwx_rxba_data *
3354 iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid)
3355 {
3356 	int i;
3357 
3358 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3359 		if (sc->sc_rxba_data[i].baid ==
3360 		    IWX_RX_REORDER_DATA_INVALID_BAID)
3361 			continue;
3362 		if (sc->sc_rxba_data[i].tid == tid)
3363 			return &sc->sc_rxba_data[i];
3364 	}
3365 
3366 	return NULL;
3367 }
3368 
3369 int
3370 iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
3371     uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
3372     uint8_t *baid)
3373 {
3374 	struct iwx_rx_baid_cfg_cmd cmd;
3375 	uint32_t new_baid = 0;
3376 	int err;
3377 
3378 	splassert(IPL_NET);
3379 
3380 	memset(&cmd, 0, sizeof(cmd));
3381 
3382 	if (start) {
3383 		cmd.action = IWX_RX_BAID_ACTION_ADD;
3384 		cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID);
3385 		cmd.alloc.tid = tid;
3386 		cmd.alloc.ssn = htole16(ssn);
3387 		cmd.alloc.win_size = htole16(winsize);
3388 	} else {
3389 		struct iwx_rxba_data *rxba;
3390 
3391 		rxba = iwx_find_rxba_data(sc, tid);
3392 		if (rxba == NULL)
3393 			return ENOENT;
3394 		*baid = rxba->baid;
3395 
3396 		cmd.action = IWX_RX_BAID_ACTION_REMOVE;
3397 		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
3398 		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) {
3399 			cmd.remove_v1.baid = rxba->baid;
3400 		} else {
3401 			cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID);
3402 			cmd.remove.tid = tid;
3403 		}
3404 	}
3405 
3406 	err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
3407 	    IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid);
3408 	if (err)
3409 		return err;
3410 
3411 	if (start) {
3412 		if (new_baid >= nitems(sc->sc_rxba_data))
3413 			return ERANGE;
3414 		*baid = new_baid;
3415 	}
3416 
3417 	return 0;
3418 }
3419 
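/*
 * Start or stop an Rx block ack session in firmware by modifying the
 * station entry (older firmware API). On start, the BAID assigned by
 * firmware is parsed out of the command status.
 */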
3420 int
3421 iwx_sta_rx_agg_sta_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
3422     uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
3423     uint8_t *baid)
3424 {
3425 	struct iwx_add_sta_cmd cmd;
3426 	struct iwx_node *in = (void *)ni;
3427 	int err;
3428 	uint32_t status;
3429 
3430 	splassert(IPL_NET);
3431 
3432 	memset(&cmd, 0, sizeof(cmd));
3433 
3434 	cmd.sta_id = IWX_STATION_ID;
3435 	cmd.mac_id_n_color
3436 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3437 	cmd.add_modify = IWX_STA_MODE_MODIFY;
3438 
3439 	if (start) {
3440 		cmd.add_immediate_ba_tid = (uint8_t)tid;
3441 		cmd.add_immediate_ba_ssn = htole16(ssn);
3442 		cmd.rx_ba_window = htole16(winsize);
3443 	} else {
3444 		struct iwx_rxba_data *rxba;
3445 
3446 		rxba = iwx_find_rxba_data(sc, tid);
3447 		if (rxba == NULL)
3448 			return ENOENT;
3449 		*baid = rxba->baid;
3450 
3451 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
3452 	}
3453 	cmd.modify_mask = start ? IWX_STA_MODIFY_ADD_BA_TID :
3454 	    IWX_STA_MODIFY_REMOVE_BA_TID;
3455 
3456 	status = IWX_ADD_STA_SUCCESS;
3457 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
3458 	    &status);
3459 	if (err)
3460 		return err;
3461 
3462 	if ((status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
3463 		return EIO;
3464 
3465 	if (!(status & IWX_ADD_STA_BAID_VALID_MASK))
3466 		return EINVAL;
3467 
3468 	if (start) {
3469 		*baid = (status & IWX_ADD_STA_BAID_MASK) >>
3470 		    IWX_ADD_STA_BAID_SHIFT;
3471 		if (*baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3472 		    *baid >= nitems(sc->sc_rxba_data))
3473 			return ERANGE;
3474 	}
3475 
3476 	return 0;
3477 }
3478 
3479 void
3480 iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3481     uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3482 {
3483 	struct ieee80211com *ic = &sc->sc_ic;
3484 	int err, s;
3485 	struct iwx_rxba_data *rxba = NULL;
3486 	uint8_t baid = 0;
3487 
3488 	s = splnet();
3489 
3490 	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
3491 		ieee80211_addba_req_refuse(ic, ni, tid);
3492 		splx(s);
3493 		return;
3494 	}
3495 
3496 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) {
3497 		err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize,
3498 		    timeout_val, start, &baid);
3499 	} else {
3500 		err = iwx_sta_rx_agg_sta_cmd(sc, ni, tid, ssn, winsize,
3501 		    timeout_val, start, &baid);
3502 	}
3503 	if (err) {
3504 		ieee80211_addba_req_refuse(ic, ni, tid);
3505 		splx(s);
3506 		return;
3507 	}
3508 
3509 	rxba = &sc->sc_rxba_data[baid];
3510 
3511 	/* Deaggregation is done in hardware. */
3512 	if (start) {
3513 		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3514 			ieee80211_addba_req_refuse(ic, ni, tid);
3515 			splx(s);
3516 			return;
3517 		}
3518 		rxba->sta_id = IWX_STATION_ID;
3519 		rxba->tid = tid;
3520 		rxba->baid = baid;
3521 		rxba->timeout = timeout_val;
3522 		getmicrouptime(&rxba->last_rx);
3523 		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
3524 		    winsize);
3525 		if (timeout_val != 0) {
3526 			struct ieee80211_rx_ba *ba;
3527 			timeout_add_usec(&rxba->session_timer,
3528 			    timeout_val);
3529 			/* XXX disable net80211's BA timeout handler */
3530 			ba = &ni->ni_rx_ba[tid];
3531 			ba->ba_timeout_val = 0;
3532 		}
3533 	} else
3534 		iwx_clear_reorder_buffer(sc, rxba);
3535 
3536 	if (start) {
3537 		sc->sc_rx_ba_sessions++;
3538 		ieee80211_addba_req_accept(ic, ni, tid);
3539 	} else if (sc->sc_rx_ba_sessions > 0)
3540 		sc->sc_rx_ba_sessions--;
3541 
3542 	splx(s);
3543 }
3544 
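/* Update the MAC context in firmware and end active session protection. */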
3545 void
3546 iwx_mac_ctxt_task(void *arg)
3547 {
3548 	struct iwx_softc *sc = arg;
3549 	struct ieee80211com *ic = &sc->sc_ic;
3550 	struct iwx_node *in = (void *)ic->ic_bss;
3551 	int err, s = splnet();
3552 
3553 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3554 	    ic->ic_state != IEEE80211_S_RUN) {
3555 		refcnt_rele_wake(&sc->task_refs);
3556 		splx(s);
3557 		return;
3558 	}
3559 
3560 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
3561 	if (err)
3562 		printf("%s: failed to update MAC\n", DEVNAME(sc));
3563 
3564 	iwx_unprotect_session(sc, in);
3565 
3566 	refcnt_rele_wake(&sc->task_refs);
3567 	splx(s);
3568 }
3569 
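/*
 * Re-evaluate HT/VHT channel width settings for the current BSS and
 * update the PHY context in firmware if they have changed.
 */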
3570 void
3571 iwx_phy_ctxt_task(void *arg)
3572 {
3573 	struct iwx_softc *sc = arg;
3574 	struct ieee80211com *ic = &sc->sc_ic;
3575 	struct iwx_node *in = (void *)ic->ic_bss;
3576 	struct ieee80211_node *ni = &in->in_ni;
3577 	uint8_t chains, sco, vht_chan_width;
3578 	int err, s = splnet();
3579 
3580 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3581 	    ic->ic_state != IEEE80211_S_RUN ||
3582 	    in->in_phyctxt == NULL) {
3583 		refcnt_rele_wake(&sc->task_refs);
3584 		splx(s);
3585 		return;
3586 	}
3587 
3588 	chains = iwx_mimo_enabled(sc) ? 2 : 1;
3589 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3590 	    IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
3591 	    ieee80211_node_supports_ht_chan40(ni))
3592 		sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3593 	else
3594 		sco = IEEE80211_HTOP0_SCO_SCN;
3595 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
3596 	    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
3597 	    ieee80211_node_supports_vht_chan80(ni))
3598 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
3599 	else
3600 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
3601 	if (in->in_phyctxt->sco != sco ||
3602 	    in->in_phyctxt->vht_chan_width != vht_chan_width) {
3603 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
3604 		    in->in_phyctxt->channel, chains, chains, 0, sco,
3605 		    vht_chan_width);
3606 		if (err)
3607 			printf("%s: failed to update PHY\n", DEVNAME(sc));
3608 	}
3609 
3610 	refcnt_rele_wake(&sc->task_refs);
3611 	splx(s);
3612 }
3613 
3614 void
3615 iwx_updatechan(struct ieee80211com *ic)
3616 {
3617 	struct iwx_softc *sc = ic->ic_softc;
3618 
3619 	if (ic->ic_state == IEEE80211_S_RUN &&
3620 	    !task_pending(&sc->newstate_task))
3621 		iwx_add_task(sc, systq, &sc->phy_ctxt_task);
3622 }
3623 
3624 void
3625 iwx_updateprot(struct ieee80211com *ic)
3626 {
3627 	struct iwx_softc *sc = ic->ic_softc;
3628 
3629 	if (ic->ic_state == IEEE80211_S_RUN &&
3630 	    !task_pending(&sc->newstate_task))
3631 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3632 }
3633 
3634 void
3635 iwx_updateslot(struct ieee80211com *ic)
3636 {
3637 	struct iwx_softc *sc = ic->ic_softc;
3638 
3639 	if (ic->ic_state == IEEE80211_S_RUN &&
3640 	    !task_pending(&sc->newstate_task))
3641 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3642 }
3643 
3644 void
3645 iwx_updateedca(struct ieee80211com *ic)
3646 {
3647 	struct iwx_softc *sc = ic->ic_softc;
3648 
3649 	if (ic->ic_state == IEEE80211_S_RUN &&
3650 	    !task_pending(&sc->newstate_task))
3651 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3652 }
3653 
3654 void
3655 iwx_updatedtim(struct ieee80211com *ic)
3656 {
3657 	struct iwx_softc *sc = ic->ic_softc;
3658 
3659 	if (ic->ic_state == IEEE80211_S_RUN &&
3660 	    !task_pending(&sc->newstate_task))
3661 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3662 }
3663 
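/*
 * Set up Tx aggregation for the given TID. Firmware manages the BA
 * session itself; the driver merely enables a Tx queue mapped to this
 * TID and adjusts the net80211 BA window accordingly.
 */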
3664 void
3665 iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
3666     uint8_t tid)
3667 {
3668 	struct ieee80211com *ic = &sc->sc_ic;
3669 	struct ieee80211_tx_ba *ba;
3670 	int err, qid;
3671 	struct iwx_tx_ring *ring;
3672 
3673 	/* Ensure we can map this TID to an aggregation queue. */
3674 	if (tid >= IWX_MAX_TID_COUNT)
3675 		return;
3676 
3677 	ba = &ni->ni_tx_ba[tid];
3678 	if (ba->ba_state != IEEE80211_BA_REQUESTED)
3679 		return;
3680 
3681 	qid = sc->aggqid[tid];
3682 	if (qid == 0) {
3683 		/* Firmware should pick the next unused Tx queue. */
3684 		qid = fls(sc->qenablemsk);
3685 	}
3686 
3687 	/*
3688 	 * Simply enable the queue.
3689 	 * Firmware handles Tx BA session setup and teardown.
3690 	 */
3691 	if ((sc->qenablemsk & (1 << qid)) == 0) {
3692 		if (!iwx_nic_lock(sc)) {
3693 			ieee80211_addba_resp_refuse(ic, ni, tid,
3694 			    IEEE80211_STATUS_UNSPECIFIED);
3695 			return;
3696 		}
3697 		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
3698 		    IWX_TX_RING_COUNT);
3699 		iwx_nic_unlock(sc);
3700 		if (err) {
3701 			printf("%s: could not enable Tx queue %d "
3702 			    "(error %d)\n", DEVNAME(sc), qid, err);
3703 			ieee80211_addba_resp_refuse(ic, ni, tid,
3704 			    IEEE80211_STATUS_UNSPECIFIED);
3705 			return;
3706 		}
3707 
3708 		ba->ba_winstart = 0;
3709 	} else
3710 		ba->ba_winstart = ni->ni_qos_txseqs[tid];
3711 
3712 	ba->ba_winend = (ba->ba_winstart + ba->ba_winsize - 1) & 0xfff;
3713 
3714 	ring = &sc->txq[qid];
3715 	ba->ba_timeout_val = 0;
3716 	ieee80211_addba_resp_accept(ic, ni, tid);
3717 	sc->aggqid[tid] = qid;
3718 }
3719 
3720 void
3721 iwx_ba_task(void *arg)
3722 {
3723 	struct iwx_softc *sc = arg;
3724 	struct ieee80211com *ic = &sc->sc_ic;
3725 	struct ieee80211_node *ni = ic->ic_bss;
3726 	int s = splnet();
3727 	int tid;
3728 
3729 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3730 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3731 			break;
3732 		if (sc->ba_rx.start_tidmask & (1 << tid)) {
3733 			struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3734 			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3735 			    ba->ba_winsize, ba->ba_timeout_val, 1);
3736 			sc->ba_rx.start_tidmask &= ~(1 << tid);
3737 		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3738 			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3739 			sc->ba_rx.stop_tidmask &= ~(1 << tid);
3740 		}
3741 	}
3742 
3743 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3744 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3745 			break;
3746 		if (sc->ba_tx.start_tidmask & (1 << tid)) {
3747 			iwx_sta_tx_agg_start(sc, ni, tid);
3748 			sc->ba_tx.start_tidmask &= ~(1 << tid);
3749 		}
3750 	}
3751 
3752 	refcnt_rele_wake(&sc->task_refs);
3753 	splx(s);
3754 }
3755 
3756 /*
3757  * This function is called by the upper layer when an ADDBA request is
3758  * received from another STA and before the ADDBA response is sent.
3759  */
3760 int
3761 iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3762     uint8_t tid)
3763 {
3764 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3765 
3766 	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
3767 	    tid >= IWX_MAX_TID_COUNT)
3768 		return ENOSPC;
3769 
3770 	if (sc->ba_rx.start_tidmask & (1 << tid))
3771 		return EBUSY;
3772 
3773 	sc->ba_rx.start_tidmask |= (1 << tid);
3774 	iwx_add_task(sc, systq, &sc->ba_task);
3775 
3776 	return EBUSY;
3777 }
3778 
3779 /*
3780  * This function is called by the upper layer on teardown of an HT-immediate
3781  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
3782  */
3783 void
3784 iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3785     uint8_t tid)
3786 {
3787 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3788 
3789 	if (tid >= IWX_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3790 		return;
3791 
3792 	sc->ba_rx.stop_tidmask |= (1 << tid);
3793 	iwx_add_task(sc, systq, &sc->ba_task);
3794 }
3795 
3796 int
3797 iwx_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3798     uint8_t tid)
3799 {
3800 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3801 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3802 
3803 	/*
3804 	 * Require a firmware version which uses an internal AUX queue.
3805 	 * The value of IWX_FIRST_AGG_TX_QUEUE would be incorrect otherwise.
3806 	 */
3807 	if (sc->first_data_qid != IWX_DQA_CMD_QUEUE + 1)
3808 		return ENOTSUP;
3809 
3810 	/* Ensure we can map this TID to an aggregation queue. */
3811 	if (tid >= IWX_MAX_TID_COUNT)
3812 		return EINVAL;
3813 
3814 	/* We only support a fixed Tx aggregation window size, for now. */
3815 	if (ba->ba_winsize != IWX_FRAME_LIMIT)
3816 		return ENOTSUP;
3817 
3818 	/* Is firmware already using an agg queue with this TID? */
3819 	if (sc->aggqid[tid] != 0)
3820 		return ENOSPC;
3821 
3822 	/* Are we already processing an ADDBA request? */
3823 	if (sc->ba_tx.start_tidmask & (1 << tid))
3824 		return EBUSY;
3825 
3826 	sc->ba_tx.start_tidmask |= (1 << tid);
3827 	iwx_add_task(sc, systq, &sc->ba_task);
3828 
3829 	return EBUSY;
3830 }
3831 
3832 void
3833 iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
3834 {
3835 	uint32_t mac_addr0, mac_addr1;
3836 
3837 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3838 
3839 	if (!iwx_nic_lock(sc))
3840 		return;
3841 
3842 	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc)));
3843 	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc)));
3844 
3845 	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3846 
3847 	/* If OEM fused a valid address, use it instead of the one in OTP. */
3848 	if (iwx_is_valid_mac_addr(data->hw_addr)) {
3849 		iwx_nic_unlock(sc);
3850 		return;
3851 	}
3852 
3853 	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc)));
3854 	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc)));
3855 
3856 	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3857 
3858 	iwx_nic_unlock(sc);
3859 }
3860 
3861 int
3862 iwx_is_valid_mac_addr(const uint8_t *addr)
3863 {
3864 	static const uint8_t reserved_mac[] = {
3865 		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3866 	};
3867 
3868 	return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3869 	    memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3870 	    memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3871 	    !ETHER_IS_MULTICAST(addr));
3872 }
3873 
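/*
 * The two CSR words hold the MAC address octets in reverse order.
 * For example, on a little-endian host the words 0x00112233 and
 * 0x00004455 yield the address 00:11:22:33:44:55.
 */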
3874 void
3875 iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest)
3876 {
3877 	const uint8_t *hw_addr;
3878 
3879 	hw_addr = (const uint8_t *)&mac_addr0;
3880 	dest[0] = hw_addr[3];
3881 	dest[1] = hw_addr[2];
3882 	dest[2] = hw_addr[1];
3883 	dest[3] = hw_addr[0];
3884 
3885 	hw_addr = (const uint8_t *)&mac_addr1;
3886 	dest[4] = hw_addr[1];
3887 	dest[5] = hw_addr[0];
3888 }
3889 
3890 int
3891 iwx_nvm_get(struct iwx_softc *sc)
3892 {
3893 	struct iwx_nvm_get_info cmd = {};
3894 	struct iwx_nvm_data *nvm = &sc->sc_nvm;
3895 	struct iwx_host_cmd hcmd = {
3896 		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
3897 		.data = { &cmd, },
3898 		.len = { sizeof(cmd) },
3899 		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3900 		    IWX_NVM_GET_INFO)
3901 	};
3902 	int err;
3903 	uint32_t mac_flags;
3904 	/*
3905 	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
3906 	 * in v3, except for the channel profile part of the
3907 	 * regulatory section.  So we can just access the new struct, with
3908 	 * the exception of the latter.
3909 	 */
3910 	struct iwx_nvm_get_info_rsp *rsp;
3911 	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
3912 	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
3913 	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
3914 
3915 	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
3916 	err = iwx_send_cmd(sc, &hcmd);
3917 	if (err)
3918 		return err;
3919 
3920 	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
3921 		err = EIO;
3922 		goto out;
3923 	}
3924 
3925 	memset(nvm, 0, sizeof(*nvm));
3926 
3927 	iwx_set_mac_addr_from_csr(sc, nvm);
3928 	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
3929 		printf("%s: no valid mac address was found\n", DEVNAME(sc));
3930 		err = EINVAL;
3931 		goto out;
3932 	}
3933 
3934 	rsp = (void *)hcmd.resp_pkt->data;
3935 
3936 	/* Initialize general data */
3937 	nvm->nvm_version = le16toh(rsp->general.nvm_version);
3938 	nvm->n_hw_addrs = rsp->general.n_hw_addrs;
3939 
3940 	/* Initialize MAC sku data */
3941 	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
3942 	nvm->sku_cap_11ac_enable =
3943 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
3944 	nvm->sku_cap_11n_enable =
3945 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
3946 	nvm->sku_cap_11ax_enable =
3947 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
3948 	nvm->sku_cap_band_24GHz_enable =
3949 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
3950 	nvm->sku_cap_band_52GHz_enable =
3951 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
3952 	nvm->sku_cap_mimo_disable =
3953 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
3954 
3955 	/* Initialize PHY sku data */
3956 	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
3957 	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);
3958 
3959 	if (le32toh(rsp->regulatory.lar_enabled) &&
3960 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
3961 		nvm->lar_enabled = 1;
3962 	}
3963 
3964 	if (v4) {
3965 		iwx_init_channel_map(sc, NULL,
3966 		    rsp->regulatory.channel_profile, IWX_NUM_CHANNELS);
3967 	} else {
3968 		rsp_v3 = (void *)rsp;
3969 		iwx_init_channel_map(sc, rsp_v3->regulatory.channel_profile,
3970 		    NULL, IWX_NUM_CHANNELS_V1);
3971 	}
3972 out:
3973 	iwx_free_resp(sc, &hcmd);
3974 	return err;
3975 }
3976 
3977 int
3978 iwx_load_firmware(struct iwx_softc *sc)
3979 {
3980 	struct iwx_fw_sects *fws;
3981 	int err;
3982 
3983 	splassert(IPL_NET);
3984 
3985 	sc->sc_uc.uc_intr = 0;
3986 	sc->sc_uc.uc_ok = 0;
3987 
3988 	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
3989 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
3990 		err = iwx_ctxt_info_gen3_init(sc, fws);
3991 	else
3992 		err = iwx_ctxt_info_init(sc, fws);
3993 	if (err) {
3994 		printf("%s: could not init context info\n", DEVNAME(sc));
3995 		return err;
3996 	}
3997 
3998 	/* wait for the firmware to load */
3999 	err = tsleep_nsec(&sc->sc_uc, 0, "iwxuc", SEC_TO_NSEC(1));
4000 	if (err || !sc->sc_uc.uc_ok) {
4001 		printf("%s: could not load firmware (error %d)\n", DEVNAME(sc), err);
4002 		iwx_ctxt_info_free_paging(sc);
4003 	}
4004 
4005 	iwx_dma_contig_free(&sc->iml_dma);
4006 	iwx_ctxt_info_free_fw_img(sc);
4007 
4008 	if (!sc->sc_uc.uc_ok)
4009 		return EINVAL;
4010 
4011 	return err;
4012 }
4013 
4014 int
4015 iwx_start_fw(struct iwx_softc *sc)
4016 {
4017 	int err;
4018 
4019 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
4020 
4021 	iwx_disable_interrupts(sc);
4022 
4023 	/* make sure rfkill handshake bits are cleared */
4024 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
4025 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
4026 	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4027 
4028 	/* clear (again), then enable firmware load interrupt */
4029 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
4030 
4031 	err = iwx_nic_init(sc);
4032 	if (err) {
4033 		printf("%s: unable to init nic\n", DEVNAME(sc));
4034 		return err;
4035 	}
4036 
4037 	iwx_enable_fwload_interrupt(sc);
4038 
4039 	return iwx_load_firmware(sc);
4040 }
4041 
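/*
 * Parse one PNVM SKU section: match the hardware type TLV against this
 * device and concatenate the IWX_UCODE_TLV_SEC_RT data payloads into a
 * DMA buffer for the firmware to fetch.
 */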
4042 int
4043 iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data,
4044     size_t len)
4045 {
4046 	const struct iwx_ucode_tlv *tlv;
4047 	uint32_t sha1 = 0;
4048 	uint16_t mac_type = 0, rf_id = 0;
4049 	uint8_t *pnvm_data = NULL, *tmp;
4050 	int hw_match = 0;
4051 	uint32_t size = 0;
4052 	int err;
4053 
4054 	while (len >= sizeof(*tlv)) {
4055 		uint32_t tlv_len, tlv_type;
4056 
4057 		len -= sizeof(*tlv);
4058 		tlv = (const void *)data;
4059 
4060 		tlv_len = le32toh(tlv->length);
4061 		tlv_type = le32toh(tlv->type);
4062 
4063 		if (len < tlv_len) {
4064 			printf("%s: invalid TLV len: %zu/%u\n",
4065 			    DEVNAME(sc), len, tlv_len);
4066 			err = EINVAL;
4067 			goto out;
4068 		}
4069 
4070 		data += sizeof(*tlv);
4071 
4072 		switch (tlv_type) {
4073 		case IWX_UCODE_TLV_PNVM_VERSION:
4074 			if (tlv_len < sizeof(uint32_t))
4075 				break;
4076 
4077 			sha1 = le32_to_cpup((const uint32_t *)data);
4078 			break;
4079 		case IWX_UCODE_TLV_HW_TYPE:
4080 			if (tlv_len < 2 * sizeof(uint16_t))
4081 				break;
4082 
4083 			if (hw_match)
4084 				break;
4085 
4086 			mac_type = le16_to_cpup((const uint16_t *)data);
4087 			rf_id = le16_to_cpup((const uint16_t *)(data +
4088 			    sizeof(uint16_t)));
4089 
4090 			if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) &&
4091 			    rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id))
4092 				hw_match = 1;
4093 			break;
4094 		case IWX_UCODE_TLV_SEC_RT: {
4095 			const struct iwx_pnvm_section *section;
4096 			uint32_t data_len;
4097 
4098 			section = (const void *)data;
4099 			data_len = tlv_len - sizeof(*section);
4100 
4101 			/* TODO: remove, this is a deprecated separator */
4102 			if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee)
4103 				break;
4104 
4105 			tmp = malloc(size + data_len, M_DEVBUF,
4106 			    M_WAITOK | M_CANFAIL | M_ZERO);
4107 			if (tmp == NULL) {
4108 				err = ENOMEM;
4109 				goto out;
4110 			}
4111 			memcpy(tmp, pnvm_data, size);
4112 			memcpy(tmp + size, section->data, data_len);
4113 			free(pnvm_data, M_DEVBUF, size);
4114 			pnvm_data = tmp;
4115 			size += data_len;
4116 			break;
4117 		}
4118 		case IWX_UCODE_TLV_PNVM_SKU:
4119 			/* New PNVM section started, stop parsing. */
4120 			goto done;
4121 		default:
4122 			break;
4123 		}
4124 
4125 		if (roundup(tlv_len, 4) > len)
4126 			break;
4127 		len -= roundup(tlv_len, 4);
4128 		data += roundup(tlv_len, 4);
4129 	}
4130 done:
4131 	if (!hw_match || size == 0) {
4132 		err = ENOENT;
4133 		goto out;
4134 	}
4135 
4136 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 0);
4137 	if (err) {
4138 		printf("%s: could not allocate DMA memory for PNVM\n",
4139 		    DEVNAME(sc));
4140 		err = ENOMEM;
4141 		goto out;
4142 	}
4143 	memcpy(sc->pnvm_dma.vaddr, pnvm_data, size);
4144 	iwx_ctxt_info_gen3_set_pnvm(sc);
4145 	sc->sc_pnvm_ver = sha1;
4146 out:
4147 	free(pnvm_data, M_DEVBUF, size);
4148 	return err;
4149 }
4150 
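/*
 * Scan a PNVM image for an IWX_UCODE_TLV_PNVM_SKU section matching this
 * device's SKU ID and hand the TLVs following it to
 * iwx_pnvm_handle_section().
 */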
4151 int
4152 iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len)
4153 {
4154 	const struct iwx_ucode_tlv *tlv;
4155 
4156 	while (len >= sizeof(*tlv)) {
4157 		uint32_t tlv_len, tlv_type;
4158 
4159 		len -= sizeof(*tlv);
4160 		tlv = (const void *)data;
4161 
4162 		tlv_len = le32toh(tlv->length);
4163 		tlv_type = le32toh(tlv->type);
4164 
4165 		if (len < tlv_len || roundup(tlv_len, 4) > len)
4166 			return EINVAL;
4167 
4168 		if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) {
4169 			const struct iwx_sku_id *sku_id =
4170 				(const void *)(data + sizeof(*tlv));
4171 
4172 			data += sizeof(*tlv) + roundup(tlv_len, 4);
4173 			len -= roundup(tlv_len, 4);
4174 
4175 			if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) &&
4176 			    sc->sc_sku_id[1] == le32toh(sku_id->data[1]) &&
4177 			    sc->sc_sku_id[2] == le32toh(sku_id->data[2]) &&
4178 			    iwx_pnvm_handle_section(sc, data, len) == 0)
4179 				return 0;
4180 		} else {
4181 			data += sizeof(*tlv) + roundup(tlv_len, 4);
4182 			len -= roundup(tlv_len, 4);
4183 		}
4184 	}
4185 
4186 	return ENOENT;
4187 }
4188 
4189 /* Make AX210 firmware loading context point at PNVM image in DMA memory. */
4190 void
4191 iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc)
4192 {
4193 	struct iwx_prph_scratch *prph_scratch;
4194 	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
4195 
4196 	prph_scratch = sc->prph_scratch_dma.vaddr;
4197 	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
4198 
4199 	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr);
4200 	prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size);
4201 
4202 	bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, 0, sc->pnvm_dma.size,
4203 	    BUS_DMASYNC_PREWRITE);
4204 }
4205 
4206 /*
4207  * Load platform-NVM (non-volatile-memory) data from the filesystem.
4208  * This data apparently contains regulatory information and affects device
4209  * channel configuration.
4210  * The SKU of AX210 devices tells us which PNVM file section is needed.
4211  * Pre-AX210 devices store NVM data onboard.
4212  */
4213 int
4214 iwx_load_pnvm(struct iwx_softc *sc)
4215 {
4216 	const int wait_flags = IWX_PNVM_COMPLETE;
4217 	int s, err = 0;
4218 	u_char *pnvm_data = NULL;
4219 	size_t pnvm_size = 0;
4220 
4221 	if (sc->sc_sku_id[0] == 0 &&
4222 	    sc->sc_sku_id[1] == 0 &&
4223 	    sc->sc_sku_id[2] == 0)
4224 		return 0;
4225 
4226 	if (sc->sc_pnvm_name) {
4227 		if (sc->pnvm_dma.vaddr == NULL) {
4228 			err = loadfirmware(sc->sc_pnvm_name,
4229 			    &pnvm_data, &pnvm_size);
4230 			if (err) {
4231 				printf("%s: could not read %s (error %d)\n",
4232 				    DEVNAME(sc), sc->sc_pnvm_name, err);
4233 				return err;
4234 			}
4235 
4236 			err = iwx_pnvm_parse(sc, pnvm_data, pnvm_size);
4237 			if (err && err != ENOENT) {
4238 				free(pnvm_data, M_DEVBUF, pnvm_size);
4239 				return err;
4240 			}
4241 		} else
4242 			iwx_ctxt_info_gen3_set_pnvm(sc);
4243 	}
4244 
4245 	s = splnet();
4246 
4247 	if (!iwx_nic_lock(sc)) {
4248 		splx(s);
4249 		free(pnvm_data, M_DEVBUF, pnvm_size);
4250 		return EBUSY;
4251 	}
4252 
4253 	/*
4254 	 * If we don't have a platform NVM file, simply ask firmware
4255 	 * to proceed without it.
4256 	 */
4257 
4258 	iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6,
4259 	    IWX_UREG_DOORBELL_TO_ISR6_PNVM);
4260 
4261 	/* Wait for the pnvm complete notification from firmware. */
4262 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4263 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
4264 		    SEC_TO_NSEC(2));
4265 		if (err)
4266 			break;
4267 	}
4268 
4269 	splx(s);
4270 	iwx_nic_unlock(sc);
4271 	free(pnvm_data, M_DEVBUF, pnvm_size);
4272 	return err;
4273 }
4274 
4275 int
4276 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
4277 {
4278 	struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
4279 		.valid = htole32(valid_tx_ant),
4280 	};
4281 
4282 	return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
4283 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4284 }
4285 
4286 int
4287 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
4288 {
4289 	struct iwx_phy_cfg_cmd phy_cfg_cmd;
4290 
4291 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
4292 	phy_cfg_cmd.calib_control.event_trigger =
4293 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
4294 	phy_cfg_cmd.calib_control.flow_trigger =
4295 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
4296 
4297 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
4298 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4299 }
4300 
4301 int
4302 iwx_send_dqa_cmd(struct iwx_softc *sc)
4303 {
4304 	struct iwx_dqa_enable_cmd dqa_cmd = {
4305 		.cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
4306 	};
4307 	uint32_t cmd_id;
4308 
4309 	cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
4310 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4311 }
4312 
4313 int
4314 iwx_load_ucode_wait_alive(struct iwx_softc *sc)
4315 {
4316 	int err;
4317 
4318 	err = iwx_read_firmware(sc);
4319 	if (err)
4320 		return err;
4321 
4322 	err = iwx_start_fw(sc);
4323 	if (err)
4324 		return err;
4325 
4326 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4327 		err = iwx_load_pnvm(sc);
4328 		if (err)
4329 			return err;
4330 	}
4331 
4332 	iwx_post_alive(sc);
4333 
4334 	return 0;
4335 }
4336 
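/*
 * Run the firmware's "init" flow: load the ucode, let firmware handle
 * NVM access internally, wait for the init complete notification, and
 * optionally read the NVM contents (MAC address, SKU, channel map).
 */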
4337 int
4338 iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
4339 {
4340 	const int wait_flags = IWX_INIT_COMPLETE;
4341 	struct iwx_nvm_access_complete_cmd nvm_complete = {};
4342 	struct iwx_init_extended_cfg_cmd init_cfg = {
4343 		.init_flags = htole32(IWX_INIT_NVM),
4344 	};
4345 	int err, s;
4346 
4347 	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
4348 		printf("%s: radio is disabled by hardware switch\n",
4349 		    DEVNAME(sc));
4350 		return EPERM;
4351 	}
4352 
4353 	s = splnet();
4354 	sc->sc_init_complete = 0;
4355 	err = iwx_load_ucode_wait_alive(sc);
4356 	if (err) {
4357 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
4358 		splx(s);
4359 		return err;
4360 	}
4361 
4362 	/*
4363 	 * Send the init config command to mark that we are sending NVM
4364 	 * access commands.
4365 	 */
4366 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
4367 	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
4368 	if (err) {
4369 		splx(s);
4370 		return err;
4371 	}
4372 
4373 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
4374 	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
4375 	if (err) {
4376 		splx(s);
4377 		return err;
4378 	}
4379 
4380 	/* Wait for the init complete notification from the firmware. */
4381 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4382 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
4383 		    SEC_TO_NSEC(2));
4384 		if (err) {
4385 			splx(s);
4386 			return err;
4387 		}
4388 	}
4389 	splx(s);
4390 	if (readnvm) {
4391 		err = iwx_nvm_get(sc);
4392 		if (err) {
4393 			printf("%s: failed to read nvm\n", DEVNAME(sc));
4394 			return err;
4395 		}
4396 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
4397 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
4398 			    sc->sc_nvm.hw_addr);
4399 
4400 	}
4401 	return 0;
4402 }
4403 
4404 int
4405 iwx_config_ltr(struct iwx_softc *sc)
4406 {
4407 	struct iwx_ltr_config_cmd cmd = {
4408 		.flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
4409 	};
4410 
4411 	if (!sc->sc_ltr_enabled)
4412 		return 0;
4413 
4414 	return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4415 }
4416 
4417 void
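/*
 * Point a free-descriptor ring entry at an Rx buffer's DMA address.
 * AX210 and later use a transfer descriptor structure; older devices
 * pack the DMA address and buffer index into a single 64-bit word.
 */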
4418 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx)
4419 {
4420 	struct iwx_rx_data *data = &ring->data[idx];
4421 
4422 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4423 		struct iwx_rx_transfer_desc *desc = ring->desc;
4424 		desc[idx].rbid = htole16(idx & 0xffff);
4425 		desc[idx].addr = htole64(data->map->dm_segs[0].ds_addr);
4426 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4427 		    idx * sizeof(*desc), sizeof(*desc),
4428 		    BUS_DMASYNC_PREWRITE);
4429 	} else {
4430 		((uint64_t *)ring->desc)[idx] =
4431 		    htole64(data->map->dm_segs[0].ds_addr | (idx & 0x0fff));
4432 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4433 		    idx * sizeof(uint64_t), sizeof(uint64_t),
4434 		    BUS_DMASYNC_PREWRITE);
4435 	}
4436 }
4437 
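/*
 * Attach a fresh mbuf cluster to the given Rx ring slot and load it into
 * the slot's DMA map, then update the matching Rx descriptor.
 */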
4438 int
4439 iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
4440 {
4441 	struct iwx_rx_ring *ring = &sc->rxq;
4442 	struct iwx_rx_data *data = &ring->data[idx];
4443 	struct mbuf *m;
4444 	int err;
4445 	int fatal = 0;
4446 
4447 	m = m_gethdr(M_DONTWAIT, MT_DATA);
4448 	if (m == NULL)
4449 		return ENOBUFS;
4450 
4451 	if (size <= MCLBYTES) {
4452 		MCLGET(m, M_DONTWAIT);
4453 	} else {
4454 		MCLGETL(m, M_DONTWAIT, IWX_RBUF_SIZE);
4455 	}
4456 	if ((m->m_flags & M_EXT) == 0) {
4457 		m_freem(m);
4458 		return ENOBUFS;
4459 	}
4460 
4461 	if (data->m != NULL) {
4462 		bus_dmamap_unload(sc->sc_dmat, data->map);
4463 		fatal = 1;
4464 	}
4465 
4466 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4467 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4468 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4469 	if (err) {
4470 		/* XXX */
4471 		if (fatal)
4472 			panic("%s: could not load RX mbuf", DEVNAME(sc));
4473 		m_freem(m);
4474 		return err;
4475 	}
4476 	data->m = m;
4477 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4478 
4479 	/* Update RX descriptor. */
4480 	iwx_update_rx_desc(sc, ring, idx);
4481 
4482 	return 0;
4483 }
4484 
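/*
 * Derive RSSI in dBm from the per-chain energy fields of the Rx
 * descriptor. Firmware reports energy as a positive value which is
 * negated to yield dBm; zero means "no measurement" and maps to -256.
 * The stronger of the two chains is returned.
 */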
4485 int
4486 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
4487     struct iwx_rx_mpdu_desc *desc)
4488 {
4489 	int energy_a, energy_b;
4490 
4491 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4492 		energy_a = desc->v3.energy_a;
4493 		energy_b = desc->v3.energy_b;
4494 	} else {
4495 		energy_a = desc->v1.energy_a;
4496 		energy_b = desc->v1.energy_b;
4497 	}
4498 	energy_a = energy_a ? -energy_a : -256;
4499 	energy_b = energy_b ? -energy_b : -256;
4500 	return MAX(energy_a, energy_b);
4501 }
4502 
4503 void
4504 iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4505     struct iwx_rx_data *data)
4506 {
4507 	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
4508 
4509 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4510 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
4511 
4512 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4513 }
4514 
4515 /*
4516  * Retrieve the average noise (in dBm) among receivers.
4517  */
4518 int
4519 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
4520 {
4521 	int i, total, nbant, noise;
4522 
4523 	total = nbant = noise = 0;
4524 	for (i = 0; i < 3; i++) {
4525 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
4526 		if (noise) {
4527 			total += noise;
4528 			nbant++;
4529 		}
4530 	}
4531 
4532 	/* There should be at least one antenna but check anyway. */
4533 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4534 }
4535 
4536 int
4537 iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4538     struct ieee80211_rxinfo *rxi)
4539 {
4540 	struct ieee80211com *ic = &sc->sc_ic;
4541 	struct ieee80211_key *k;
4542 	struct ieee80211_frame *wh;
4543 	uint64_t pn, *prsc;
4544 	uint8_t *ivp;
4545 	uint8_t tid;
4546 	int hdrlen, hasqos;
4547 
4548 	wh = mtod(m, struct ieee80211_frame *);
4549 	hdrlen = ieee80211_get_hdrlen(wh);
4550 	ivp = (uint8_t *)wh + hdrlen;
4551 
4552 	/* find key for decryption */
4553 	k = ieee80211_get_rxkey(ic, m, ni);
4554 	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
4555 		return 1;
4556 
4557 	/* Check that the ExtIV bit is set. */
4558 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4559 		return 1;
4560 
4561 	hasqos = ieee80211_has_qos(wh);
4562 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4563 	prsc = &k->k_rsc[tid];
4564 
4565 	/* Extract the 48-bit PN from the CCMP header. */
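	/*
	 * The CCMP header bytes are PN0, PN1, reserved, key ID/ExtIV,
	 * PN2, PN3, PN4, PN5; hence bytes 2 and 3 are skipped here.
	 */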
4566 	pn = (uint64_t)ivp[0]       |
4567 	     (uint64_t)ivp[1] <<  8 |
4568 	     (uint64_t)ivp[4] << 16 |
4569 	     (uint64_t)ivp[5] << 24 |
4570 	     (uint64_t)ivp[6] << 32 |
4571 	     (uint64_t)ivp[7] << 40;
4572 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4573 		if (pn < *prsc) {
4574 			ic->ic_stats.is_ccmp_replays++;
4575 			return 1;
4576 		}
4577 	} else if (pn <= *prsc) {
4578 		ic->ic_stats.is_ccmp_replays++;
4579 		return 1;
4580 	}
4581 	/* Last seen packet number is updated in ieee80211_inputm(). */
4582 
4583 	/*
4584 	 * Some firmware versions strip the MIC, and some don't. It is not
4585 	 * clear which of the capability flags could tell us what to expect.
4586 	 * For now, keep things simple and just leave the MIC in place if
4587 	 * it is present.
4588 	 *
4589 	 * The IV will be stripped by ieee80211_inputm().
4590 	 */
4591 	return 0;
4592 }
4593 
4594 int
4595 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4596     struct ieee80211_rxinfo *rxi)
4597 {
4598 	struct ieee80211com *ic = &sc->sc_ic;
4599 	struct ifnet *ifp = IC2IFP(ic);
4600 	struct ieee80211_frame *wh;
4601 	struct ieee80211_node *ni;
4602 	int ret = 0;
4603 	uint8_t type, subtype;
4604 
4605 	wh = mtod(m, struct ieee80211_frame *);
4606 
4607 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4608 	if (type == IEEE80211_FC0_TYPE_CTL)
4609 		return 0;
4610 
4611 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4612 	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
4613 		return 0;
4614 
4615 	ni = ieee80211_find_rxnode(ic, wh);
4616 	/* Handle hardware decryption. */
4617 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
4618 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
4619 	    (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
4620 	    ((!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4621 	    ni->ni_rsncipher == IEEE80211_CIPHER_CCMP) ||
4622 	    (IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4623 	    ni->ni_rsngroupcipher == IEEE80211_CIPHER_CCMP))) {
4624 		if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4625 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4626 			ic->ic_stats.is_ccmp_dec_errs++;
4627 			ret = 1;
4628 			goto out;
4629 		}
4630 		/* Check whether decryption was successful or not. */
4631 		if ((rx_pkt_status &
4632 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4633 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
4634 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4635 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
4636 			ic->ic_stats.is_ccmp_dec_errs++;
4637 			ret = 1;
4638 			goto out;
4639 		}
4640 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4641 	}
4642 out:
4643 	if (ret)
4644 		ifp->if_ierrors++;
4645 	ieee80211_release_node(ic, ni);
4646 	return ret;
4647 }
4648 
4649 void
4650 iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
4651     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4652     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4653     struct mbuf_list *ml)
4654 {
4655 	struct ieee80211com *ic = &sc->sc_ic;
4656 	struct ifnet *ifp = IC2IFP(ic);
4657 	struct ieee80211_frame *wh;
4658 	struct ieee80211_node *ni;
4659 
4660 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
4661 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4662 
4663 	wh = mtod(m, struct ieee80211_frame *);
4664 	ni = ieee80211_find_rxnode(ic, wh);
4665 	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4666 	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
4667 		ifp->if_ierrors++;
4668 		m_freem(m);
4669 		ieee80211_release_node(ic, ni);
4670 		return;
4671 	}
4672 
4673 #if NBPFILTER > 0
4674 	if (sc->sc_drvbpf != NULL) {
4675 		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
4676 		uint16_t chan_flags;
4677 		int have_legacy_rate = 1;
4678 		uint8_t mcs, rate;
4679 
4680 		tap->wr_flags = 0;
4681 		if (is_shortpre)
4682 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4683 		tap->wr_chan_freq =
4684 		    htole16(ic->ic_channels[chanidx].ic_freq);
4685 		chan_flags = ic->ic_channels[chanidx].ic_flags;
4686 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
4687 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
4688 			chan_flags &= ~IEEE80211_CHAN_HT;
4689 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
4690 		}
4691 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
4692 			chan_flags &= ~IEEE80211_CHAN_VHT;
4693 		tap->wr_chan_flags = htole16(chan_flags);
4694 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4695 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4696 		tap->wr_tsft = device_timestamp;
4697 		if (sc->sc_rate_n_flags_version >= 2) {
4698 			uint32_t mod_type = (rate_n_flags &
4699 			    IWX_RATE_MCS_MOD_TYPE_MSK);
4700 			const struct ieee80211_rateset *rs = NULL;
4701 			uint32_t ridx;
4702 			have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK ||
4703 			    mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK);
4704 			mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK);
4705 			ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
4706 			if (mod_type == IWX_RATE_MCS_CCK_MSK)
4707 				rs = &ieee80211_std_rateset_11b;
4708 			else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK)
4709 				rs = &ieee80211_std_rateset_11a;
4710 			if (rs && ridx < rs->rs_nrates) {
4711 				rate = (rs->rs_rates[ridx] &
4712 				    IEEE80211_RATE_VAL);
4713 			} else
4714 				rate = 0;
4715 		} else {
4716 			have_legacy_rate = ((rate_n_flags &
4717 			    (IWX_RATE_MCS_HT_MSK_V1 |
4718 			    IWX_RATE_MCS_VHT_MSK_V1)) == 0);
4719 			mcs = (rate_n_flags &
4720 			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
4721 			    IWX_RATE_HT_MCS_NSS_MSK_V1));
4722 			rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
4723 		}
4724 		if (!have_legacy_rate) {
4725 			tap->wr_rate = (0x80 | mcs);
4726 		} else {
4727 			switch (rate) {
4728 			/* CCK rates. */
4729 			case  10: tap->wr_rate =   2; break;
4730 			case  20: tap->wr_rate =   4; break;
4731 			case  55: tap->wr_rate =  11; break;
4732 			case 110: tap->wr_rate =  22; break;
4733 			/* OFDM rates. */
4734 			case 0xd: tap->wr_rate =  12; break;
4735 			case 0xf: tap->wr_rate =  18; break;
4736 			case 0x5: tap->wr_rate =  24; break;
4737 			case 0x7: tap->wr_rate =  36; break;
4738 			case 0x9: tap->wr_rate =  48; break;
4739 			case 0xb: tap->wr_rate =  72; break;
4740 			case 0x1: tap->wr_rate =  96; break;
4741 			case 0x3: tap->wr_rate = 108; break;
4742 			/* Unknown rate: should not happen. */
4743 			default:  tap->wr_rate =   0;
4744 			}
4745 		}
4746 
4747 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4748 		    m, BPF_DIRECTION_IN);
4749 	}
4750 #endif
4751 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4752 	ieee80211_release_node(ic, ni);
4753 }
4754 
4755 /*
4756  * Drop duplicate 802.11 retransmissions
4757  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4758  * and handle pseudo-duplicate frames which result from deaggregation
4759  * of A-MSDU frames in hardware.
4760  */
4761 int
4762 iwx_detect_duplicate(struct iwx_softc *sc, struct mbuf *m,
4763     struct iwx_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4764 {
4765 	struct ieee80211com *ic = &sc->sc_ic;
4766 	struct iwx_node *in = (void *)ic->ic_bss;
4767 	struct iwx_rxq_dup_data *dup_data = &in->dup_data;
4768 	uint8_t tid = IWX_MAX_TID_COUNT, subframe_idx;
4769 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4770 	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4771 	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4772 	int hasqos = ieee80211_has_qos(wh);
4773 	uint16_t seq;
4774 
4775 	if (type == IEEE80211_FC0_TYPE_CTL ||
4776 	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4777 	    IEEE80211_IS_MULTICAST(wh->i_addr1))
4778 		return 0;
4779 
4780 	if (hasqos) {
4781 		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4782 		if (tid > IWX_MAX_TID_COUNT)
4783 			tid = IWX_MAX_TID_COUNT;
4784 	}
4785 
4786 	/* If this frame wasn't part of an A-MSDU, the subframe index is 0. */
4787 	subframe_idx = desc->amsdu_info &
4788 		IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4789 
4790 	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4791 	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4792 	    dup_data->last_seq[tid] == seq &&
4793 	    dup_data->last_sub_frame[tid] >= subframe_idx)
4794 		return 1;
4795 
4796 	/*
4797 	 * Allow the same frame sequence number for all A-MSDU subframes
4798 	 * following the first subframe.
4799 	 * Otherwise these subframes would be discarded as replays.
4800 	 */
4801 	if (dup_data->last_seq[tid] == seq &&
4802 	    subframe_idx > dup_data->last_sub_frame[tid] &&
4803 	    (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU)) {
4804 		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4805 	}
4806 
4807 	dup_data->last_seq[tid] = seq;
4808 	dup_data->last_sub_frame[tid] = subframe_idx;
4809 
4810 	return 0;
4811 }
4812 
4813 /*
4814  * Returns true if sn2 - buffer_size < sn1 < sn2.
4815  * To be used only in order to compare reorder buffer head with NSSN.
4816  * We fully trust NSSN unless it is behind us due to reorder timeout.
4817  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
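 * For example, with a 12-bit SN space, buffer_size 64, sn1 4090 and
 * sn2 10, the arithmetic wraps modulo 4096 and this returns true:
 * 4090 lies within the 64-frame window just behind 10.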
4818  */
4819 int
4820 iwx_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
4821 {
4822 	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
4823 }
4824 
4825 void
4826 iwx_release_frames(struct iwx_softc *sc, struct ieee80211_node *ni,
4827     struct iwx_rxba_data *rxba, struct iwx_reorder_buffer *reorder_buf,
4828     uint16_t nssn, struct mbuf_list *ml)
4829 {
4830 	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
4831 	uint16_t ssn = reorder_buf->head_sn;
4832 
4833 	/* ignore nssn smaller than head sn - this can happen due to timeout */
4834 	if (iwx_is_sn_less(nssn, ssn, reorder_buf->buf_size))
4835 		goto set_timer;
4836 
4837 	while (iwx_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
4838 		int index = ssn % reorder_buf->buf_size;
4839 		struct mbuf *m;
4840 		int chanidx, is_shortpre;
4841 		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
4842 		struct ieee80211_rxinfo *rxi;
4843 
4844 		/* This data is the same for all A-MSDU subframes. */
4845 		chanidx = entries[index].chanidx;
4846 		rx_pkt_status = entries[index].rx_pkt_status;
4847 		is_shortpre = entries[index].is_shortpre;
4848 		rate_n_flags = entries[index].rate_n_flags;
4849 		device_timestamp = entries[index].device_timestamp;
4850 		rxi = &entries[index].rxi;
4851 
4852 		/*
4853 		 * Empty the list. Will have more than one frame for A-MSDU.
4854 		 * Empty the list. It will hold more than one frame in the
4855 		 * A-MSDU case. An empty list is valid as well, since the nssn
4856 		 * indicates that frames were received.
4857 		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
4858 			iwx_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
4859 			    rate_n_flags, device_timestamp, rxi, ml);
4860 			reorder_buf->num_stored--;
4861 
4862 			/*
4863 			 * Allow the same frame sequence number and CCMP PN for
4864 			 * all A-MSDU subframes following the first subframe.
4865 			 * Otherwise they would be discarded as replays.
4866 			 */
4867 			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4868 			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4869 		}
4870 
4871 		ssn = (ssn + 1) & 0xfff;
4872 	}
4873 	reorder_buf->head_sn = nssn;
4874 
4875 set_timer:
4876 	if (reorder_buf->num_stored && !reorder_buf->removed) {
4877 		timeout_add_usec(&reorder_buf->reorder_timer,
4878 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
4879 	} else
4880 		timeout_del(&reorder_buf->reorder_timer);
4881 }
4882 
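/*
 * Work around firmware which repeatedly flags frames with an old SN
 * across consecutive A-MPDUs (A-MPDUs are told apart by their GP2
 * timestamp). Once IWX_AMPDU_CONSEC_DROPS_DELBA drops have accumulated,
 * tear the BA session down. Returns 1 if the session is being torn down.
 */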
4883 int
4884 iwx_oldsn_workaround(struct iwx_softc *sc, struct ieee80211_node *ni, int tid,
4885     struct iwx_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
4886 {
4887 	struct ieee80211com *ic = &sc->sc_ic;
4888 
4889 	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
4890 		/* we have a new (A-)MPDU ... */
4891 
4892 		/*
4893 		 * reset counter to 0 if we didn't have any oldsn in
4894 		 * the last A-MPDU (as detected by GP2 being identical)
4895 		 */
4896 		if (!buffer->consec_oldsn_prev_drop)
4897 			buffer->consec_oldsn_drops = 0;
4898 
4899 		/* either way, update our tracking state */
4900 		buffer->consec_oldsn_ampdu_gp2 = gp2;
4901 	} else if (buffer->consec_oldsn_prev_drop) {
4902 		/*
4903 		 * tracking state didn't change, and we had an old SN
4904 		 * indication before - do nothing in this case, we
4905 		 * already noted this one down and are waiting for the
4906 		 * next A-MPDU (by GP2)
4907 		 */
4908 		return 0;
4909 	}
4910 
4911 	/* return unless this MPDU has old SN */
4912 	if (!(reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN))
4913 		return 0;
4914 
4915 	/* update state */
4916 	buffer->consec_oldsn_prev_drop = 1;
4917 	buffer->consec_oldsn_drops++;
4918 
4919 	/* if limit is reached, send del BA and reset state */
4920 	if (buffer->consec_oldsn_drops == IWX_AMPDU_CONSEC_DROPS_DELBA) {
4921 		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
4922 		    0, tid);
4923 		buffer->consec_oldsn_prev_drop = 0;
4924 		buffer->consec_oldsn_drops = 0;
4925 		return 1;
4926 	}
4927 
4928 	return 0;
4929 }
4930 
4931 /*
4932  * Handle re-ordering of frames which were de-aggregated in hardware.
4933  * Returns 1 if the MPDU was consumed (buffered or dropped).
4934  * Returns 0 if the MPDU should be passed to the upper layer.
4935  */
4936 int
4937 iwx_rx_reorder(struct iwx_softc *sc, struct mbuf *m, int chanidx,
4938     struct iwx_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
4939     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4940     struct mbuf_list *ml)
4941 {
4942 	struct ieee80211com *ic = &sc->sc_ic;
4943 	struct ieee80211_frame *wh;
4944 	struct ieee80211_node *ni;
4945 	struct iwx_rxba_data *rxba;
4946 	struct iwx_reorder_buffer *buffer;
4947 	uint32_t reorder_data = le32toh(desc->reorder_data);
4948 	int is_amsdu = (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU);
4949 	int last_subframe =
4950 		(desc->amsdu_info & IWX_RX_MPDU_AMSDU_LAST_SUBFRAME);
4951 	uint8_t tid;
4952 	uint8_t subframe_idx = (desc->amsdu_info &
4953 	    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4954 	struct iwx_reorder_buf_entry *entries;
4955 	int index;
4956 	uint16_t nssn, sn;
4957 	uint8_t baid, type, subtype;
4958 	int hasqos;
4959 
4960 	wh = mtod(m, struct ieee80211_frame *);
4961 	hasqos = ieee80211_has_qos(wh);
4962 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4963 
4964 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4965 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4966 
4967 	/*
4968 	 * We are only interested in Block Ack requests and unicast QoS data.
4969 	 */
4970 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
4971 		return 0;
4972 	if (hasqos) {
4973 		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
4974 			return 0;
4975 	} else {
4976 		if (type != IEEE80211_FC0_TYPE_CTL ||
4977 		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
4978 			return 0;
4979 	}
4980 
4981 	baid = (reorder_data & IWX_RX_MPDU_REORDER_BAID_MASK) >>
4982 		IWX_RX_MPDU_REORDER_BAID_SHIFT;
4983 	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
4984 	    baid >= nitems(sc->sc_rxba_data))
4985 		return 0;
4986 
4987 	rxba = &sc->sc_rxba_data[baid];
4988 	if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
4989 	    tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
4990 		return 0;
4991 
4992 	if (rxba->timeout != 0)
4993 		getmicrouptime(&rxba->last_rx);
4994 
4995 	/* Bypass A-MPDU re-ordering in net80211. */
4996 	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
4997 
4998 	nssn = reorder_data & IWX_RX_MPDU_REORDER_NSSN_MASK;
4999 	sn = (reorder_data & IWX_RX_MPDU_REORDER_SN_MASK) >>
5000 		IWX_RX_MPDU_REORDER_SN_SHIFT;
5001 
5002 	buffer = &rxba->reorder_buf;
5003 	entries = &rxba->entries[0];
5004 
5005 	if (!buffer->valid) {
5006 		if (reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN)
5007 			return 0;
5008 		buffer->valid = 1;
5009 	}
5010 
5011 	ni = ieee80211_find_rxnode(ic, wh);
5012 	if (type == IEEE80211_FC0_TYPE_CTL &&
5013 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
5014 		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
5015 		goto drop;
5016 	}
5017 
5018 	/*
5019 	 * If there was a significant jump in the NSSN, adjust.
5020 	 * If the SN is smaller than the NSSN it might need to first go into
5021 	 * the reorder buffer, in which case we just release up to it and the
5022 	 * rest of the function will take care of storing it and releasing up to
5023 	 * the nssn.
5024 	 */
5025 	if (!iwx_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
5026 	    buffer->buf_size) ||
5027 	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
5028 		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
5029 		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
5030 		iwx_release_frames(sc, ni, rxba, buffer, min_sn, ml);
5031 	}
5032 
5033 	if (iwx_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
5034 	    device_timestamp)) {
5035 		 /* BA session will be torn down. */
5036 		ic->ic_stats.is_ht_rx_ba_window_jump++;
5037 		goto drop;
5038 
5039 	}
5040 
5041 	/* drop any outdated packets */
5042 	if (SEQ_LT(sn, buffer->head_sn)) {
5043 		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
5044 		goto drop;
5045 	}
5046 
5047 	/* release immediately if allowed by nssn and no stored frames */
5048 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
5049 		if (iwx_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
5050 		   (!is_amsdu || last_subframe))
5051 			buffer->head_sn = nssn;
5052 		ieee80211_release_node(ic, ni);
5053 		return 0;
5054 	}
5055 
5056 	/*
5057 	 * Release immediately if there are no stored frames and the SN is
5058 	 * equal to the head. This can happen due to the reorder timer, which
5059 	 * can leave the NSSN behind head_sn: after everything was released
5060 	 * and the next frame in the sequence arrives, the NSSN says we can't
5061 	 * release it immediately, even though there is technically no hole
5062 	 * and we can move forward.
5063 	 */
5064 	if (!buffer->num_stored && sn == buffer->head_sn) {
5065 		if (!is_amsdu || last_subframe)
5066 			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
5067 		ieee80211_release_node(ic, ni);
5068 		return 0;
5069 	}
5070 
5071 	index = sn % buffer->buf_size;
5072 
5073 	/*
5074 	 * Check if we already stored this frame.
5075 	 * An A-MSDU is either received in full or not at all, so the logic is
5076 	 * simple: if this buffer slot already holds frames and the last stored
5077 	 * A-MSDU frame had a different SN, this frame is a retransmission.
5078 	 * If the SN is the same, it is the same A-MSDU only if the subframe
5079 	 * index is incrementing; otherwise it is a retransmission.
5080 	 */
5081 	if (!ml_empty(&entries[index].frames)) {
5082 		if (!is_amsdu) {
5083 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5084 			goto drop;
5085 		} else if (sn != buffer->last_amsdu ||
5086 		    buffer->last_sub_index >= subframe_idx) {
5087 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5088 			goto drop;
5089 		}
5090 	} else {
5091 		/* This data is the same for all A-MSDU subframes. */
5092 		entries[index].chanidx = chanidx;
5093 		entries[index].is_shortpre = is_shortpre;
5094 		entries[index].rate_n_flags = rate_n_flags;
5095 		entries[index].device_timestamp = device_timestamp;
5096 		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
5097 	}
5098 
5099 	/* put in reorder buffer */
5100 	ml_enqueue(&entries[index].frames, m);
5101 	buffer->num_stored++;
5102 	getmicrouptime(&entries[index].reorder_time);
5103 
5104 	if (is_amsdu) {
5105 		buffer->last_amsdu = sn;
5106 		buffer->last_sub_index = subframe_idx;
5107 	}
5108 
5109 	/*
5110 	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
5111 	 * The reason is that NSSN advances on the first sub-frame, and may
5112 	 * cause the reorder buffer to advance before all the sub-frames arrive.
5113 	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
5114 	 * SN 1. The NSSN for the first subframe will be 3, so the driver
5115 	 * releases SN 0, 1, 2. When subframe 1 arrives, the reorder buffer is
5116 	 * already ahead and it will be dropped.
5117 	 * If the last sub-frame is not on this queue - we will get frame
5118 	 * release notification with up to date NSSN.
5119 	 */
5120 	if (!is_amsdu || last_subframe)
5121 		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
5122 
5123 	ieee80211_release_node(ic, ni);
5124 	return 1;
5125 
5126 drop:
5127 	m_freem(m);
5128 	ieee80211_release_node(ic, ni);
5129 	return 1;
5130 }
5131 
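/*
 * Process a received MPDU from the multi-queue Rx path: validate the
 * descriptor, strip header padding, undo A-MSDU deaggregation quirks,
 * verify hardware decryption, drop duplicates, and pass the frame on
 * for reordering and input.
 */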
5132 void
5133 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
5134     size_t maxlen, struct mbuf_list *ml)
5135 {
5136 	struct ieee80211com *ic = &sc->sc_ic;
5137 	struct ieee80211_rxinfo rxi;
5138 	struct iwx_rx_mpdu_desc *desc;
5139 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
5140 	int rssi;
5141 	uint8_t chanidx;
5142 	uint16_t phy_info;
5143 	size_t desc_size;
5144 
5145 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
5146 		desc_size = sizeof(*desc);
5147 	else
5148 		desc_size = IWX_RX_DESC_SIZE_V1;
5149 
5150 	if (maxlen < desc_size) {
5151 		m_freem(m);
5152 		return; /* drop */
5153 	}
5154 
5155 	desc = (struct iwx_rx_mpdu_desc *)pktdata;
5156 
5157 	if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
5158 	    !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
5159 		m_freem(m);
5160 		return; /* drop */
5161 	}
5162 
5163 	len = le16toh(desc->mpdu_len);
5164 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5165 		/* Allow control frames in monitor mode. */
5166 		if (len < sizeof(struct ieee80211_frame_cts)) {
5167 			ic->ic_stats.is_rx_tooshort++;
5168 			IC2IFP(ic)->if_ierrors++;
5169 			m_freem(m);
5170 			return;
5171 		}
5172 	} else if (len < sizeof(struct ieee80211_frame)) {
5173 		ic->ic_stats.is_rx_tooshort++;
5174 		IC2IFP(ic)->if_ierrors++;
5175 		m_freem(m);
5176 		return;
5177 	}
5178 	if (len > maxlen - desc_size) {
5179 		IC2IFP(ic)->if_ierrors++;
5180 		m_freem(m);
5181 		return;
5182 	}
5183 
5184 	m->m_data = pktdata + desc_size;
5185 	m->m_pkthdr.len = m->m_len = len;
5186 
5187 	/* Account for padding following the frame header. */
5188 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
5189 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5190 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5191 		if (type == IEEE80211_FC0_TYPE_CTL) {
5192 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
5193 			case IEEE80211_FC0_SUBTYPE_CTS:
5194 				hdrlen = sizeof(struct ieee80211_frame_cts);
5195 				break;
5196 			case IEEE80211_FC0_SUBTYPE_ACK:
5197 				hdrlen = sizeof(struct ieee80211_frame_ack);
5198 				break;
5199 			default:
5200 				hdrlen = sizeof(struct ieee80211_frame_min);
5201 				break;
5202 			}
5203 		} else
5204 			hdrlen = ieee80211_get_hdrlen(wh);
5205 
5206 		if ((le16toh(desc->status) &
5207 		    IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
5208 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
5209 			/* Padding is inserted after the IV. */
5210 			hdrlen += IEEE80211_CCMP_HDRLEN;
5211 		}
5212 
5213 		memmove(m->m_data + 2, m->m_data, hdrlen);
5214 		m_adj(m, 2);
5215 	}
5216 
5217 	memset(&rxi, 0, sizeof(rxi));
5218 
5219 	/*
5220 	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
5221 	 * in place for each subframe. But it leaves the 'A-MSDU present'
5222 	 * bit set in the frame header. We need to clear this bit ourselves.
5223 	 * (XXX This workaround is not required on AX200/AX201 devices that
5224 	 * have been tested by me, but it's unclear when this problem was
5225 	 * fixed in the hardware. It definitely affects the 9k generation.
5226 	 * Leaving this in place for now since some 9k/AX200 hybrids seem
5227 	 * to exist that we may eventually add support for.)
5228 	 *
5229 	 * And we must allow the same CCMP PN for subframes following the
5230 	 * first subframe. Otherwise they would be discarded as replays.
5231 	 */
5232 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
5233 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5234 		uint8_t subframe_idx = (desc->amsdu_info &
5235 		    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5236 		if (subframe_idx > 0)
5237 			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5238 		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
5239 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
5240 			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
5241 			    struct ieee80211_qosframe_addr4 *);
5242 			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5243 		} else if (ieee80211_has_qos(wh) &&
5244 		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
5245 			struct ieee80211_qosframe *qwh = mtod(m,
5246 			    struct ieee80211_qosframe *);
5247 			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5248 		}
5249 	}
5250 
5251 	/*
5252 	 * Verify decryption before duplicate detection. The latter uses
5253 	 * the TID supplied in QoS frame headers and this TID is implicitly
5254 	 * verified as part of the CCMP nonce.
5255 	 */
5256 	if (iwx_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5257 		m_freem(m);
5258 		return;
5259 	}
5260 
5261 	if (iwx_detect_duplicate(sc, m, desc, &rxi)) {
5262 		m_freem(m);
5263 		return;
5264 	}
5265 
5266 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5267 		rate_n_flags = le32toh(desc->v3.rate_n_flags);
5268 		chanidx = desc->v3.channel;
5269 		device_timestamp = le32toh(desc->v3.gp2_on_air_rise);
5270 	} else {
5271 		rate_n_flags = le32toh(desc->v1.rate_n_flags);
5272 		chanidx = desc->v1.channel;
5273 		device_timestamp = le32toh(desc->v1.gp2_on_air_rise);
5274 	}
5275 
5276 	phy_info = le16toh(desc->phy_info);
5277 
5278 	rssi = iwx_rxmq_get_signal_strength(sc, desc);
5279 	rssi = (0 - IWX_MIN_DBM) + rssi;	/* normalize */
5280 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
5281 
5282 	rxi.rxi_rssi = rssi;
5283 	rxi.rxi_tstamp = device_timestamp;
5284 	rxi.rxi_chan = chanidx;
5285 
5286 	if (iwx_rx_reorder(sc, m, chanidx, desc,
5287 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
5288 	    rate_n_flags, device_timestamp, &rxi, ml))
5289 		return;
5290 
5291 	iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
5292 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
5293 	    rate_n_flags, device_timestamp, &rxi, ml);
5294 }
5295 
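/*
 * Clear all transfer buffers of a Tx descriptor except the first,
 * which maps bidirectional DMA data and is never cleared.
 */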
5296 void
5297 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
5298 {
5299 	struct iwx_tfh_tfd *desc = &ring->desc[idx];
5300 	uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
5301 	int i;
5302 
5303 	/* First TB is never cleared - it is bidirectional DMA data. */
5304 	for (i = 1; i < num_tbs; i++) {
5305 		struct iwx_tfh_tb *tb = &desc->tbs[i];
5306 		memset(tb, 0, sizeof(*tb));
5307 	}
5308 	desc->num_tbs = htole16(1);
5309 
5310 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5311 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5312 	    sizeof(*desc), BUS_DMASYNC_PREWRITE);
5313 }
5314 
5315 void
5316 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
5317 {
5318 	struct ieee80211com *ic = &sc->sc_ic;
5319 
5320 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5321 	    BUS_DMASYNC_POSTWRITE);
5322 	bus_dmamap_unload(sc->sc_dmat, txd->map);
5323 	m_freem(txd->m);
5324 	txd->m = NULL;
5325 
5326 	KASSERT(txd->in);
5327 	ieee80211_release_node(ic, &txd->in->in_ni);
5328 	txd->in = NULL;
5329 }
5330 
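/*
 * Reclaim Tx descriptors up to, but not including, hardware index
 * 'idx', freeing associated mbufs and node references.
 */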
5331 void
5332 iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx)
5333 {
5334 	struct iwx_tx_data *txd;
5335 
5336 	while (ring->tail_hw != idx) {
5337 		txd = &ring->data[ring->tail];
5338 		if (txd->m != NULL) {
5339 			iwx_clear_tx_desc(sc, ring, ring->tail);
5340 			iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0);
5341 			iwx_txd_done(sc, txd);
5342 			ring->queued--;
5343 		}
5344 		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
5345 		ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
5346 	}
5347 }
5348 
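/*
 * Handle a Tx completion notification from firmware and reclaim
 * completed frames; A-MPDU completions are reported separately via
 * compressed block-ack notifications.
 */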
5349 void
5350 iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
5351     struct iwx_rx_data *data)
5352 {
5353 	struct ieee80211com *ic = &sc->sc_ic;
5354 	struct ifnet *ifp = IC2IFP(ic);
5355 	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
5356 	int qid = cmd_hdr->qid, status, txfail;
5357 	struct iwx_tx_ring *ring = &sc->txq[qid];
5358 	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
5359 	uint32_t ssn;
5360 	uint32_t len = iwx_rx_packet_len(pkt);
5361 
5362 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
5363 	    BUS_DMASYNC_POSTREAD);
5364 
5365 	/* Sanity checks. */
5366 	if (sizeof(*tx_resp) > len)
5367 		return;
5368 	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
5369 		return;
5370 	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
5371 	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
5372 		return;
5373 
5374 	sc->sc_tx_timer[qid] = 0;
5375 
5376 	if (tx_resp->frame_count > 1) /* A-MPDU */
5377 		return;
5378 
5379 	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
5380 	txfail = (status != IWX_TX_STATUS_SUCCESS &&
5381 	    status != IWX_TX_STATUS_DIRECT_DONE);
5382 
5383 	if (txfail)
5384 		ifp->if_oerrors++;
5385 
5386 	/*
5387 	 * On hardware supported by iwx(4) the SSN counter corresponds
5388 	 * to a Tx ring index rather than a sequence number.
5389 	 * Frames up to this index (non-inclusive) can now be freed.
5390 	 */
5391 	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
5392 	ssn = le32toh(ssn);
5393 	if (ssn < sc->max_tfd_queue_size) {
5394 		iwx_txq_advance(sc, ring, ssn);
5395 		iwx_clear_oactive(sc, ring);
5396 	}
5397 }
5398 
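/*
 * Clear the interface's out-of-active state and restart output once
 * this Tx ring has drained below the low watermark.
 */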
5399 void
5400 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
5401 {
5402 	struct ieee80211com *ic = &sc->sc_ic;
5403 	struct ifnet *ifp = IC2IFP(ic);
5404 
5405 	if (ring->queued < IWX_TX_RING_LOMARK) {
5406 		sc->qfullmsk &= ~(1 << ring->qid);
5407 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5408 			ifq_clr_oactive(&ifp->if_snd);
5409 			/*
5410 			 * Well, we're in interrupt context, but then again
5411 			 * I guess net80211 does all sorts of stunts in
5412 			 * interrupt context, so maybe this is no biggie.
5413 			 */
5414 			(*ifp->if_start)(ifp);
5415 		}
5416 	}
5417 }
5418 
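/*
 * Handle a compressed block-ack notification, which reports how far
 * each aggregation Tx queue has advanced, and reclaim acknowledged
 * frames.
 */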
5419 void
5420 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
5421 {
5422 	struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
5423 	struct ieee80211com *ic = &sc->sc_ic;
5424 	struct ieee80211_node *ni;
5425 	struct ieee80211_tx_ba *ba;
5426 	struct iwx_node *in;
5427 	struct iwx_tx_ring *ring;
5428 	uint16_t i, tfd_cnt, ra_tid_cnt, idx;
5429 	int qid;
5430 
5431 	if (ic->ic_state != IEEE80211_S_RUN)
5432 		return;
5433 
5434 	if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
5435 		return;
5436 
5437 	if (ba_res->sta_id != IWX_STATION_ID)
5438 		return;
5439 
5440 	ni = ic->ic_bss;
5441 	in = (void *)ni;
5442 
5443 	tfd_cnt = le16toh(ba_res->tfd_cnt);
5444 	ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
5445 	if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
5446 	    sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
5447 	    sizeof(ba_res->tfd[0]) * tfd_cnt))
5448 		return;
5449 
5450 	for (i = 0; i < tfd_cnt; i++) {
5451 		struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
5452 		uint8_t tid;
5453 
5454 		tid = ba_tfd->tid;
5455 		if (tid >= nitems(sc->aggqid))
5456 			continue;
5457 
5458 		qid = sc->aggqid[tid];
5459 		if (qid != le16toh(ba_tfd->q_num))
5460 			continue;
5461 
5462 		ring = &sc->txq[qid];
5463 
5464 		ba = &ni->ni_tx_ba[tid];
5465 		if (ba->ba_state != IEEE80211_BA_AGREED)
5466 			continue;
5467 
5468 		idx = le16toh(ba_tfd->tfd_index);
5469 		sc->sc_tx_timer[qid] = 0;
5470 		iwx_txq_advance(sc, ring, idx);
5471 		iwx_clear_oactive(sc, ring);
5472 	}
5473 }
5474 
5475 void
5476 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
5477     struct iwx_rx_data *data)
5478 {
5479 	struct ieee80211com *ic = &sc->sc_ic;
5480 	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
5481 	uint32_t missed;
5482 
5483 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
5484 	    (ic->ic_state != IEEE80211_S_RUN))
5485 		return;
5486 
5487 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
5488 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
5489 
5490 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
5491 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
5492 		if (ic->ic_if.if_flags & IFF_DEBUG)
5493 			printf("%s: receiving no beacons from %s; checking if "
5494 			    "this AP is still responding to probe requests\n",
5495 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
5496 		/*
5497 		 * Rather than go directly to scan state, try to send a
5498 		 * directed probe request first. If that fails then the
5499 		 * state machine will drop us into scanning after timing
5500 		 * out waiting for a probe response.
5501 		 */
5502 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
5503 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
5504 	}
5505 
5506 }
5507 
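/*
 * Add, modify, or remove the firmware binding which ties a MAC context
 * to a PHY context.
 */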
5508 int
5509 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
5510 {
5511 	struct iwx_binding_cmd cmd;
5512 	struct iwx_phy_ctxt *phyctxt = in->in_phyctxt;
5513 	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
5514 	int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
5515 	uint32_t status;
5516 
5517 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
5518 		panic("binding already added");
5519 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
5520 		panic("binding already removed");
5521 
5522 	if (phyctxt == NULL) /* XXX race with iwx_stop() */
5523 		return EINVAL;
5524 
5525 	memset(&cmd, 0, sizeof(cmd));
5526 
5527 	cmd.id_and_color
5528 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5529 	cmd.action = htole32(action);
5530 	cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5531 
5532 	cmd.macs[0] = htole32(mac_id);
5533 	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
5534 		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
5535 
5536 	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
5537 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5538 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5539 	else
5540 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5541 
5542 	status = 0;
5543 	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
5544 	    &cmd, &status);
5545 	if (err == 0 && status != 0)
5546 		err = EIO;
5547 
5548 	return err;
5549 }
5550 
5551 uint8_t
5552 iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
5553 {
5554 	int center_idx = ic->ic_bss->ni_vht_chan_center_freq_idx0;
5555 	int primary_idx = ic->ic_bss->ni_primary_chan;
5556 	/*
5557 	 * Firmware is expected to check the control channel position only
5558 	 * when operating in HT/VHT with a channel width other than 20MHz.
5559 	 * Return this value as the default:
5560 	 */
5561 	uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5562 
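	/*
	 * An 80MHz channel spans four 20MHz subchannels whose centers
	 * lie at offsets -6, -2, 2, and 6 channel numbers from the
	 * 80MHz center index; map the primary channel's offset to the
	 * firmware's control channel position encoding.
	 */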
5563 	switch (primary_idx - center_idx) {
5564 	case -6:
5565 		pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
5566 		break;
5567 	case -2:
5568 		pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5569 		break;
5570 	case 2:
5571 		pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5572 		break;
5573 	case 6:
5574 		pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
5575 		break;
5576 	default:
5577 		break;
5578 	}
5579 
5580 	return pos;
5581 }
5582 
5583 int
5584 iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5585     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
5586     uint8_t vht_chan_width, int cmdver)
5587 {
5588 	struct ieee80211com *ic = &sc->sc_ic;
5589 	struct iwx_phy_context_cmd_uhb cmd;
5590 	uint8_t active_cnt, idle_cnt;
5591 	struct ieee80211_channel *chan = ctxt->channel;
5592 
5593 	memset(&cmd, 0, sizeof(cmd));
5594 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
5595 	    ctxt->color));
5596 	cmd.action = htole32(action);
5597 
5598 	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
5599 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5600 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5601 	else
5602 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5603 
5604 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5605 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
5606 	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
5607 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5608 		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
5609 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
5610 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5611 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
5612 			/* secondary chan above -> control chan below */
5613 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5614 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5615 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
5616 			/* secondary chan below -> control chan above */
5617 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5618 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5619 		} else {
5620 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5621 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5622 		}
5623 	} else {
5624 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5625 		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5626 	}
5627 
5628 	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
5629 	    IWX_RLC_CONFIG_CMD) != 2) {
5630 		idle_cnt = chains_static;
5631 		active_cnt = chains_dynamic;
5632 		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
5633 		    IWX_PHY_RX_CHAIN_VALID_POS);
5634 		cmd.rxchain_info |= htole32(idle_cnt <<
5635 		    IWX_PHY_RX_CHAIN_CNT_POS);
5636 		cmd.rxchain_info |= htole32(active_cnt <<
5637 		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
5638 	}
5639 
5640 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5641 }
5642 
5643 int
5644 iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5645     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
5646     uint8_t vht_chan_width, int cmdver)
5647 {
5648 	struct ieee80211com *ic = &sc->sc_ic;
5649 	struct iwx_phy_context_cmd cmd;
5650 	uint8_t active_cnt, idle_cnt;
5651 	struct ieee80211_channel *chan = ctxt->channel;
5652 
5653 	memset(&cmd, 0, sizeof(cmd));
5654 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
5655 	    ctxt->color));
5656 	cmd.action = htole32(action);
5657 
5658 	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
5659 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5660 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5661 	else
5662 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5663 
5664 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5665 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
5666 	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
5667 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5668 		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
5669 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
5670 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5671 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
5672 			/* secondary chan above -> control chan below */
5673 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5674 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5675 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
5676 			/* secondary chan below -> control chan above */
5677 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5678 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5679 		} else {
5680 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5681 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5682 		}
5683 	} else {
5684 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5685 		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5686 	}
5687 
5688 	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
5689 	    IWX_RLC_CONFIG_CMD) != 2) {
5690 		idle_cnt = chains_static;
5691 		active_cnt = chains_dynamic;
5692 		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
5693 		    IWX_PHY_RX_CHAIN_VALID_POS);
5694 		cmd.rxchain_info |= htole32(idle_cnt <<
5695 		    IWX_PHY_RX_CHAIN_CNT_POS);
5696 		cmd.rxchain_info |= htole32(active_cnt <<
5697 		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
5698 	}
5699 
5700 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5701 }
5702 
5703 int
5704 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5705     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
5706     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
5707 {
5708 	int cmdver;
5709 
5710 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
5711 	if (cmdver != 3 && cmdver != 4) {
5712 		printf("%s: firmware does not support phy-context-cmd v3/v4\n",
5713 		    DEVNAME(sc));
5714 		return ENOTSUP;
5715 	}
5716 
5717 	/*
5718 	 * Intel increased the size of the fw_channel_info struct and neglected
5719 	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
5720 	 * member in the middle.
5721 	 * To keep things simple we use a separate function to handle the larger
5722 	 * variant of the phy context command.
5723 	 */
5724 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
5725 		return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static,
5726 		    chains_dynamic, action, sco, vht_chan_width, cmdver);
5727 	}
5728 
5729 	return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic,
5730 	    action, sco, vht_chan_width, cmdver);
5731 }
5732 
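/*
 * Send a command to firmware via the command queue. Unless IWX_CMD_ASYNC
 * is set, the caller sleeps until the command completes; a response
 * requested with IWX_CMD_WANT_RESP must be freed with iwx_free_resp().
 */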
5733 int
5734 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5735 {
5736 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5737 	struct iwx_tfh_tfd *desc;
5738 	struct iwx_tx_data *txdata;
5739 	struct iwx_device_cmd *cmd;
5740 	struct mbuf *m;
5741 	bus_addr_t paddr;
5742 	uint64_t addr;
5743 	int err = 0, i, paylen, off, s;
5744 	int idx, code, async, group_id;
5745 	size_t hdrlen, datasz;
5746 	uint8_t *data;
5747 	int generation = sc->sc_generation;
5748 
5749 	code = hcmd->id;
5750 	async = hcmd->flags & IWX_CMD_ASYNC;
5751 	idx = ring->cur;
5752 
5753 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
5754 		paylen += hcmd->len[i];
5755 	}
5756 
5757 	/* If this command waits for a response, allocate response buffer. */
5758 	hcmd->resp_pkt = NULL;
5759 	if (hcmd->flags & IWX_CMD_WANT_RESP) {
5760 		uint8_t *resp_buf;
5761 		KASSERT(!async);
5762 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet));
5763 		KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX);
5764 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
5765 			return ENOSPC;
5766 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
5767 		    M_NOWAIT | M_ZERO);
5768 		if (resp_buf == NULL)
5769 			return ENOMEM;
5770 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
5771 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
5772 	} else {
5773 		sc->sc_cmd_resp_pkt[idx] = NULL;
5774 	}
5775 
5776 	s = splnet();
5777 
5778 	desc = &ring->desc[idx];
5779 	txdata = &ring->data[idx];
5780 
5781 	/*
5782 	 * XXX Intel inside (tm)
5783 	 * Firmware API versions >= 50 reject old-style commands in
5784 	 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
5785 	 * that such commands were in the LONG_GROUP instead in order
5786 	 * for firmware to accept them.
5787 	 */
5788 	if (iwx_cmd_groupid(code) == 0) {
5789 		code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
5790 		txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
5791 	} else
5792 		txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
5793 
5794 	group_id = iwx_cmd_groupid(code);
5795 
5796 	hdrlen = sizeof(cmd->hdr_wide);
5797 	datasz = sizeof(cmd->data_wide);
5798 
5799 	if (paylen > datasz) {
5800 		/* Command is too large to fit in pre-allocated space. */
5801 		size_t totlen = hdrlen + paylen;
5802 		if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
5803 			printf("%s: firmware command too long (%zu bytes)\n",
5804 			    DEVNAME(sc), totlen);
5805 			err = EINVAL;
5806 			goto out;
5807 		}
5808 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
5809 		if (m == NULL) {
5810 			printf("%s: could not get fw cmd mbuf (%zu bytes)\n",
5811 			    DEVNAME(sc), totlen);
5812 			err = ENOMEM;
5813 			goto out;
5814 		}
5815 		cmd = mtod(m, struct iwx_device_cmd *);
5816 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
5817 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5818 		if (err) {
5819 			printf("%s: could not load fw cmd mbuf (%zu bytes)\n",
5820 			    DEVNAME(sc), totlen);
5821 			m_freem(m);
5822 			goto out;
5823 		}
5824 		txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
5825 		paddr = txdata->map->dm_segs[0].ds_addr;
5826 	} else {
5827 		cmd = &ring->cmd[idx];
5828 		paddr = txdata->cmd_paddr;
5829 	}
5830 
5831 	memset(cmd, 0, sizeof(*cmd));
5832 	cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
5833 	cmd->hdr_wide.group_id = group_id;
5834 	cmd->hdr_wide.qid = ring->qid;
5835 	cmd->hdr_wide.idx = idx;
5836 	cmd->hdr_wide.length = htole16(paylen);
5837 	cmd->hdr_wide.version = iwx_cmd_version(code);
5838 	data = cmd->data_wide;
5839 
5840 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
5841 		if (hcmd->len[i] == 0)
5842 			continue;
5843 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5844 		off += hcmd->len[i];
5845 	}
5846 	KASSERT(off == paylen);
5847 
5848 	desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
5849 	addr = htole64(paddr);
5850 	memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
5851 	if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
5852 		desc->tbs[1].tb_len = htole16(hdrlen + paylen -
5853 		    IWX_FIRST_TB_SIZE);
5854 		addr = htole64(paddr + IWX_FIRST_TB_SIZE);
5855 		memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
5856 		desc->num_tbs = htole16(2);
5857 	} else
5858 		desc->num_tbs = htole16(1);
5859 
5860 	if (paylen > datasz) {
5861 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
5862 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5863 	} else {
5864 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5865 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5866 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5867 	}
5868 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5869 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5870 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5871 	/* Kick command ring. */
5872 	DPRINTF(("%s: sending command 0x%x\n", __func__, code));
5873 	ring->queued++;
5874 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5875 	ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5876 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5877 
5878 	if (!async) {
5879 		err = tsleep_nsec(desc, PCATCH, "iwxcmd", SEC_TO_NSEC(1));
5880 		if (err == 0) {
5881 			/* if hardware is no longer up, return error */
5882 			if (generation != sc->sc_generation) {
5883 				err = ENXIO;
5884 				goto out;
5885 			}
5886 
5887 			/* Response buffer will be freed in iwx_free_resp(). */
5888 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5889 			sc->sc_cmd_resp_pkt[idx] = NULL;
5890 		} else if (generation == sc->sc_generation) {
5891 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
5892 			    sc->sc_cmd_resp_len[idx]);
5893 			sc->sc_cmd_resp_pkt[idx] = NULL;
5894 		}
5895 	}
5896  out:
5897 	splx(s);
5898 
5899 	return err;
5900 }
5901 
5902 int
5903 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5904     uint16_t len, const void *data)
5905 {
5906 	struct iwx_host_cmd cmd = {
5907 		.id = id,
5908 		.len = { len, },
5909 		.data = { data, },
5910 		.flags = flags,
5911 	};
5912 
5913 	return iwx_send_cmd(sc, &cmd);
5914 }
5915 
5916 int
5917 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
5918     uint32_t *status)
5919 {
5920 	struct iwx_rx_packet *pkt;
5921 	struct iwx_cmd_response *resp;
5922 	int err, resp_len;
5923 
5924 	KASSERT((cmd->flags & IWX_CMD_WANT_RESP) == 0);
5925 	cmd->flags |= IWX_CMD_WANT_RESP;
5926 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5927 
5928 	err = iwx_send_cmd(sc, cmd);
5929 	if (err)
5930 		return err;
5931 
5932 	pkt = cmd->resp_pkt;
5933 	if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
5934 		return EIO;
5935 
5936 	resp_len = iwx_rx_packet_payload_len(pkt);
5937 	if (resp_len != sizeof(*resp)) {
5938 		iwx_free_resp(sc, cmd);
5939 		return EIO;
5940 	}
5941 
5942 	resp = (void *)pkt->data;
5943 	*status = le32toh(resp->status);
5944 	iwx_free_resp(sc, cmd);
5945 	return err;
5946 }
5947 
5948 int
5949 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5950     const void *data, uint32_t *status)
5951 {
5952 	struct iwx_host_cmd cmd = {
5953 		.id = id,
5954 		.len = { len, },
5955 		.data = { data, },
5956 	};
5957 
5958 	return iwx_send_cmd_status(sc, &cmd, status);
5959 }
5960 
5961 void
5962 iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5963 {
5964 	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP);
5965 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
5966 	hcmd->resp_pkt = NULL;
5967 }
5968 
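/*
 * Handle the firmware's acknowledgement of a command: release an
 * oversized command mbuf, if any, and wake up the sleeping sender.
 */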
5969 void
5970 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
5971 {
5972 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5973 	struct iwx_tx_data *data;
5974 
5975 	if (qid != IWX_DQA_CMD_QUEUE) {
5976 		return;	/* Not a command ack. */
5977 	}
5978 
5979 	data = &ring->data[idx];
5980 
5981 	if (data->m != NULL) {
5982 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
5983 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5984 		bus_dmamap_unload(sc->sc_dmat, data->map);
5985 		m_freem(data->m);
5986 		data->m = NULL;
5987 	}
5988 	wakeup(&ring->desc[idx]);
5989 
5990 	DPRINTF(("%s: command 0x%x done\n", __func__, code));
5991 	if (ring->queued == 0) {
5992 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
5993 			DEVNAME(sc), code));
5994 	} else if (ring->queued > 0)
5995 		ring->queued--;
5996 }
5997 
5998 uint32_t
5999 iwx_fw_rateidx_ofdm(uint8_t rval)
6000 {
6001 	/* Firmware expects indices which match our 11a rate set. */
6002 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11a;
6003 	int i;
6004 
6005 	for (i = 0; i < rs->rs_nrates; i++) {
6006 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
6007 			return i;
6008 	}
6009 
6010 	return 0;
6011 }
6012 
6013 uint32_t
6014 iwx_fw_rateidx_cck(uint8_t rval)
6015 {
6016 	/* Firmware expects indices which match our 11b rate set. */
6017 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11b;
6018 	int i;
6019 
6020 	for (i = 0; i < rs->rs_nrates; i++) {
6021 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
6022 			return i;
6023 	}
6024 
6025 	return 0;
6026 }
6027 
6028 /*
6029  * Determine the Tx command flags and Tx rate+flags to use.
6030  * Return the selected Tx rate.
6031  */
6032 const struct iwx_rate *
6033 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
6034     struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags)
6035 {
6036 	struct ieee80211com *ic = &sc->sc_ic;
6037 	struct ieee80211_node *ni = &in->in_ni;
6038 	struct ieee80211_rateset *rs = &ni->ni_rates;
6039 	const struct iwx_rate *rinfo;
6040 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6041 	int min_ridx = iwx_rval2ridx(ieee80211_min_basic_rate(ic));
6042 	int ridx, rate_flags;
6043 	uint8_t rval;
6044 
6045 	*flags = 0;
6046 
6047 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
6048 	    type != IEEE80211_FC0_TYPE_DATA) {
6049 		/* for non-data, use the lowest supported rate */
6050 		ridx = min_ridx;
6051 		*flags |= IWX_TX_FLAGS_CMD_RATE;
6052 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
6053 		ridx = iwx_mcs2ridx[ni->ni_txmcs];
6054 	} else {
6055 		rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
6056 		ridx = iwx_rval2ridx(rval);
6057 		if (ridx < min_ridx)
6058 			ridx = min_ridx;
6059 	}
6060 
6061 	if ((ic->ic_flags & IEEE80211_F_RSNON) &&
6062 	    ni->ni_rsn_supp_state == RSNA_SUPP_PTKNEGOTIATING)
6063 		*flags |= IWX_TX_FLAGS_HIGH_PRI;
6064 
6065 	rinfo = &iwx_rates[ridx];
6066 
6067 	/*
6068 	 * Do not fill rate_n_flags if firmware controls the Tx rate.
6069 	 * For data frames we rely on Tx rate scaling in firmware by default.
6070 	 */
6071 	if ((*flags & IWX_TX_FLAGS_CMD_RATE) == 0) {
6072 		*rate_n_flags = 0;
6073 		return rinfo;
6074 	}
6075 
6076 	/*
6077 	 * Forcing a CCK/OFDM legacy rate is important for management frames.
6078 	 * Association will only succeed if we do this correctly.
6079 	 */
6080 	rate_flags = IWX_RATE_MCS_ANT_A_MSK;
6081 	if (IWX_RIDX_IS_CCK(ridx)) {
6082 		if (sc->sc_rate_n_flags_version >= 2)
6083 			rate_flags |= IWX_RATE_MCS_CCK_MSK;
6084 		else
6085 			rate_flags |= IWX_RATE_MCS_CCK_MSK_V1;
6086 	} else if (sc->sc_rate_n_flags_version >= 2)
6087 		rate_flags |= IWX_RATE_MCS_LEGACY_OFDM_MSK;
6088 
6089 	rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
6090 	if (sc->sc_rate_n_flags_version >= 2) {
6091 		if (rate_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) {
6092 			rate_flags |= (iwx_fw_rateidx_ofdm(rval) &
6093 			    IWX_RATE_LEGACY_RATE_MSK);
6094 		} else {
6095 			rate_flags |= (iwx_fw_rateidx_cck(rval) &
6096 			    IWX_RATE_LEGACY_RATE_MSK);
6097 		}
6098 	} else
6099 		rate_flags |= rinfo->plcp;
6100 
6101 	*rate_n_flags = rate_flags;
6102 
6103 	return rinfo;
6104 }
6105 
6106 void
6107 iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
6108     int idx, uint16_t byte_cnt, uint16_t num_tbs)
6109 {
6110 	uint8_t filled_tfd_size, num_fetch_chunks;
6111 	uint16_t len = byte_cnt;
6112 	uint16_t bc_ent;
6113 
6114 	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
6115 			  num_tbs * sizeof(struct iwx_tfh_tb);
6116 	/*
6117 	 * filled_tfd_size is the number of used bytes in the TFD.
6118 	 * Dividing it by 64 gives the number of 64-byte chunks to
6119 	 * fetch into SRAM, minus one: 0 for one chunk, 1 for two,
6120 	 * and so on. If, for example, the TFD contains only 3 TBs
6121 	 * then 32 bytes of the TFD are used and only one 64-byte
6122 	 * chunk needs to be fetched.
6123 	 */
6124 	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;
6125 
6126 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
6127 		struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr;
6128 		/* Starting from AX210, the HW expects bytes */
6129 		bc_ent = htole16(len | (num_fetch_chunks << 14));
6130 		scd_bc_tbl[idx].tfd_offset = bc_ent;
6131 	} else {
6132 		struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
6133 		/* Before AX210, the HW expects DW */
6134 		len = howmany(len, 4);
6135 		bc_ent = htole16(len | (num_fetch_chunks << 12));
6136 		scd_bc_tbl->tfd_offset[idx] = bc_ent;
6137 	}
6138 
6139 	bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, 0,
6140 	    txq->bc_tbl.map->dm_mapsize, BUS_DMASYNC_PREWRITE);
6141 }
6142 
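/*
 * Transmit one frame: choose a Tx queue (an aggregation queue for QoS
 * data with an active block-ack agreement), build the Tx command, map
 * the payload for DMA, fill the TFD, and kick the ring's write pointer.
 */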
6143 int
6144 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
6145 {
6146 	struct ieee80211com *ic = &sc->sc_ic;
6147 	struct iwx_node *in = (void *)ni;
6148 	struct iwx_tx_ring *ring;
6149 	struct iwx_tx_data *data;
6150 	struct iwx_tfh_tfd *desc;
6151 	struct iwx_device_cmd *cmd;
6152 	struct ieee80211_frame *wh;
6153 	struct ieee80211_key *k = NULL;
6154 	const struct iwx_rate *rinfo;
6155 	uint64_t paddr;
6156 	u_int hdrlen;
6157 	bus_dma_segment_t *seg;
6158 	uint32_t rate_n_flags;
6159 	uint16_t num_tbs, flags, offload_assist = 0;
6160 	uint8_t type, subtype;
6161 	int i, totlen, err, pad, qid;
6162 	size_t txcmd_size;
6163 
6164 	wh = mtod(m, struct ieee80211_frame *);
6165 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6166 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
6167 	if (type == IEEE80211_FC0_TYPE_CTL)
6168 		hdrlen = sizeof(struct ieee80211_frame_min);
6169 	else
6170 		hdrlen = ieee80211_get_hdrlen(wh);
6171 
6172 	qid = sc->first_data_qid;
6173 
6174 	/* Put QoS frames on the data queue which maps to their TID. */
6175 	if (ieee80211_has_qos(wh)) {
6176 		struct ieee80211_tx_ba *ba;
6177 		uint16_t qos = ieee80211_get_qos(wh);
6178 		uint8_t tid = qos & IEEE80211_QOS_TID;
6179 
6180 		ba = &ni->ni_tx_ba[tid];
6181 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6182 		    type == IEEE80211_FC0_TYPE_DATA &&
6183 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
6184 		    sc->aggqid[tid] != 0 &&
6185 		    ba->ba_state == IEEE80211_BA_AGREED) {
6186 			qid = sc->aggqid[tid];
6187 		}
6188 	}
6189 
6190 	ring = &sc->txq[qid];
6191 	desc = &ring->desc[ring->cur];
6192 	memset(desc, 0, sizeof(*desc));
6193 	data = &ring->data[ring->cur];
6194 
6195 	cmd = &ring->cmd[ring->cur];
6196 	cmd->hdr.code = IWX_TX_CMD;
6197 	cmd->hdr.flags = 0;
6198 	cmd->hdr.qid = ring->qid;
6199 	cmd->hdr.idx = ring->cur;
6200 
6201 	rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags);
6202 
6203 #if NBPFILTER > 0
6204 	if (sc->sc_drvbpf != NULL) {
6205 		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
6206 		uint16_t chan_flags;
6207 
6208 		tap->wt_flags = 0;
6209 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
6210 		chan_flags = ni->ni_chan->ic_flags;
6211 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
6212 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
6213 			chan_flags &= ~IEEE80211_CHAN_HT;
6214 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
6215 		}
6216 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
6217 			chan_flags &= ~IEEE80211_CHAN_VHT;
6218 		tap->wt_chan_flags = htole16(chan_flags);
6219 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6220 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6221 		    type == IEEE80211_FC0_TYPE_DATA &&
6222 		    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
6223 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
6224 		} else
6225 			tap->wt_rate = rinfo->rate;
6226 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
6227 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
6228 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
6229 
6230 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
6231 		    m, BPF_DIRECTION_OUT);
6232 	}
6233 #endif
6234 
6235 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
6236 		k = ieee80211_get_txkey(ic, wh, ni);
6237 		if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
6238 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
6239 				return ENOBUFS;
6240 			/* 802.11 header may have moved. */
6241 			wh = mtod(m, struct ieee80211_frame *);
6242 			flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
6243 		} else {
6244 			k->k_tsc++;
6245 			/* Hardware increments PN internally and adds IV. */
6246 		}
6247 	} else
6248 		flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
6249 
6250 	totlen = m->m_pkthdr.len;
6251 
6252 	if (hdrlen & 3) {
6253 		/* First segment length must be a multiple of 4. */
6254 		pad = 4 - (hdrlen & 3);
6255 		offload_assist |= IWX_TX_CMD_OFFLD_PAD;
6256 	} else
6257 		pad = 0;
6258 
6259 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
6260 		struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data;
6261 		memset(tx, 0, sizeof(*tx));
6262 		tx->len = htole16(totlen);
6263 		tx->offload_assist = htole32(offload_assist);
6264 		tx->flags = htole16(flags);
6265 		tx->rate_n_flags = htole32(rate_n_flags);
6266 		memcpy(tx->hdr, wh, hdrlen);
6267 		txcmd_size = sizeof(*tx);
6268 	} else {
6269 		struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data;
6270 		memset(tx, 0, sizeof(*tx));
6271 		tx->len = htole16(totlen);
6272 		tx->offload_assist = htole16(offload_assist);
6273 		tx->flags = htole32(flags);
6274 		tx->rate_n_flags = htole32(rate_n_flags);
6275 		memcpy(tx->hdr, wh, hdrlen);
6276 		txcmd_size = sizeof(*tx);
6277 	}
6278 
6279 	/* Trim 802.11 header. */
6280 	m_adj(m, hdrlen);
6281 
6282 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6283 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6284 	if (err && err != EFBIG) {
6285 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
6286 		m_freem(m);
6287 		return err;
6288 	}
6289 	if (err) {
6290 		/* Too many DMA segments, linearize mbuf. */
6291 		if (m_defrag(m, M_DONTWAIT)) {
6292 			m_freem(m);
6293 			return ENOBUFS;
6294 		}
6295 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6296 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6297 		if (err) {
6298 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
6299 			    err);
6300 			m_freem(m);
6301 			return err;
6302 		}
6303 	}
6304 	data->m = m;
6305 	data->in = in;
6306 
6307 	/* Fill TX descriptor. */
6308 	num_tbs = 2 + data->map->dm_nsegs;
6309 	desc->num_tbs = htole16(num_tbs);
6310 
6311 	desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
6312 	paddr = htole64(data->cmd_paddr);
6313 	memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
6314 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
6315 		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
6316 	desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
6317 	    txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE);
6318 	paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
6319 	memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
6320 
6321 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
6322 		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
6323 
6324 	/* Other DMA segments are for data payload. */
6325 	seg = data->map->dm_segs;
6326 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
6327 		desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
6328 		paddr = htole64(seg->ds_addr);
6329 		memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
6330 		if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
6331 			DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
6332 	}
6333 
6334 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
6335 	    BUS_DMASYNC_PREWRITE);
6336 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6337 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6338 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
6339 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6340 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6341 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6342 
6343 	iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs);
6344 
6345 	/* Kick TX ring. */
6346 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
6347 	ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
6348 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
6349 
6350 	/* Mark TX ring as full if we reach a certain threshold. */
6351 	if (++ring->queued > IWX_TX_RING_HIMARK) {
6352 		sc->qfullmsk |= 1 << ring->qid;
6353 	}
6354 
6355 	if (ic->ic_if.if_flags & IFF_UP)
6356 		sc->sc_tx_timer[ring->qid] = 15;
6357 
6358 	return 0;
6359 }
6360 
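/*
 * Ask firmware to flush pending frames for the given station and TIDs
 * and reclaim Tx descriptors up to the per-queue read pointers reported
 * in the response, if the firmware provides one.
 */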
6361 int
6362 iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
6363 {
6364 	struct iwx_rx_packet *pkt;
6365 	struct iwx_tx_path_flush_cmd_rsp *resp;
6366 	struct iwx_tx_path_flush_cmd flush_cmd = {
6367 		.sta_id = htole32(sta_id),
6368 		.tid_mask = htole16(tids),
6369 	};
6370 	struct iwx_host_cmd hcmd = {
6371 		.id = IWX_TXPATH_FLUSH,
6372 		.len = { sizeof(flush_cmd), },
6373 		.data = { &flush_cmd, },
6374 		.flags = IWX_CMD_WANT_RESP,
6375 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
6376 	};
6377 	int err, resp_len, i, num_flushed_queues;
6378 
6379 	err = iwx_send_cmd(sc, &hcmd);
6380 	if (err)
6381 		return err;
6382 
6383 	pkt = hcmd.resp_pkt;
6384 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
6385 		err = EIO;
6386 		goto out;
6387 	}
6388 
6389 	resp_len = iwx_rx_packet_payload_len(pkt);
6390 	/* Some firmware versions don't provide a response. */
6391 	if (resp_len == 0)
6392 		goto out;
6393 	else if (resp_len != sizeof(*resp)) {
6394 		err = EIO;
6395 		goto out;
6396 	}
6397 
6398 	resp = (void *)pkt->data;
6399 
6400 	if (le16toh(resp->sta_id) != sta_id) {
6401 		err = EIO;
6402 		goto out;
6403 	}
6404 
6405 	num_flushed_queues = le16toh(resp->num_flushed_queues);
6406 	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
6407 		err = EIO;
6408 		goto out;
6409 	}
6410 
6411 	for (i = 0; i < num_flushed_queues; i++) {
6412 		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
6413 		uint16_t tid = le16toh(queue_info->tid);
6414 		uint16_t read_after = le16toh(queue_info->read_after_flush);
6415 		uint16_t qid = le16toh(queue_info->queue_num);
6416 		struct iwx_tx_ring *txq;
6417 
6418 		if (qid >= nitems(sc->txq))
6419 			continue;
6420 
6421 		txq = &sc->txq[qid];
6422 		if (tid != txq->tid)
6423 			continue;
6424 
6425 		iwx_txq_advance(sc, txq, read_after);
6426 	}
6427 out:
6428 	iwx_free_resp(sc, &hcmd);
6429 	return err;
6430 }
6431 
6432 #define IWX_FLUSH_WAIT_MS	2000
6433 
6434 int
6435 iwx_wait_tx_queues_empty(struct iwx_softc *sc)
6436 {
6437 	int i, err;
6438 
6439 	for (i = 0; i < nitems(sc->txq); i++) {
6440 		struct iwx_tx_ring *ring = &sc->txq[i];
6441 
6442 		if (i == IWX_DQA_CMD_QUEUE)
6443 			continue;
6444 
6445 		while (ring->queued > 0) {
6446 			err = tsleep_nsec(ring, 0, "iwxflush",
6447 			    MSEC_TO_NSEC(IWX_FLUSH_WAIT_MS));
6448 			if (err)
6449 				return err;
6450 		}
6451 	}
6452 
6453 	return 0;
6454 }
6455 
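/*
 * Toggle the firmware's Tx-drain flag for this station; used while
 * flushing Tx queues before the station is removed.
 */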
6456 int
6457 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
6458 {
6459 	struct iwx_add_sta_cmd cmd;
6460 	int err;
6461 	uint32_t status;
6462 
6463 	memset(&cmd, 0, sizeof(cmd));
6464 	cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6465 	    in->in_color));
6466 	cmd.sta_id = IWX_STATION_ID;
6467 	cmd.add_modify = IWX_STA_MODE_MODIFY;
6468 	cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
6469 	cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
6470 
6471 	status = IWX_ADD_STA_SUCCESS;
6472 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
6473 	    sizeof(cmd), &cmd, &status);
6474 	if (err) {
6475 		printf("%s: could not update sta (error %d)\n",
6476 		    DEVNAME(sc), err);
6477 		return err;
6478 	}
6479 
6480 	switch (status & IWX_ADD_STA_STATUS_MASK) {
6481 	case IWX_ADD_STA_SUCCESS:
6482 		break;
6483 	default:
6484 		err = EIO;
6485 		printf("%s: could not %s draining for station\n",
6486 		    DEVNAME(sc), drain ? "enable" : "disable");
6487 		break;
6488 	}
6489 
6490 	return err;
6491 }
6492 
6493 int
6494 iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
6495 {
6496 	int err;
6497 
6498 	splassert(IPL_NET);
6499 
6500 	sc->sc_flags |= IWX_FLAG_TXFLUSH;
6501 
6502 	err = iwx_drain_sta(sc, in, 1);
6503 	if (err)
6504 		goto done;
6505 
6506 	err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
6507 	if (err) {
6508 		printf("%s: could not flush Tx path (error %d)\n",
6509 		    DEVNAME(sc), err);
6510 		goto done;
6511 	}
6512 
6513 	err = iwx_wait_tx_queues_empty(sc);
6514 	if (err) {
6515 		printf("%s: could not empty Tx queues (error %d)\n",
6516 		    DEVNAME(sc), err);
6517 		goto done;
6518 	}
6519 
6520 	err = iwx_drain_sta(sc, in, 0);
6521 done:
6522 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
6523 	return err;
6524 }
6525 
6526 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC    25
6527 
6528 int
6529 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
6530     struct iwx_beacon_filter_cmd *cmd)
6531 {
6532 	return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
6533 	    0, sizeof(struct iwx_beacon_filter_cmd), cmd);
6534 }
6535 
6536 int
6537 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
6538 {
6539 	struct iwx_beacon_filter_cmd cmd = {
6540 		IWX_BF_CMD_CONFIG_DEFAULTS,
6541 		.bf_enable_beacon_filter = htole32(1),
6542 		.ba_enable_beacon_abort = htole32(enable),
6543 	};
6544 
6545 	if (!sc->sc_bf.bf_enabled)
6546 		return 0;
6547 
6548 	sc->sc_bf.ba_enabled = enable;
6549 	return iwx_beacon_filter_send_cmd(sc, &cmd);
6550 }
6551 
6552 void
6553 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
6554     struct iwx_mac_power_cmd *cmd)
6555 {
6556 	struct ieee80211com *ic = &sc->sc_ic;
6557 	struct ieee80211_node *ni = &in->in_ni;
6558 	int dtim_period, dtim_msec, keep_alive;
6559 
6560 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6561 	    in->in_color));
6562 	if (ni->ni_dtimperiod)
6563 		dtim_period = ni->ni_dtimperiod;
6564 	else
6565 		dtim_period = 1;
6566 
6567 	/*
6568 	 * Regardless of power management state the driver must set the
6569 	 * keep-alive period. Firmware uses it to send keep-alive NDPs
6570 	 * immediately after association. Ensure the keep-alive period
6571 	 * is at least 3 * DTIM.
6572 	 */
6573 	dtim_msec = dtim_period * ni->ni_intval;
6574 	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
6575 	keep_alive = roundup(keep_alive, 1000) / 1000;
6576 	cmd->keep_alive_seconds = htole16(keep_alive);
6577 
6578 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6579 		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6580 }
6581 
6582 int
6583 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
6584 {
6585 	int err;
6586 	int ba_enable;
6587 	struct iwx_mac_power_cmd cmd;
6588 
6589 	memset(&cmd, 0, sizeof(cmd));
6590 
6591 	iwx_power_build_cmd(sc, in, &cmd);
6592 
6593 	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
6594 	    sizeof(cmd), &cmd);
6595 	if (err != 0)
6596 		return err;
6597 
6598 	ba_enable = !!(cmd.flags &
6599 	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
6600 	return iwx_update_beacon_abort(sc, in, ba_enable);
6601 }
6602 
6603 int
6604 iwx_power_update_device(struct iwx_softc *sc)
6605 {
6606 	struct iwx_device_power_cmd cmd = { };
6607 	struct ieee80211com *ic = &sc->sc_ic;
6608 
6609 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6610 		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6611 
6612 	return iwx_send_cmd_pdu(sc,
6613 	    IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6614 }
6615 
6616 int
6617 iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
6618 {
6619 	struct iwx_beacon_filter_cmd cmd = {
6620 		IWX_BF_CMD_CONFIG_DEFAULTS,
6621 		.bf_enable_beacon_filter = htole32(1),
6622 		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
6623 	};
6624 	int err;
6625 
6626 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
6627 	if (err == 0)
6628 		sc->sc_bf.bf_enabled = 1;
6629 
6630 	return err;
6631 }
6632 
6633 int
6634 iwx_disable_beacon_filter(struct iwx_softc *sc)
6635 {
6636 	struct iwx_beacon_filter_cmd cmd;
6637 	int err;
6638 
6639 	memset(&cmd, 0, sizeof(cmd));
6640 
6641 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
6642 	if (err == 0)
6643 		sc->sc_bf.bf_enabled = 0;
6644 
6645 	return err;
6646 }
6647 
6648 int
6649 iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
6650 {
6651 	struct iwx_add_sta_cmd add_sta_cmd;
6652 	int err;
6653 	uint32_t status, aggsize;
6654 	const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
6655 		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
6656 	struct ieee80211com *ic = &sc->sc_ic;
6657 
6658 	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
6659 		panic("STA already added");
6660 
6661 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
6662 
6663 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6664 		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6665 		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
6666 	} else {
6667 		add_sta_cmd.sta_id = IWX_STATION_ID;
6668 		add_sta_cmd.station_type = IWX_STA_LINK;
6669 	}
6670 	add_sta_cmd.mac_id_n_color
6671 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
6672 	if (!update) {
6673 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
6674 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
6675 			    etheranyaddr);
6676 		else
6677 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
6678 			    in->in_macaddr);
6679 	}
6680 	add_sta_cmd.add_modify = update ? 1 : 0;
6681 	add_sta_cmd.station_flags_msk
6682 	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
6683 
6684 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
6685 		add_sta_cmd.station_flags_msk
6686 		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
6687 		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);
6688 
6689 		if (iwx_mimo_enabled(sc)) {
6690 			if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
6691 				uint16_t rx_mcs = (in->in_ni.ni_vht_rxmcs &
6692 				    IEEE80211_VHT_MCS_FOR_SS_MASK(2)) >>
6693 				    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2);
6694 				if (rx_mcs != IEEE80211_VHT_MCS_SS_NOT_SUPP) {
6695 					add_sta_cmd.station_flags |=
6696 					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
6697 				}
6698 			} else {
6699 				if (in->in_ni.ni_rxmcs[1] != 0) {
6700 					add_sta_cmd.station_flags |=
6701 					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
6702 				}
6703 				if (in->in_ni.ni_rxmcs[2] != 0) {
6704 					add_sta_cmd.station_flags |=
6705 					    htole32(IWX_STA_FLG_MIMO_EN_MIMO3);
6706 				}
6707 			}
6708 		}
6709 
6710 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
6711 		    ieee80211_node_supports_ht_chan40(&in->in_ni)) {
6712 			add_sta_cmd.station_flags |= htole32(
6713 			    IWX_STA_FLG_FAT_EN_40MHZ);
6714 		}
6715 
6716 		if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
6717 			if (IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
6718 			    ieee80211_node_supports_vht_chan80(&in->in_ni)) {
6719 				add_sta_cmd.station_flags |= htole32(
6720 				    IWX_STA_FLG_FAT_EN_80MHZ);
6721 			}
6722 			aggsize = (in->in_ni.ni_vhtcaps &
6723 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_MASK) >>
6724 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT;
6725 		} else {
6726 			aggsize = (in->in_ni.ni_ampdu_param &
6727 			    IEEE80211_AMPDU_PARAM_LE);
6728 		}
6729 		if (aggsize > max_aggsize)
6730 			aggsize = max_aggsize;
6731 		add_sta_cmd.station_flags |= htole32((aggsize <<
6732 		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) &
6733 		    IWX_STA_FLG_MAX_AGG_SIZE_MSK);
6734 
6735 		switch (in->in_ni.ni_ampdu_param & IEEE80211_AMPDU_PARAM_SS) {
6736 		case IEEE80211_AMPDU_PARAM_SS_2:
6737 			add_sta_cmd.station_flags
6738 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
6739 			break;
6740 		case IEEE80211_AMPDU_PARAM_SS_4:
6741 			add_sta_cmd.station_flags
6742 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
6743 			break;
6744 		case IEEE80211_AMPDU_PARAM_SS_8:
6745 			add_sta_cmd.station_flags
6746 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
6747 			break;
6748 		case IEEE80211_AMPDU_PARAM_SS_16:
6749 			add_sta_cmd.station_flags
6750 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
6751 			break;
6752 		default:
6753 			break;
6754 		}
6755 	}
6756 
6757 	status = IWX_ADD_STA_SUCCESS;
6758 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
6759 	    &add_sta_cmd, &status);
6760 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
6761 		err = EIO;
6762 
6763 	return err;
6764 }
6765 
6766 int
6767 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
6768 {
6769 	struct ieee80211com *ic = &sc->sc_ic;
6770 	struct iwx_rm_sta_cmd rm_sta_cmd;
6771 	int err;
6772 
6773 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
6774 		panic("sta already removed");
6775 
6776 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
6777 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6778 		rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6779 	else
6780 		rm_sta_cmd.sta_id = IWX_STATION_ID;
6781 
6782 	err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
6783 	    &rm_sta_cmd);
6784 
6785 	return err;
6786 }
6787 
6788 int
6789 iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
6790 {
6791 	struct ieee80211com *ic = &sc->sc_ic;
6792 	struct ieee80211_node *ni = &in->in_ni;
6793 	int err, i, cmd_ver;
6794 
6795 	err = iwx_flush_sta(sc, in);
6796 	if (err) {
6797 		printf("%s: could not flush Tx path (error %d)\n",
6798 		    DEVNAME(sc), err);
6799 		return err;
6800 	}
6801 
6802 	/*
6803 	 * New SCD_QUEUE_CONFIG API requires explicit queue removal
6804 	 * before a station gets removed.
6805 	 */
6806 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
6807 	    IWX_SCD_QUEUE_CONFIG_CMD);
6808 	if (cmd_ver != 0 && cmd_ver != IWX_FW_CMD_VER_UNKNOWN) {
6809 		err = iwx_disable_mgmt_queue(sc);
6810 		if (err)
6811 			return err;
6812 		for (i = IWX_FIRST_AGG_TX_QUEUE;
6813 		    i < IWX_LAST_AGG_TX_QUEUE; i++) {
6814 			struct iwx_tx_ring *ring = &sc->txq[i];
6815 			if ((sc->qenablemsk & (1 << i)) == 0)
6816 				continue;
6817 			err = iwx_disable_txq(sc, IWX_STATION_ID,
6818 			    ring->qid, ring->tid);
6819 			if (err) {
6820 				printf("%s: could not disable Tx queue %d "
6821 				    "(error %d)\n", DEVNAME(sc), ring->qid,
6822 				    err);
6823 				return err;
6824 			}
6825 		}
6826 	}
6827 
6828 	err = iwx_rm_sta_cmd(sc, in);
6829 	if (err) {
6830 		printf("%s: could not remove STA (error %d)\n",
6831 		    DEVNAME(sc), err);
6832 		return err;
6833 	}
6834 
6835 	in->in_flags = 0;
6836 
6837 	sc->sc_rx_ba_sessions = 0;
6838 	sc->ba_rx.start_tidmask = 0;
6839 	sc->ba_rx.stop_tidmask = 0;
6840 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
6841 	sc->ba_tx.start_tidmask = 0;
6842 	sc->ba_tx.stop_tidmask = 0;
6843 	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
6844 		sc->qenablemsk &= ~(1 << i);
6845 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
6846 		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
6847 		if (ba->ba_state != IEEE80211_BA_AGREED)
6848 			continue;
6849 		ieee80211_delba_request(ic, ni, 0, 1, i);
6850 	}
6851 
6852 	return 0;
6853 }
6854 
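/*
 * Fill the UMAC scan request's channel list with all configured
 * channels and return the number of entries added.
 */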
6855 uint8_t
6856 iwx_umac_scan_fill_channels(struct iwx_softc *sc,
6857     struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
6858     int n_ssids, int bgscan)
6859 {
6860 	struct ieee80211com *ic = &sc->sc_ic;
6861 	struct ieee80211_channel *c;
6862 	uint8_t nchan;
6863 
6864 	for (nchan = 0, c = &ic->ic_channels[1];
6865 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
6866 	    nchan < chan_nitems &&
6867 	    nchan < sc->sc_capa_n_scan_channels;
6868 	    c++) {
6869 		uint8_t channel_num;
6870 
6871 		if (c->ic_flags == 0)
6872 			continue;
6873 
6874 		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
6875 		if (isset(sc->sc_ucode_api,
6876 		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
6877 			chan->v2.channel_num = channel_num;
6878 			if (IEEE80211_IS_CHAN_2GHZ(c))
6879 				chan->v2.band = IWX_PHY_BAND_24;
6880 			else
6881 				chan->v2.band = IWX_PHY_BAND_5;
6882 			chan->v2.iter_count = 1;
6883 			chan->v2.iter_interval = 0;
6884 		} else {
6885 			chan->v1.channel_num = channel_num;
6886 			chan->v1.iter_count = 1;
6887 			chan->v1.iter_interval = htole16(0);
6888 		}
6889 		if (n_ssids != 0 && !bgscan)
6890 			chan->flags = htole32(1 << 0); /* select SSID 0 */
6891 		chan++;
6892 		nchan++;
6893 	}
6894 
6895 	return nchan;
6896 }
6897 
6898 int
6899 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
6900 {
6901 	struct ieee80211com *ic = &sc->sc_ic;
6902 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
6903 	struct ieee80211_rateset *rs;
6904 	size_t remain = sizeof(preq->buf);
6905 	uint8_t *frm, *pos;
6906 
6907 	memset(preq, 0, sizeof(*preq));
6908 
6909 	if (remain < sizeof(*wh) + 2)
6910 		return ENOBUFS;
6911 
6912 	/*
6913 	 * Build a probe request frame.  Most of the following code is a
6914 	 * copy & paste of what is done in net80211.
6915 	 */
6916 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6917 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6918 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6919 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
6920 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
6921 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
6922 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
6923 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
6924 
6925 	frm = (uint8_t *)(wh + 1);
6926 	*frm++ = IEEE80211_ELEMID_SSID;
6927 	*frm++ = 0;
6928 	/* hardware inserts SSID */
6929 
6930 	/* Tell the firmware where the MAC header is. */
6931 	preq->mac_header.offset = 0;
6932 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
6933 	remain -= frm - (uint8_t *)wh;
6934 
6935 	/* Fill in 2GHz IEs and tell firmware where they are. */
6936 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6937 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6938 		if (remain < 4 + rs->rs_nrates)
6939 			return ENOBUFS;
6940 	} else if (remain < 2 + rs->rs_nrates)
6941 		return ENOBUFS;
6942 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
6943 	pos = frm;
6944 	frm = ieee80211_add_rates(frm, rs);
6945 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6946 		frm = ieee80211_add_xrates(frm, rs);
6947 	remain -= frm - pos;
6948 
6949 	if (isset(sc->sc_enabled_capa,
6950 	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
6951 		if (remain < 3)
6952 			return ENOBUFS;
6953 		*frm++ = IEEE80211_ELEMID_DSPARMS;
6954 		*frm++ = 1;
6955 		*frm++ = 0;
6956 		remain -= 3;
6957 	}
6958 	preq->band_data[0].len = htole16(frm - pos);
6959 
6960 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
6961 		/* Fill in 5GHz IEs. */
6962 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6963 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6964 			if (remain < 4 + rs->rs_nrates)
6965 				return ENOBUFS;
6966 		} else if (remain < 2 + rs->rs_nrates)
6967 			return ENOBUFS;
6968 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
6969 		pos = frm;
6970 		frm = ieee80211_add_rates(frm, rs);
6971 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6972 			frm = ieee80211_add_xrates(frm, rs);
6973 		preq->band_data[1].len = htole16(frm - pos);
6974 		remain -= frm - pos;
6975 		if (ic->ic_flags & IEEE80211_F_VHTON) {
6976 			if (remain < 14)
6977 				return ENOBUFS;
			pos = frm;
			frm = ieee80211_add_vhtcaps(frm, ic);
			remain -= frm - pos;
6980 		}
6981 	}
6982 
6983 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
6984 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6985 	pos = frm;
6986 	if (ic->ic_flags & IEEE80211_F_HTON) {
6987 		if (remain < 28)
6988 			return ENOBUFS;
6989 		frm = ieee80211_add_htcaps(frm, ic);
6990 		/* XXX add WME info? */
6991 		remain -= frm - pos;
6992 	}
6993 
6994 	preq->common_data.len = htole16(frm - pos);
6995 
6996 	return 0;
6997 }
6998 
6999 int
7000 iwx_config_umac_scan_reduced(struct iwx_softc *sc)
7001 {
7002 	struct iwx_scan_config scan_cfg;
7003 	struct iwx_host_cmd hcmd = {
7004 		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
7005 		.len[0] = sizeof(scan_cfg),
7006 		.data[0] = &scan_cfg,
7007 		.flags = 0,
7008 	};
7009 	int cmdver;
7010 
7011 	if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
7012 		printf("%s: firmware does not support reduced scan config\n",
7013 		    DEVNAME(sc));
7014 		return ENOTSUP;
7015 	}
7016 
7017 	memset(&scan_cfg, 0, sizeof(scan_cfg));
7018 
7019 	/*
7020 	 * SCAN_CFG version >= 5 implies that the broadcast
7021 	 * STA ID field is deprecated.
7022 	 */
7023 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
7024 	if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
7025 		scan_cfg.bcast_sta_id = 0xff;
7026 
7027 	scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
7028 	scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
7029 
7030 	return iwx_send_cmd(sc, &hcmd);
7031 }
7032 
7033 uint16_t
7034 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
7035 {
7036 	struct ieee80211com *ic = &sc->sc_ic;
7037 	uint16_t flags = 0;
7038 
7039 	if (ic->ic_des_esslen == 0)
7040 		flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
7041 
7042 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
7043 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
7044 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
7045 
7046 	return flags;
7047 }
7048 
7049 #define IWX_SCAN_DWELL_ACTIVE		10
7050 #define IWX_SCAN_DWELL_PASSIVE		110
7051 
7052 /* adaptive dwell max budget time [TU] for full scan */
7053 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
7054 /* adaptive dwell max budget time [TU] for directed scan */
7055 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
7056 /* adaptive dwell default high band APs number */
7057 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
7058 /* adaptive dwell default low band APs number */
7059 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
7060 /* adaptive dwell default APs number in social channels (1, 6, 11) */
7061 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
7062 /* adaptive dwell number of APs override for p2p friendly GO channels */
7063 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
7064 /* adaptive dwell number of APs override for social channels */
7065 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
7066 
7067 void
7068 iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
7069     struct iwx_scan_general_params_v10 *general_params, int bgscan)
7070 {
7071 	uint32_t suspend_time, max_out_time;
7072 	uint8_t active_dwell, passive_dwell;
7073 
7074 	active_dwell = IWX_SCAN_DWELL_ACTIVE;
7075 	passive_dwell = IWX_SCAN_DWELL_PASSIVE;
7076 
7077 	general_params->adwell_default_social_chn =
7078 		IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
7079 	general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
7080 	general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
7081 
7082 	if (bgscan)
7083 		general_params->adwell_max_budget =
7084 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
7085 	else
7086 		general_params->adwell_max_budget =
7087 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
7088 
7089 	general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
7090 	if (bgscan) {
7091 		max_out_time = 120;	/* htole32() is applied below */
7092 		suspend_time = 120;
7093 	} else {
7094 		max_out_time = 0;
7095 		suspend_time = 0;
7096 	}
7097 	general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
7098 		htole32(max_out_time);
7099 	general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
7100 		htole32(suspend_time);
7101 	general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
7102 		htole32(max_out_time);
7103 	general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
7104 		htole32(suspend_time);
7105 
7106 	general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
7107 	general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
7108 	general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
7109 	general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
7110 }
7111 
7112 void
7113 iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
7114     struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
7115 {
7116 	iwx_scan_umac_dwell_v10(sc, gp, bgscan);
7117 
7118 	gp->flags = htole16(gen_flags);
7119 
7120 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
7121 		gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
7122 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
7123 		gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
7124 
7125 	gp->scan_start_mac_id = 0;
7126 }
7127 
7128 void
7129 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
7130     struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
7131     int n_ssid, int bgscan)
7132 {
7133 	cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
7134 
7135 	cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
7136 	    nitems(cp->channel_config), n_ssid, bgscan);
7137 
7138 	cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
7139 	cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
7140 }
7141 
7142 int
7143 iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
7144 {
7145 	struct ieee80211com *ic = &sc->sc_ic;
7146 	struct iwx_host_cmd hcmd = {
7147 		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
7148 		.len = { 0, },
7149 		.data = { NULL, },
7150 		.flags = 0,
7151 	};
7152 	struct iwx_scan_req_umac_v14 *cmd;
7153 	struct iwx_scan_req_params_v14 *scan_p;
7154 	int err, async = bgscan, n_ssid = 0;
7155 	uint16_t gen_flags;
7156 	uint32_t bitmap_ssid = 0;
7157 
7158 	cmd = malloc(sizeof(*cmd), M_DEVBUF,
7159 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7160 	if (cmd == NULL)
7161 		return ENOMEM;
7162 
7163 	scan_p = &cmd->scan_params;
7164 
7165 	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
7166 	cmd->uid = htole32(0);
7167 
7168 	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
7169 	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
7170 	    gen_flags, bgscan);
7171 
7172 	scan_p->periodic_params.schedule[0].interval = htole16(0);
7173 	scan_p->periodic_params.schedule[0].iter_count = 1;
7174 
7175 	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
7176 	if (err) {
7177 		free(cmd, M_DEVBUF, sizeof(*cmd));
7178 		return err;
7179 	}
7180 
7181 	if (ic->ic_des_esslen != 0) {
7182 		scan_p->probe_params.direct_scan[0].id = IEEE80211_ELEMID_SSID;
7183 		scan_p->probe_params.direct_scan[0].len = ic->ic_des_esslen;
7184 		memcpy(scan_p->probe_params.direct_scan[0].ssid,
7185 		    ic->ic_des_essid, ic->ic_des_esslen);
7186 		bitmap_ssid |= (1 << 0);
7187 		n_ssid = 1;
7188 	}
7189 
7190 	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
7191 	    n_ssid, bgscan);
7192 
7193 	hcmd.len[0] = sizeof(*cmd);
7194 	hcmd.data[0] = (void *)cmd;
7195 	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
7196 
7197 	err = iwx_send_cmd(sc, &hcmd);
7198 	free(cmd, M_DEVBUF, sizeof(*cmd));
7199 	return err;
7200 }
7201 
7202 void
7203 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
7204 {
7205 	struct ieee80211com *ic = &sc->sc_ic;
7206 	struct ifnet *ifp = IC2IFP(ic);
7207 	char alpha2[3];
7208 
7209 	snprintf(alpha2, sizeof(alpha2), "%c%c",
7210 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
7211 
7212 	if (ifp->if_flags & IFF_DEBUG) {
7213 		printf("%s: firmware has detected regulatory domain '%s' "
7214 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
7215 	}
7216 
7217 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
7218 }
7219 
7220 uint8_t
7221 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
7222 {
7223 	int i;
7224 	uint8_t rval;
7225 
7226 	for (i = 0; i < rs->rs_nrates; i++) {
7227 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
7228 		if (rval == iwx_rates[ridx].rate)
7229 			return rs->rs_rates[i];
7230 	}
7231 
7232 	return 0;
7233 }
7234 
7235 int
7236 iwx_rval2ridx(int rval)
7237 {
7238 	int ridx;
7239 
7240 	for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
7241 		if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
7242 			continue;
7243 		if (rval == iwx_rates[ridx].rate)
7244 			break;
7245 	}
7246 
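	/* If no matching rate was found, ridx == nitems(iwx_rates) here. */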
7247 	return ridx;
7248 }
7249 
7250 void
7251 iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
7252     int *ofdm_rates)
7253 {
7254 	struct ieee80211_node *ni = &in->in_ni;
7255 	struct ieee80211_rateset *rs = &ni->ni_rates;
7256 	int lowest_present_ofdm = -1;
7257 	int lowest_present_cck = -1;
7258 	uint8_t cck = 0;
7259 	uint8_t ofdm = 0;
7260 	int i;
7261 
7262 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
7263 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
7264 		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
7265 			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
7266 				continue;
7267 			cck |= (1 << i);
7268 			if (lowest_present_cck == -1 || lowest_present_cck > i)
7269 				lowest_present_cck = i;
7270 		}
7271 	}
7272 	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
7273 		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
7274 			continue;
7275 		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
7276 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
7277 			lowest_present_ofdm = i;
7278 	}
7279 
7280 	/*
7281 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
7282 	 * variables. This isn't sufficient though, as there might not
7283 	 * be all the right rates in the bitmap. E.g. if the only basic
7284 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
7285 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
7286 	 *
7287 	 *    [...] a STA responding to a received frame shall transmit
7288 	 *    its Control Response frame [...] at the highest rate in the
7289 	 *    BSSBasicRateSet parameter that is less than or equal to the
7290 	 *    rate of the immediately previous frame in the frame exchange
7291 	 *    sequence ([...]) and that is of the same modulation class
7292 	 *    ([...]) as the received frame. If no rate contained in the
7293 	 *    BSSBasicRateSet parameter meets these conditions, then the
7294 	 *    control frame sent in response to a received frame shall be
7295 	 *    transmitted at the highest mandatory rate of the PHY that is
7296 	 *    less than or equal to the rate of the received frame, and
7297 	 *    that is of the same modulation class as the received frame.
7298 	 *
7299 	 * As a consequence, we need to add all mandatory rates that are
7300 	 * lower than all of the basic rates to these bitmaps.
7301 	 */
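	/*
	 * Worked example, assuming the usual iwx_rates layout with
	 * 1M/2M/5.5M/11M at indices 0-3 and OFDM starting at 6M:
	 * if the only basic rates are 5.5M and 11M, the loops above
	 * leave cck == 0x0c and ofdm == 0. The fixups below then add
	 * the mandatory 2M and 1M bits (cck becomes 0x0f) and the
	 * always-added 6M bit (ofdm becomes 0x01).
	 */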
7302 
7303 	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
7304 		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
7305 	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
7306 		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
7307 	/* 6M already there or needed so always add */
7308 	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;
7309 
7310 	/*
7311 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
7312 	 * Note, however:
7313 	 *  - if no CCK rates are basic, it must be ERP since there must
7314 	 *    be some basic rates at all, so they're OFDM => ERP PHY
7315 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
7316 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
7317 	 *  - if 5.5M is basic, 1M and 2M are mandatory
7318 	 *  - if 2M is basic, 1M is mandatory
7319 	 *  - if 1M is basic, that's the only valid ACK rate.
7320 	 * As a consequence, it's not as complicated as it sounds, just add
7321 	 * As a consequence, it's not as complicated as it sounds: just add
7322 	 */
7323 	if (IWX_RATE_11M_INDEX < lowest_present_cck)
7324 		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
7325 	if (IWX_RATE_5M_INDEX < lowest_present_cck)
7326 		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
7327 	if (IWX_RATE_2M_INDEX < lowest_present_cck)
7328 		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
7329 	/* 1M already there or needed so always add */
7330 	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;
7331 
7332 	*cck_rates = cck;
7333 	*ofdm_rates = ofdm;
7334 }
7335 
7336 void
7337 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
7338     struct iwx_mac_ctx_cmd *cmd, uint32_t action)
7339 {
7340 #define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
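	/*
	 * For example (typical EDCA defaults, assumed here): ECWmin = 4
	 * gives CWmin = IWX_EXP2(4) = 15 slots, and ECWmax = 10 gives
	 * CWmax = IWX_EXP2(10) = 1023 slots.
	 */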
7341 	struct ieee80211com *ic = &sc->sc_ic;
7342 	struct ieee80211_node *ni = ic->ic_bss;
7343 	int cck_ack_rates, ofdm_ack_rates;
7344 	int i;
7345 
7346 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
7347 	    in->in_color));
7348 	cmd->action = htole32(action);
7349 
7350 	if (action == IWX_FW_CTXT_ACTION_REMOVE)
7351 		return;
7352 
7353 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7354 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
7355 	else if (ic->ic_opmode == IEEE80211_M_STA)
7356 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
7357 	else
7358 		panic("unsupported operating mode %d", ic->ic_opmode);
7359 	cmd->tsf_id = htole32(IWX_TSF_ID_A);
7360 
7361 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
7362 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7363 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
7364 		return;
7365 	}
7366 
7367 	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
7368 	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
7369 	cmd->cck_rates = htole32(cck_ack_rates);
7370 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
7371 
7372 	cmd->cck_short_preamble
7373 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
7374 	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
7375 	cmd->short_slot
7376 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
7377 	      ? IWX_MAC_FLG_SHORT_SLOT : 0);
7378 
7379 	for (i = 0; i < EDCA_NUM_AC; i++) {
7380 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
7381 		int txf = iwx_ac_to_tx_fifo[i];
7382 
7383 		cmd->ac[txf].cw_min = htole16(IWX_EXP2(ac->ac_ecwmin));
7384 		cmd->ac[txf].cw_max = htole16(IWX_EXP2(ac->ac_ecwmax));
7385 		cmd->ac[txf].aifsn = ac->ac_aifsn;
7386 		cmd->ac[txf].fifos_mask = (1 << txf);
7387 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
7388 	}
7389 	if (ni->ni_flags & IEEE80211_NODE_QOS)
7390 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
7391 
7392 	if (ni->ni_flags & IEEE80211_NODE_HT) {
7393 		enum ieee80211_htprot htprot =
7394 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
7395 		switch (htprot) {
7396 		case IEEE80211_HTPROT_NONE:
7397 			break;
7398 		case IEEE80211_HTPROT_NONMEMBER:
7399 		case IEEE80211_HTPROT_NONHT_MIXED:
7400 			cmd->protection_flags |=
7401 			    htole32(IWX_MAC_PROT_FLG_HT_PROT |
7402 			    IWX_MAC_PROT_FLG_FAT_PROT);
7403 			break;
7404 		case IEEE80211_HTPROT_20MHZ:
7405 			if (in->in_phyctxt &&
7406 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
7407 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
7408 				cmd->protection_flags |=
7409 				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
7410 				    IWX_MAC_PROT_FLG_FAT_PROT);
7411 			}
7412 			break;
7413 		default:
7414 			break;
7415 		}
7416 
7417 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
7418 	}
7419 	if (ic->ic_flags & IEEE80211_F_USEPROT)
7420 		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
7421 
7422 	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
7423 #undef IWX_EXP2
7424 }
7425 
7426 void
7427 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
7428     struct iwx_mac_data_sta *sta, int assoc)
7429 {
7430 	struct ieee80211_node *ni = &in->in_ni;
7431 	uint32_t dtim_off;
7432 	uint64_t tsf;
7433 
7434 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
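	/*
	 * Worked example with assumed values: ni_dtimcount == 2 and
	 * ni_intval == 100 TU give dtim_off == 2 * 100 * 1024 == 204800,
	 * i.e. the DTIM beacon lies two beacon intervals ahead
	 * (IEEE80211_DUR_TU is 1024 microseconds).
	 */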
7435 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
7436 	tsf = letoh64(tsf);
7437 
7438 	sta->is_assoc = htole32(assoc);
7439 	if (assoc) {
7440 		sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
7441 		sta->dtim_tsf = htole64(tsf + dtim_off);
7442 		sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
7443 	}
7444 	sta->bi = htole32(ni->ni_intval);
7445 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
7446 	sta->data_policy = htole32(0);
7447 	sta->listen_interval = htole32(10);
7448 	sta->assoc_id = htole32(ni->ni_associd);
7449 }
7450 
7451 int
7452 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
7453     int assoc)
7454 {
7455 	struct ieee80211com *ic = &sc->sc_ic;
7456 	struct ieee80211_node *ni = &in->in_ni;
7457 	struct iwx_mac_ctx_cmd cmd;
7458 	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
7459 
7460 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
7461 		panic("MAC already added");
7462 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
7463 		panic("MAC already removed");
7464 
7465 	memset(&cmd, 0, sizeof(cmd));
7466 
7467 	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
7468 
7469 	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
7470 		return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
7471 		    sizeof(cmd), &cmd);
7472 	}
7473 
7474 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7475 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
7476 		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
7477 		    IWX_MAC_FILTER_ACCEPT_GRP |
7478 		    IWX_MAC_FILTER_IN_BEACON |
7479 		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
7480 		    IWX_MAC_FILTER_IN_CRC32);
7481 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod) {
7482 		/*
7483 		 * Allow beacons to pass through as long as we are not
7484 		 * associated or we do not have dtim period information.
7485 		 */
7486 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
7487 	}
7488 	iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
7489 	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
7490 }
7491 
7492 int
7493 iwx_clear_statistics(struct iwx_softc *sc)
7494 {
7495 	struct iwx_statistics_cmd scmd = {
7496 		.flags = htole32(IWX_STATISTICS_FLG_CLEAR)
7497 	};
7498 	struct iwx_host_cmd cmd = {
7499 		.id = IWX_STATISTICS_CMD,
7500 		.len[0] = sizeof(scmd),
7501 		.data[0] = &scmd,
7502 		.flags = IWX_CMD_WANT_RESP,
7503 		.resp_pkt_len = sizeof(struct iwx_notif_statistics),
7504 	};
7505 	int err;
7506 
7507 	err = iwx_send_cmd(sc, &cmd);
7508 	if (err)
7509 		return err;
7510 
7511 	iwx_free_resp(sc, &cmd);
7512 	return 0;
7513 }
7514 
7515 void
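/*
 * Tasks hold a reference on sc->task_refs while queued so that
 * iwx_stop() can wait for all pending tasks to drain before it
 * tears down driver state.
 */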
7516 iwx_add_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
7517 {
7518 	int s = splnet();
7519 
7520 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
7521 		splx(s);
7522 		return;
7523 	}
7524 
7525 	refcnt_take(&sc->task_refs);
7526 	if (!task_add(taskq, task))
7527 		refcnt_rele_wake(&sc->task_refs);
7528 	splx(s);
7529 }
7530 
7531 void
7532 iwx_del_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
7533 {
7534 	if (task_del(taskq, task))
7535 		refcnt_rele(&sc->task_refs);
7536 }
7537 
7538 int
7539 iwx_scan(struct iwx_softc *sc)
7540 {
7541 	struct ieee80211com *ic = &sc->sc_ic;
7542 	struct ifnet *ifp = IC2IFP(ic);
7543 	int err;
7544 
7545 	if (sc->sc_flags & IWX_FLAG_BGSCAN) {
7546 		err = iwx_scan_abort(sc);
7547 		if (err) {
7548 			printf("%s: could not abort background scan\n",
7549 			    DEVNAME(sc));
7550 			return err;
7551 		}
7552 	}
7553 
7554 	err = iwx_umac_scan_v14(sc, 0);
7555 	if (err) {
7556 		printf("%s: could not initiate scan\n", DEVNAME(sc));
7557 		return err;
7558 	}
7559 
7560 	/*
7561 	 * The current mode might have been fixed during association.
7562 	 * Ensure all channels get scanned.
7563 	 */
7564 	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
7565 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
7566 
7567 	sc->sc_flags |= IWX_FLAG_SCANNING;
7568 	if (ifp->if_flags & IFF_DEBUG)
7569 		printf("%s: %s -> %s\n", ifp->if_xname,
7570 		    ieee80211_state_name[ic->ic_state],
7571 		    ieee80211_state_name[IEEE80211_S_SCAN]);
7572 	if ((sc->sc_flags & IWX_FLAG_BGSCAN) == 0) {
7573 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
7574 		ieee80211_node_cleanup(ic, ic->ic_bss);
7575 	}
7576 	ic->ic_state = IEEE80211_S_SCAN;
7577 	wakeup(&ic->ic_state); /* wake iwx_init() */
7578 
7579 	return 0;
7580 }
7581 
7582 int
7583 iwx_bgscan(struct ieee80211com *ic)
7584 {
7585 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
7586 	int err;
7587 
7588 	if (sc->sc_flags & IWX_FLAG_SCANNING)
7589 		return 0;
7590 
7591 	err = iwx_umac_scan_v14(sc, 1);
7592 	if (err) {
7593 		printf("%s: could not initiate scan\n", DEVNAME(sc));
7594 		return err;
7595 	}
7596 
7597 	sc->sc_flags |= IWX_FLAG_BGSCAN;
7598 	return 0;
7599 }
7600 
7601 void
7602 iwx_bgscan_done(struct ieee80211com *ic,
7603     struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
7604 {
7605 	struct iwx_softc *sc = ic->ic_softc;
7606 
7607 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
7608 	sc->bgscan_unref_arg = arg;
7609 	sc->bgscan_unref_arg_size = arg_size;
7610 	iwx_add_task(sc, sc->sc_nswq, &sc->bgscan_done_task);
7611 }
7612 
7613 void
7614 iwx_bgscan_done_task(void *arg)
7615 {
7616 	struct iwx_softc *sc = arg;
7617 	struct ieee80211com *ic = &sc->sc_ic;
7618 	struct iwx_node *in = (void *)ic->ic_bss;
7619 	struct ieee80211_node *ni = &in->in_ni;
7620 	int tid, err = 0, s = splnet();
7621 
7622 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
7623 	    (ic->ic_flags & IEEE80211_F_BGSCAN) == 0 ||
7624 	    ic->ic_state != IEEE80211_S_RUN) {
7625 		err = ENXIO;
7626 		goto done;
7627 	}
7628 
7629 	err = iwx_flush_sta(sc, in);
7630 	if (err)
7631 		goto done;
7632 
7633 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
7634 		int qid = IWX_FIRST_AGG_TX_QUEUE + tid;
7635 
7636 		if (sc->aggqid[tid] == 0)
7637 			continue;
7638 
7639 		err = iwx_disable_txq(sc, IWX_STATION_ID, qid, tid);
7640 		if (err)
7641 			goto done;
7642 #if 0 /* disabled for now; we are going to DEAUTH soon anyway */
7643 		IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,
7644 		    IEEE80211_ACTION_DELBA,
7645 		    IEEE80211_REASON_AUTH_LEAVE << 16 |
7646 		    IEEE80211_FC1_DIR_TODS << 8 | tid);
7647 #endif
7648 		ieee80211_node_tx_ba_clear(ni, tid);
7649 		sc->aggqid[tid] = 0;
7650 	}
7651 
7652 	/*
7653 	 * Tx queues have been flushed and Tx agg has been stopped.
7654 	 * Allow roaming to proceed.
7655 	 */
7656 	ni->ni_unref_arg = sc->bgscan_unref_arg;
7657 	ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
7658 	sc->bgscan_unref_arg = NULL;
7659 	sc->bgscan_unref_arg_size = 0;
7660 	ieee80211_node_tx_stopped(ic, &in->in_ni);
7661 done:
7662 	if (err) {
7663 		free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
7664 		sc->bgscan_unref_arg = NULL;
7665 		sc->bgscan_unref_arg_size = 0;
7666 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
7667 			task_add(systq, &sc->init_task);
7668 	}
7669 	refcnt_rele_wake(&sc->task_refs);
7670 	splx(s);
7671 }
7672 
7673 int
7674 iwx_umac_scan_abort(struct iwx_softc *sc)
7675 {
7676 	struct iwx_umac_scan_abort cmd = { 0 };
7677 
7678 	return iwx_send_cmd_pdu(sc,
7679 	    IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC),
7680 	    0, sizeof(cmd), &cmd);
7681 }
7682 
7683 int
7684 iwx_scan_abort(struct iwx_softc *sc)
7685 {
7686 	int err;
7687 
7688 	err = iwx_umac_scan_abort(sc);
7689 	if (err == 0)
7690 		sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
7691 	return err;
7692 }
7693 
7694 int
7695 iwx_enable_mgmt_queue(struct iwx_softc *sc)
7696 {
7697 	int err;
7698 
7699 	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7700 
7701 	/*
7702 	 * Non-QoS frames use the "MGMT" TID and queue.
7703 	 * Other TIDs and data queues are reserved for QoS data frames.
7704 	 */
7705 	err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7706 	    IWX_MGMT_TID, IWX_TX_RING_COUNT);
7707 	if (err) {
7708 		printf("%s: could not enable Tx queue %d (error %d)\n",
7709 		    DEVNAME(sc), sc->first_data_qid, err);
7710 		return err;
7711 	}
7712 
7713 	return 0;
7714 }
7715 
7716 int
7717 iwx_disable_mgmt_queue(struct iwx_softc *sc)
7718 {
7719 	int err, cmd_ver;
7720 
7721 	/* Explicit removal is only required with old SCD_QUEUE_CFG command. */
7722 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7723 	    IWX_SCD_QUEUE_CONFIG_CMD);
7724 	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN)
7725 		return 0;
7726 
7727 	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7728 
7729 	err = iwx_disable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7730 	    IWX_MGMT_TID);
7731 	if (err) {
7732 		printf("%s: could not disable Tx queue %d (error %d)\n",
7733 		    DEVNAME(sc), sc->first_data_qid, err);
7734 		return err;
7735 	}
7736 
7737 	return 0;
7738 }
7739 
7740 int
7741 iwx_rs_rval2idx(uint8_t rval)
7742 {
7743 	/* Firmware expects indices which match our 11g rate set. */
7744 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
7745 	int i;
7746 
7747 	for (i = 0; i < rs->rs_nrates; i++) {
7748 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
7749 			return i;
7750 	}
7751 
7752 	return -1;
7753 }
7754 
7755 uint16_t
7756 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
7757 {
7758 	struct ieee80211com *ic = &sc->sc_ic;
7759 	const struct ieee80211_ht_rateset *rs;
7760 	uint16_t htrates = 0;
7761 	int mcs;
7762 
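	/*
	 * The resulting bitmap is relative to the rateset's lowest MCS;
	 * e.g. for the MIMO2 rateset (MCS 8-15) bit 0 means MCS 8.
	 */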
7763 	rs = &ieee80211_std_ratesets_11n[rsidx];
7764 	for (mcs = rs->min_mcs; mcs <= rs->max_mcs; mcs++) {
7765 		if (!isset(ni->ni_rxmcs, mcs) ||
7766 		    !isset(ic->ic_sup_mcs, mcs))
7767 			continue;
7768 		htrates |= (1 << (mcs - rs->min_mcs));
7769 	}
7770 
7771 	return htrates;
7772 }
7773 
7774 uint16_t
7775 iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
7776 {
7777 	uint16_t rx_mcs;
7778 	int max_mcs = -1;
7779 
7780 	rx_mcs = (ni->ni_vht_rxmcs & IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
7781 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);
7782 	switch (rx_mcs) {
7783 	case IEEE80211_VHT_MCS_SS_NOT_SUPP:
7784 		break;
7785 	case IEEE80211_VHT_MCS_0_7:
7786 		max_mcs = 7;
7787 		break;
7788 	case IEEE80211_VHT_MCS_0_8:
7789 		max_mcs = 8;
7790 		break;
7791 	case IEEE80211_VHT_MCS_0_9:
7792 		/* Disable VHT MCS 9 for 20MHz-only stations. */
7793 		if (!ieee80211_node_supports_ht_chan40(ni))
7794 			max_mcs = 8;
7795 		else
7796 			max_mcs = 9;
7797 		break;
7798 	default:
7799 		/* Should not happen; values above cover the possible range. */
7800 		panic("invalid VHT Rx MCS value %u", rx_mcs);
7801 	}
7802 
7803 	return ((1 << (max_mcs + 1)) - 1);
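	/* Bitmap of MCS 0 through max_mcs; 0 if max_mcs is still -1. */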
7804 }
7805 
7806 int
7807 iwx_rs_init_v3(struct iwx_softc *sc, struct iwx_node *in)
7808 {
7809 	struct ieee80211_node *ni = &in->in_ni;
7810 	struct ieee80211_rateset *rs = &ni->ni_rates;
7811 	struct iwx_tlc_config_cmd_v3 cfg_cmd;
7812 	uint32_t cmd_id;
7813 	int i;
7814 	size_t cmd_size = sizeof(cfg_cmd);
7815 
7816 	memset(&cfg_cmd, 0, sizeof(cfg_cmd));
7817 
7818 	for (i = 0; i < rs->rs_nrates; i++) {
7819 		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
7820 		int idx = iwx_rs_rval2idx(rval);
7821 		if (idx == -1)
7822 			return EINVAL;
7823 		cfg_cmd.non_ht_rates |= (1 << idx);
7824 	}
7825 
7826 	if (ni->ni_flags & IEEE80211_NODE_VHT) {
7827 		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
7828 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
7829 		    htole16(iwx_rs_vht_rates(sc, ni, 1));
7830 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
7831 		    htole16(iwx_rs_vht_rates(sc, ni, 2));
7832 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
7833 		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
7834 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
7835 		    htole16(iwx_rs_ht_rates(sc, ni,
7836 		    IEEE80211_HT_RATESET_SISO));
7837 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
7838 		    htole16(iwx_rs_ht_rates(sc, ni,
7839 		    IEEE80211_HT_RATESET_MIMO2));
7840 	} else
7841 		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
7842 
7843 	cfg_cmd.sta_id = IWX_STATION_ID;
7844 	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
7845 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
7846 	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
7847 	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
7848 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
7849 	else
7850 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
7851 	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
7852 	if (ni->ni_flags & IEEE80211_NODE_VHT)
7853 		cfg_cmd.max_mpdu_len = htole16(3895);
7854 	else
7855 		cfg_cmd.max_mpdu_len = htole16(3839);
7856 	if (ni->ni_flags & IEEE80211_NODE_HT) {
7857 		if (ieee80211_node_supports_ht_sgi20(ni)) {
7858 			cfg_cmd.sgi_ch_width_supp |= (1 <<
7859 			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
7860 		}
7861 		if (ieee80211_node_supports_ht_sgi40(ni)) {
7862 			cfg_cmd.sgi_ch_width_supp |= (1 <<
7863 			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
7864 		}
7865 	}
7866 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
7867 	    ieee80211_node_supports_vht_sgi80(ni))
7868 		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
7869 
7870 	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
7871 	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
7872 }
7873 
7874 int
7875 iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in)
7876 {
7877 	struct ieee80211_node *ni = &in->in_ni;
7878 	struct ieee80211_rateset *rs = &ni->ni_rates;
7879 	struct iwx_tlc_config_cmd_v4 cfg_cmd;
7880 	uint32_t cmd_id;
7881 	int i;
7882 	size_t cmd_size = sizeof(cfg_cmd);
7883 
7884 	memset(&cfg_cmd, 0, sizeof(cfg_cmd));
7885 
7886 	for (i = 0; i < rs->rs_nrates; i++) {
7887 		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
7888 		int idx = iwx_rs_rval2idx(rval);
7889 		if (idx == -1)
7890 			return EINVAL;
7891 		cfg_cmd.non_ht_rates |= (1 << idx);
7892 	}
7893 
7894 	if (ni->ni_flags & IEEE80211_NODE_VHT) {
7895 		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
7896 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
7897 		    htole16(iwx_rs_vht_rates(sc, ni, 1));
7898 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
7899 		    htole16(iwx_rs_vht_rates(sc, ni, 2));
7900 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
7901 		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
7902 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
7903 		    htole16(iwx_rs_ht_rates(sc, ni,
7904 		    IEEE80211_HT_RATESET_SISO));
7905 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
7906 		    htole16(iwx_rs_ht_rates(sc, ni,
7907 		    IEEE80211_HT_RATESET_MIMO2));
7908 	} else
7909 		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
7910 
7911 	cfg_cmd.sta_id = IWX_STATION_ID;
7912 	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
7913 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
7914 	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
7915 	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
7916 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
7917 	else
7918 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
7919 	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
7920 	if (ni->ni_flags & IEEE80211_NODE_VHT)
7921 		cfg_cmd.max_mpdu_len = htole16(3895);
7922 	else
7923 		cfg_cmd.max_mpdu_len = htole16(3839);
7924 	if (ni->ni_flags & IEEE80211_NODE_HT) {
7925 		if (ieee80211_node_supports_ht_sgi20(ni)) {
7926 			cfg_cmd.sgi_ch_width_supp |= (1 <<
7927 			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
7928 		}
7929 		if (ieee80211_node_supports_ht_sgi40(ni)) {
7930 			cfg_cmd.sgi_ch_width_supp |= (1 <<
7931 			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
7932 		}
7933 	}
7934 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
7935 	    ieee80211_node_supports_vht_sgi80(ni))
7936 		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
7937 
7938 	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
7939 	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
7940 }
7941 
7942 int
7943 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
7944 {
7945 	int cmd_ver;
7946 
7947 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7948 	    IWX_TLC_MNG_CONFIG_CMD);
7949 	if (cmd_ver == 4)
7950 		return iwx_rs_init_v4(sc, in);
7951 	return iwx_rs_init_v3(sc, in);
7952 }
7953 
7954 void
7955 iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
7956 {
7957 	struct ieee80211com *ic = &sc->sc_ic;
7958 	struct ieee80211_node *ni = ic->ic_bss;
7959 	struct ieee80211_rateset *rs = &ni->ni_rates;
7960 	uint32_t rate_n_flags;
7961 	uint8_t plcp, rval;
7962 	int i, cmd_ver, rate_n_flags_ver2 = 0;
7963 
7964 	if (notif->sta_id != IWX_STATION_ID ||
7965 	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
7966 		return;
7967 
7968 	rate_n_flags = le32toh(notif->rate);
7969 
7970 	cmd_ver = iwx_lookup_notif_ver(sc, IWX_DATA_PATH_GROUP,
7971 	    IWX_TLC_MNG_UPDATE_NOTIF);
7972 	if (cmd_ver != IWX_FW_CMD_VER_UNKNOWN && cmd_ver >= 3)
7973 		rate_n_flags_ver2 = 1;
7974 	if (rate_n_flags_ver2) {
7975 		uint32_t mod_type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
7976 		if (mod_type == IWX_RATE_MCS_VHT_MSK) {
7977 			ni->ni_txmcs = (rate_n_flags &
7978 			    IWX_RATE_HT_MCS_CODE_MSK);
7979 			ni->ni_vht_ss = ((rate_n_flags &
7980 			    IWX_RATE_MCS_NSS_MSK) >>
7981 			    IWX_RATE_MCS_NSS_POS) + 1;
7982 			return;
7983 		} else if (mod_type == IWX_RATE_MCS_HT_MSK) {
7984 			ni->ni_txmcs = IWX_RATE_HT_MCS_INDEX(rate_n_flags);
7985 			return;
7986 		}
7987 	} else {
7988 		if (rate_n_flags & IWX_RATE_MCS_VHT_MSK_V1) {
7989 			ni->ni_txmcs = (rate_n_flags &
7990 			    IWX_RATE_VHT_MCS_RATE_CODE_MSK);
7991 			ni->ni_vht_ss = ((rate_n_flags &
7992 			    IWX_RATE_VHT_MCS_NSS_MSK) >>
7993 			    IWX_RATE_VHT_MCS_NSS_POS) + 1;
7994 			return;
7995 		} else if (rate_n_flags & IWX_RATE_MCS_HT_MSK_V1) {
7996 			ni->ni_txmcs = (rate_n_flags &
7997 			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
7998 			    IWX_RATE_HT_MCS_NSS_MSK_V1));
7999 			return;
8000 		}
8001 	}
8002 
8003 	if (rate_n_flags_ver2) {
8004 		const struct ieee80211_rateset *rs;
8005 		uint32_t ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
8006 		if (rate_n_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK)
8007 			rs = &ieee80211_std_rateset_11a;
8008 		else
8009 			rs = &ieee80211_std_rateset_11b;
8010 		if (ridx < rs->rs_nrates)
8011 			rval = (rs->rs_rates[ridx] & IEEE80211_RATE_VAL);
8012 		else
8013 			rval = 0;
8014 	} else {
8015 		plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
8016 
8017 		rval = 0;
8018 		for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
8019 			if (iwx_rates[i].plcp == plcp) {
8020 				rval = iwx_rates[i].rate;
8021 				break;
8022 			}
8023 		}
8024 	}
8025 
8026 	if (rval) {
8027 		uint8_t rv;
8028 		for (i = 0; i < rs->rs_nrates; i++) {
8029 			rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
8030 			if (rv == rval) {
8031 				ni->ni_txrate = i;
8032 				break;
8033 			}
8034 		}
8035 	}
8036 }
8037 
8038 int
8039 iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
8040     uint8_t chains_static, uint8_t chains_dynamic)
8041 {
8042 	struct iwx_rlc_config_cmd cmd;
8043 	uint32_t cmd_id;
8044 	uint8_t active_cnt, idle_cnt;
8045 
8046 	memset(&cmd, 0, sizeof(cmd));
8047 
8048 	idle_cnt = chains_static;
8049 	active_cnt = chains_dynamic;
8050 
8051 	cmd.phy_id = htole32(phyctxt->id),
8052 	cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
8053 	    IWX_PHY_RX_CHAIN_VALID_POS);
8054 	cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
8055 	cmd.rlc.rx_chain_info |= htole32(active_cnt <<
8056 	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
8057 
8058 	cmd_id = iwx_cmd_id(IWX_RLC_CONFIG_CMD, IWX_DATA_PATH_GROUP, 2);
8059 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
8060 }
8061 
8062 int
8063 iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
8064     struct ieee80211_channel *chan, uint8_t chains_static,
8065     uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
8066     uint8_t vht_chan_width)
8067 {
8068 	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
8069 	int err;
8070 
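	/*
	 * With CDB-capable firmware a PHY context apparently cannot be
	 * moved to a channel in a different band with a MODIFY action
	 * (iwlwifi does the same dance), so remove and re-add it instead.
	 */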
8071 	if (isset(sc->sc_enabled_capa,
8072 	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
8073 	    (phyctxt->channel->ic_flags & band_flags) !=
8074 	    (chan->ic_flags & band_flags)) {
8075 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
8076 		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
8077 		    vht_chan_width);
8078 		if (err) {
8079 			printf("%s: could not remove PHY context "
8080 			    "(error %d)\n", DEVNAME(sc), err);
8081 			return err;
8082 		}
8083 		phyctxt->channel = chan;
8084 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
8085 		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
8086 		    vht_chan_width);
8087 		if (err) {
8088 			printf("%s: could not add PHY context "
8089 			    "(error %d)\n", DEVNAME(sc), err);
8090 			return err;
8091 		}
8092 	} else {
8093 		phyctxt->channel = chan;
8094 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
8095 		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
8096 		    vht_chan_width);
8097 		if (err) {
8098 			printf("%s: could not update PHY context (error %d)\n",
8099 			    DEVNAME(sc), err);
8100 			return err;
8101 		}
8102 	}
8103 
8104 	phyctxt->sco = sco;
8105 	phyctxt->vht_chan_width = vht_chan_width;
8106 
8107 	if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
8108 	    IWX_RLC_CONFIG_CMD) == 2)
8109 		return iwx_phy_send_rlc(sc, phyctxt,
8110 		    chains_static, chains_dynamic);
8111 
8112 	return 0;
8113 }
8114 
8115 int
8116 iwx_auth(struct iwx_softc *sc)
8117 {
8118 	struct ieee80211com *ic = &sc->sc_ic;
8119 	struct iwx_node *in = (void *)ic->ic_bss;
8120 	uint32_t duration;
8121 	int generation = sc->sc_generation, err;
8122 
8123 	splassert(IPL_NET);
8124 
8125 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8126 		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8127 		    ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8128 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8129 		if (err)
8130 			return err;
8131 	} else {
8132 		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8133 		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8134 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8135 		if (err)
8136 			return err;
8137 	}
8138 	in->in_phyctxt = &sc->sc_phyctxt[0];
8139 	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
8140 
8141 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
8142 	if (err) {
8143 		printf("%s: could not add MAC context (error %d)\n",
8144 		    DEVNAME(sc), err);
8145 		return err;
8146 	}
8147 	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;
8148 
8149 	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
8150 	if (err) {
8151 		printf("%s: could not add binding (error %d)\n",
8152 		    DEVNAME(sc), err);
8153 		goto rm_mac_ctxt;
8154 	}
8155 	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;
8156 
8157 	err = iwx_add_sta_cmd(sc, in, 0);
8158 	if (err) {
8159 		printf("%s: could not add sta (error %d)\n",
8160 		    DEVNAME(sc), err);
8161 		goto rm_binding;
8162 	}
8163 	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;
8164 
8165 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8166 		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
8167 		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
8168 		    IWX_TX_RING_COUNT);
8169 		if (err)
8170 			goto rm_sta;
8171 		return 0;
8172 	}
8173 
8174 	err = iwx_enable_mgmt_queue(sc);
8175 	if (err)
8176 		goto rm_sta;
8177 
8178 	err = iwx_clear_statistics(sc);
8179 	if (err)
8180 		goto rm_mgmt_queue;
8181 
8182 	/*
8183 	 * Prevent the FW from wandering off channel during association
8184 	 * by "protecting" the session with a time event.
8185 	 */
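	/*
	 * Durations are in TU. Nine beacon intervals (or 900 TU if the
	 * beacon interval is not yet known) should give the firmware
	 * ample time on-channel for the auth/assoc exchange.
	 */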
8186 	if (in->in_ni.ni_intval)
8187 		duration = in->in_ni.ni_intval * 9;
8188 	else
8189 		duration = 900;
8190 	return iwx_schedule_session_protection(sc, in, duration);
8191 rm_mgmt_queue:
8192 	if (generation == sc->sc_generation)
8193 		iwx_disable_mgmt_queue(sc);
8194 rm_sta:
8195 	if (generation == sc->sc_generation) {
8196 		iwx_rm_sta_cmd(sc, in);
8197 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8198 	}
8199 rm_binding:
8200 	if (generation == sc->sc_generation) {
8201 		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
8202 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8203 	}
8204 rm_mac_ctxt:
8205 	if (generation == sc->sc_generation) {
8206 		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
8207 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8208 	}
8209 	return err;
8210 }
8211 
8212 int
8213 iwx_deauth(struct iwx_softc *sc)
8214 {
8215 	struct ieee80211com *ic = &sc->sc_ic;
8216 	struct iwx_node *in = (void *)ic->ic_bss;
8217 	int err;
8218 
8219 	splassert(IPL_NET);
8220 
8221 	iwx_unprotect_session(sc, in);
8222 
8223 	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
8224 		err = iwx_rm_sta(sc, in);
8225 		if (err)
8226 			return err;
8227 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8228 	}
8229 
8230 	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
8231 		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
8232 		if (err) {
8233 			printf("%s: could not remove binding (error %d)\n",
8234 			    DEVNAME(sc), err);
8235 			return err;
8236 		}
8237 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8238 	}
8239 
8240 	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
8241 		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
8242 		if (err) {
8243 			printf("%s: could not remove MAC context (error %d)\n",
8244 			    DEVNAME(sc), err);
8245 			return err;
8246 		}
8247 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8248 	}
8249 
8250 	/* Move unused PHY context to a default channel. */
8251 	err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8252 	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8253 	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8254 	if (err)
8255 		return err;
8256 
8257 	return 0;
8258 }
8259 
8260 int
8261 iwx_run(struct iwx_softc *sc)
8262 {
8263 	struct ieee80211com *ic = &sc->sc_ic;
8264 	struct iwx_node *in = (void *)ic->ic_bss;
8265 	struct ieee80211_node *ni = &in->in_ni;
8266 	int err;
8267 
8268 	splassert(IPL_NET);
8269 
8270 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8271 		/* Add a MAC context and a sniffing STA. */
8272 		err = iwx_auth(sc);
8273 		if (err)
8274 			return err;
8275 	}
8276 
8277 	/* Configure Rx chains for MIMO and configure 40 MHz channel. */
8278 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8279 		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
8280 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
8281 		    in->in_phyctxt->channel, chains, chains,
8282 		    0, IEEE80211_HTOP0_SCO_SCN,
8283 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8284 		if (err) {
8285 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8286 			return err;
8287 		}
8288 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
8289 		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
8290 		uint8_t sco, vht_chan_width;
8291 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
8292 		    ieee80211_node_supports_ht_chan40(ni))
8293 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
8294 		else
8295 			sco = IEEE80211_HTOP0_SCO_SCN;
8296 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
8297 		    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
8298 		    ieee80211_node_supports_vht_chan80(ni))
8299 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
8300 		else
8301 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
8302 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
8303 		    in->in_phyctxt->channel, chains, chains,
8304 		    0, sco, vht_chan_width);
8305 		if (err) {
8306 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8307 			return err;
8308 		}
8309 	}
8310 
8311 	/* Update STA again to apply HT and VHT settings. */
8312 	err = iwx_add_sta_cmd(sc, in, 1);
8313 	if (err) {
8314 		printf("%s: could not update STA (error %d)\n",
8315 		    DEVNAME(sc), err);
8316 		return err;
8317 	}
8318 
8319 	/* We have now been assigned an associd by the AP. */
8320 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
8321 	if (err) {
8322 		printf("%s: failed to update MAC\n", DEVNAME(sc));
8323 		return err;
8324 	}
8325 
8326 	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
8327 	if (err) {
8328 		printf("%s: could not set sf full on (error %d)\n",
8329 		    DEVNAME(sc), err);
8330 		return err;
8331 	}
8332 
8333 	err = iwx_allow_mcast(sc);
8334 	if (err) {
8335 		printf("%s: could not allow mcast (error %d)\n",
8336 		    DEVNAME(sc), err);
8337 		return err;
8338 	}
8339 
8340 	err = iwx_power_update_device(sc);
8341 	if (err) {
8342 		printf("%s: could not send power command (error %d)\n",
8343 		    DEVNAME(sc), err);
8344 		return err;
8345 	}
8346 #ifdef notyet
8347 	/*
8348 	 * Disabled for now. Default beacon filter settings
8349 	 * prevent net80211 from getting ERP and HT protection
8350 	 * updates from beacons.
8351 	 */
8352 	err = iwx_enable_beacon_filter(sc, in);
8353 	if (err) {
8354 		printf("%s: could not enable beacon filter\n",
8355 		    DEVNAME(sc));
8356 		return err;
8357 	}
8358 #endif
8359 	err = iwx_power_mac_update_mode(sc, in);
8360 	if (err) {
8361 		printf("%s: could not update MAC power (error %d)\n",
8362 		    DEVNAME(sc), err);
8363 		return err;
8364 	}
8365 
8366 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8367 		return 0;
8368 
8369 	/* Start at the lowest available bit-rate; the firmware will raise it. */
8370 	in->in_ni.ni_txrate = 0;
8371 	in->in_ni.ni_txmcs = 0;
8372 
8373 	err = iwx_rs_init(sc, in);
8374 	if (err) {
8375 		printf("%s: could not init rate scaling (error %d)\n",
8376 		    DEVNAME(sc), err);
8377 		return err;
8378 	}
8379 
8380 	return 0;
8381 }
8382 
8383 int
8384 iwx_run_stop(struct iwx_softc *sc)
8385 {
8386 	struct ieee80211com *ic = &sc->sc_ic;
8387 	struct iwx_node *in = (void *)ic->ic_bss;
8388 	struct ieee80211_node *ni = &in->in_ni;
8389 	int err, i;
8390 
8391 	splassert(IPL_NET);
8392 
8393 	err = iwx_flush_sta(sc, in);
8394 	if (err) {
8395 		printf("%s: could not flush Tx path (error %d)\n",
8396 		    DEVNAME(sc), err);
8397 		return err;
8398 	}
8399 
8400 	/*
8401 	 * Stop Rx BA sessions now. We cannot rely on the BA task
8402 	 * for this when moving out of RUN state since it runs in a
8403 	 * separate thread.
8404 	 * Note that in->in_ni (struct ieee80211_node) already represents
8405 	 * our new access point in case we are roaming between APs.
8406 	 * This means we cannot rely on struct ieee80211_node to tell
8407 	 * us which BA sessions exist.
8408 	 */
8409 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
8410 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
8411 		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
8412 			continue;
8413 		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
8414 	}
8415 
8416 	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
8417 	if (err)
8418 		return err;
8419 
8420 	err = iwx_disable_beacon_filter(sc);
8421 	if (err) {
8422 		printf("%s: could not disable beacon filter (error %d)\n",
8423 		    DEVNAME(sc), err);
8424 		return err;
8425 	}
8426 
8427 	/* Mark station as disassociated. */
8428 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
8429 	if (err) {
8430 		printf("%s: failed to update MAC\n", DEVNAME(sc));
8431 		return err;
8432 	}
8433 
8434 	return 0;
8435 }
8436 
8437 struct ieee80211_node *
8438 iwx_node_alloc(struct ieee80211com *ic)
8439 {
8440 	return malloc(sizeof (struct iwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
8441 }
8442 
8443 int
8444 iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
8445     struct ieee80211_key *k)
8446 {
8447 	struct iwx_softc *sc = ic->ic_softc;
8448 	struct iwx_node *in = (void *)ni;
8449 	struct iwx_setkey_task_arg *a;
8450 	int err;
8451 
8452 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
8453 		/* Fall back to software crypto for other ciphers. */
8454 		err = ieee80211_set_key(ic, ni, k);
8455 		if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP))
8456 			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
8457 		return err;
8458 	}
8459 
8460 	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
8461 		return ENOSPC;
8462 
8463 	a = &sc->setkey_arg[sc->setkey_cur];
8464 	a->sta_id = IWX_STATION_ID;
8465 	a->ni = ni;
8466 	a->k = k;
8467 	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
8468 	sc->setkey_nkeys++;
8469 	iwx_add_task(sc, systq, &sc->setkey_task);
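	/*
	 * Returning EBUSY signals net80211 that the key is not installed
	 * yet; the setkey task installs it asynchronously in firmware.
	 */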
8470 	return EBUSY;
8471 }
8472 
8473 int
8474 iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
8475     struct ieee80211_key *k)
8476 {
8477 	struct ieee80211com *ic = &sc->sc_ic;
8478 	struct iwx_node *in = (void *)ni;
8479 	struct iwx_add_sta_key_cmd cmd;
8480 	uint32_t status;
8481 	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
8482 	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
8483 	int err;
8484 
8485 	/*
8486 	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
8487 	 * Currently we only implement station mode where 'ni' is always
8488 	 * ic->ic_bss so there is no need to validate arguments beyond this:
8489 	 */
8490 	KASSERT(ni == ic->ic_bss);
8491 
8492 	memset(&cmd, 0, sizeof(cmd));
8493 
8494 	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
8495 	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
8496 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
8497 	    IWX_STA_KEY_FLG_KEYID_MSK));
8498 	if (k->k_flags & IEEE80211_KEY_GROUP) {
8499 		cmd.common.key_offset = 1;
8500 		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
8501 	} else
8502 		cmd.common.key_offset = 0;
8503 
8504 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8505 	cmd.common.sta_id = sta_id;
8506 
8507 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
8508 
8509 	status = IWX_ADD_STA_SUCCESS;
8510 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
8511 	    &status);
8512 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
8513 		return ECANCELED;
8514 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
8515 		err = EIO;
8516 	if (err) {
8517 		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
8518 		    IEEE80211_REASON_AUTH_LEAVE);
8519 		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
8520 		return err;
8521 	}
8522 
8523 	if (k->k_flags & IEEE80211_KEY_GROUP)
8524 		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
8525 	else
8526 		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;
8527 
8528 	if ((in->in_flags & want_keymask) == want_keymask) {
8529 		DPRINTF(("marking port %s valid\n",
8530 		    ether_sprintf(ni->ni_macaddr)));
8531 		ni->ni_port_valid = 1;
8532 		ieee80211_set_link_state(ic, LINK_STATE_UP);
8533 	}
8534 
8535 	return 0;
8536 }
8537 
8538 void
8539 iwx_setkey_task(void *arg)
8540 {
8541 	struct iwx_softc *sc = arg;
8542 	struct iwx_setkey_task_arg *a;
8543 	int err = 0, s = splnet();
8544 
8545 	while (sc->setkey_nkeys > 0) {
8546 		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
8547 			break;
8548 		a = &sc->setkey_arg[sc->setkey_tail];
8549 		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
8550 		a->sta_id = 0;
8551 		a->ni = NULL;
8552 		a->k = NULL;
8553 		sc->setkey_tail = (sc->setkey_tail + 1) %
8554 		    nitems(sc->setkey_arg);
8555 		sc->setkey_nkeys--;
8556 	}
8557 
8558 	refcnt_rele_wake(&sc->task_refs);
8559 	splx(s);
8560 }
8561 
8562 void
8563 iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
8564     struct ieee80211_key *k)
8565 {
8566 	struct iwx_softc *sc = ic->ic_softc;
8567 	struct iwx_add_sta_key_cmd cmd;
8568 
8569 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
8570 		/* Fall back to software crypto for other ciphers. */
8571 		ieee80211_delete_key(ic, ni, k);
8572 		return;
8573 	}
8574 
8575 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
8576 		return;
8577 
8578 	memset(&cmd, 0, sizeof(cmd));
8579 
8580 	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
8581 	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
8582 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
8583 	    IWX_STA_KEY_FLG_KEYID_MSK));
8584 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8585 	if (k->k_flags & IEEE80211_KEY_GROUP)
8586 		cmd.common.key_offset = 1;
8587 	else
8588 		cmd.common.key_offset = 0;
8589 	cmd.common.sta_id = IWX_STATION_ID;
8590 
8591 	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
8592 }
8593 
8594 int
8595 iwx_media_change(struct ifnet *ifp)
8596 {
8597 	int err;
8598 
8599 	err = ieee80211_media_change(ifp);
8600 	if (err != ENETRESET)
8601 		return err;
8602 
8603 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
8604 	    (IFF_UP | IFF_RUNNING)) {
8605 		iwx_stop(ifp);
8606 		err = iwx_init(ifp);
8607 	}
8608 	return err;
8609 }
8610 
8611 void
8612 iwx_newstate_task(void *psc)
8613 {
8614 	struct iwx_softc *sc = (struct iwx_softc *)psc;
8615 	struct ieee80211com *ic = &sc->sc_ic;
8616 	enum ieee80211_state nstate = sc->ns_nstate;
8617 	enum ieee80211_state ostate = ic->ic_state;
8618 	int arg = sc->ns_arg;
8619 	int err = 0, s = splnet();
8620 
8621 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
8622 		/* iwx_stop() is waiting for us. */
8623 		refcnt_rele_wake(&sc->task_refs);
8624 		splx(s);
8625 		return;
8626 	}
8627 
8628 	if (ostate == IEEE80211_S_SCAN) {
8629 		if (nstate == ostate) {
8630 			if (sc->sc_flags & IWX_FLAG_SCANNING) {
8631 				refcnt_rele_wake(&sc->task_refs);
8632 				splx(s);
8633 				return;
8634 			}
8635 			/* Firmware is no longer scanning. Do another scan. */
8636 			goto next_scan;
8637 		}
8638 	}
8639 
8640 	if (nstate <= ostate) {
8641 		switch (ostate) {
8642 		case IEEE80211_S_RUN:
8643 			err = iwx_run_stop(sc);
8644 			if (err)
8645 				goto out;
8646 			/* FALLTHROUGH */
8647 		case IEEE80211_S_ASSOC:
8648 		case IEEE80211_S_AUTH:
8649 			if (nstate <= IEEE80211_S_AUTH) {
8650 				err = iwx_deauth(sc);
8651 				if (err)
8652 					goto out;
8653 			}
8654 			/* FALLTHROUGH */
8655 		case IEEE80211_S_SCAN:
8656 		case IEEE80211_S_INIT:
8657 			break;
8658 		}
8659 
8660 		/* Die now if iwx_stop() was called while we were sleeping. */
8661 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
8662 			refcnt_rele_wake(&sc->task_refs);
8663 			splx(s);
8664 			return;
8665 		}
8666 	}
8667 
8668 	switch (nstate) {
8669 	case IEEE80211_S_INIT:
8670 		break;
8671 
8672 	case IEEE80211_S_SCAN:
8673 next_scan:
8674 		err = iwx_scan(sc);
8675 		if (err)
8676 			break;
8677 		refcnt_rele_wake(&sc->task_refs);
8678 		splx(s);
8679 		return;
8680 
8681 	case IEEE80211_S_AUTH:
8682 		err = iwx_auth(sc);
8683 		break;
8684 
8685 	case IEEE80211_S_ASSOC:
8686 		break;
8687 
8688 	case IEEE80211_S_RUN:
8689 		err = iwx_run(sc);
8690 		break;
8691 	}
8692 
8693 out:
8694 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
8695 		if (err)
8696 			task_add(systq, &sc->init_task);
8697 		else
8698 			sc->sc_newstate(ic, nstate, arg);
8699 	}
8700 	refcnt_rele_wake(&sc->task_refs);
8701 	splx(s);
8702 }
8703 
8704 int
8705 iwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
8706 {
8707 	struct ifnet *ifp = IC2IFP(ic);
8708 	struct iwx_softc *sc = ifp->if_softc;
8709 
8710 	/*
8711 	 * Prevent attempts to transition towards the same state, unless
8712 	 * we are scanning in which case a SCAN -> SCAN transition
8713 	 * triggers another scan iteration. And AUTH -> AUTH is needed
8714 	 * to support band-steering.
8715 	 */
8716 	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
8717 	    nstate != IEEE80211_S_AUTH)
8718 		return 0;
8719 
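	/*
	 * We are leaving RUN state; cancel queued tasks which only
	 * make sense while associated.
	 */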
8720 	if (ic->ic_state == IEEE80211_S_RUN) {
8721 		iwx_del_task(sc, systq, &sc->ba_task);
8722 		iwx_del_task(sc, systq, &sc->setkey_task);
8723 		memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
8724 		sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
8725 		iwx_del_task(sc, systq, &sc->mac_ctxt_task);
8726 		iwx_del_task(sc, systq, &sc->phy_ctxt_task);
8727 		iwx_del_task(sc, systq, &sc->bgscan_done_task);
8728 	}
8729 
8730 	sc->ns_nstate = nstate;
8731 	sc->ns_arg = arg;
8732 
8733 	iwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);
8734 
8735 	return 0;
8736 }
8737 
8738 void
8739 iwx_endscan(struct iwx_softc *sc)
8740 {
8741 	struct ieee80211com *ic = &sc->sc_ic;
8742 
8743 	if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
8744 		return;
8745 
8746 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8747 	ieee80211_end_scan(&ic->ic_if);
8748 }
8749 
8750 /*
8751  * Aging and idle timeouts for the different possible scenarios
8752  * in default configuration
8753  */
8754 static const uint32_t
8755 iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
8756 	{
8757 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
8758 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
8759 	},
8760 	{
8761 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
8762 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
8763 	},
8764 	{
8765 		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
8766 		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
8767 	},
8768 	{
8769 		htole32(IWX_SF_BA_AGING_TIMER_DEF),
8770 		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
8771 	},
8772 	{
8773 		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
8774 		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
8775 	},
8776 };
8777 
8778 /*
8779  * Aging and idle timeouts for the different possible scenarios
8780  * in single BSS MAC configuration.
8781  */
8782 static const uint32_t
8783 iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
8784 	{
8785 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
8786 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
8787 	},
8788 	{
8789 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
8790 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
8791 	},
8792 	{
8793 		htole32(IWX_SF_MCAST_AGING_TIMER),
8794 		htole32(IWX_SF_MCAST_IDLE_TIMER)
8795 	},
8796 	{
8797 		htole32(IWX_SF_BA_AGING_TIMER),
8798 		htole32(IWX_SF_BA_IDLE_TIMER)
8799 	},
8800 	{
8801 		htole32(IWX_SF_TX_RE_AGING_TIMER),
8802 		htole32(IWX_SF_TX_RE_IDLE_TIMER)
8803 	},
8804 };
8805 
8806 void
8807 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
8808     struct ieee80211_node *ni)
8809 {
8810 	int i, j, watermark;
8811 
8812 	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
8813 
8814 	/*
8815 	 * If we are in association flow - check antenna configuration
8816 	 * capabilities of the AP station, and choose the watermark accordingly.
8817 	 */
8818 	if (ni) {
8819 		if (ni->ni_flags & IEEE80211_NODE_HT) {
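			/* A non-zero MCS 8-15 set implies 2 Rx spatial streams. */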
8820 			if (ni->ni_rxmcs[1] != 0)
8821 				watermark = IWX_SF_W_MARK_MIMO2;
8822 			else
8823 				watermark = IWX_SF_W_MARK_SISO;
8824 		} else {
8825 			watermark = IWX_SF_W_MARK_LEGACY;
8826 		}
8827 	/* default watermark value for unassociated mode. */
8828 	} else {
8829 		watermark = IWX_SF_W_MARK_MIMO2;
8830 	}
8831 	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
8832 
8833 	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
8834 		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
8835 			sf_cmd->long_delay_timeouts[i][j] =
8836 					htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
8837 		}
8838 	}
8839 
8840 	if (ni) {
8841 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
8842 		       sizeof(iwx_sf_full_timeout));
8843 	} else {
8844 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
8845 		       sizeof(iwx_sf_full_timeout_def));
8846 	}
8847 
8848 }
8849 
8850 int
8851 iwx_sf_config(struct iwx_softc *sc, int new_state)
8852 {
8853 	struct ieee80211com *ic = &sc->sc_ic;
8854 	struct iwx_sf_cfg_cmd sf_cmd = {
8855 		.state = htole32(new_state),
8856 	};
8857 	int err = 0;
8858 
8859 	switch (new_state) {
8860 	case IWX_SF_UNINIT:
8861 	case IWX_SF_INIT_OFF:
8862 		iwx_fill_sf_command(sc, &sf_cmd, NULL);
8863 		break;
8864 	case IWX_SF_FULL_ON:
8865 		iwx_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
8866 		break;
8867 	default:
8868 		return EINVAL;
8869 	}
8870 
8871 	err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
8872 				   sizeof(sf_cmd), &sf_cmd);
8873 	return err;
8874 }
8875 
8876 int
8877 iwx_send_bt_init_conf(struct iwx_softc *sc)
8878 {
8879 	struct iwx_bt_coex_cmd bt_cmd;
8880 
8881 	bt_cmd.mode = htole32(IWX_BT_COEX_WIFI);
8882 	bt_cmd.enabled_modules = 0;
8883 
8884 	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
8885 	    &bt_cmd);
8886 }
8887 
8888 int
8889 iwx_send_soc_conf(struct iwx_softc *sc)
8890 {
8891 	struct iwx_soc_configuration_cmd cmd;
8892 	int err;
8893 	uint32_t cmd_id, flags = 0;
8894 
8895 	memset(&cmd, 0, sizeof(cmd));
8896 
8897 	/*
8898 	 * In VER_1 of this command, the discrete value is considered
8899 	 * an integer; in VER_2, it's a bitmask.  Since we have only 2
8900 	 * values in VER_1, this is backwards-compatible with VER_2,
8901 	 * as long as we don't set any other flag bits.
8902 	 */
8903 	if (!sc->sc_integrated) { /* VER_1 */
8904 		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
8905 	} else { /* VER_2 */
8906 		uint8_t scan_cmd_ver;
8907 		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
8908 			flags |= (sc->sc_ltr_delay &
8909 			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
8910 		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
8911 		    IWX_SCAN_REQ_UMAC);
8912 		if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
8913 		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
8914 			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
8915 	}
8916 	cmd.flags = htole32(flags);
8917 
8918 	cmd.latency = htole32(sc->sc_xtal_latency);
8919 
8920 	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
8921 	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
8922 	if (err)
8923 		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
8924 	return err;
8925 }
8926 
8927 int
8928 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
8929 {
8930 	struct iwx_mcc_update_cmd mcc_cmd;
8931 	struct iwx_host_cmd hcmd = {
8932 		.id = IWX_MCC_UPDATE_CMD,
8933 		.flags = IWX_CMD_WANT_RESP,
8934 		.data = { &mcc_cmd },
8935 	};
8936 	struct iwx_rx_packet *pkt;
8937 	struct iwx_mcc_update_resp *resp;
8938 	size_t resp_len;
8939 	int err;
8940 
8941 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
8942 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
8943 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
8944 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
8945 		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
8946 	else
8947 		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
8948 
8949 	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
8950 	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
8951 
8952 	err = iwx_send_cmd(sc, &hcmd);
8953 	if (err)
8954 		return err;
8955 
8956 	pkt = hcmd.resp_pkt;
8957 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
8958 		err = EIO;
8959 		goto out;
8960 	}
8961 
8962 	resp_len = iwx_rx_packet_payload_len(pkt);
8963 	if (resp_len < sizeof(*resp)) {
8964 		err = EIO;
8965 		goto out;
8966 	}
8967 
8968 	resp = (void *)pkt->data;
8969 	if (resp_len != sizeof(*resp) +
8970 	    resp->n_channels * sizeof(resp->channels[0])) {
8971 		err = EIO;
8972 		goto out;
8973 	}
8974 
8975 	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%x n_channels=%u\n",
8976 	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
8977 
8978 	/* Update channel map for net80211 and our scan configuration. */
8979 	iwx_init_channel_map(sc, NULL, resp->channels, resp->n_channels);
8980 
8981 out:
8982 	iwx_free_resp(sc, &hcmd);
8983 
8984 	return err;
8985 }
8986 
8987 int
8988 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
8989 {
8990 	struct iwx_temp_report_ths_cmd cmd;
8991 	int err;
8992 
8993 	/*
8994 	 * In order to give responsibility for critical-temperature-kill
8995 	 * and TX backoff to FW, we need to send an empty temperature
8996 	 * reporting command at init time.
8997 	 */
8998 	memset(&cmd, 0, sizeof(cmd));
8999 
9000 	err = iwx_send_cmd_pdu(sc,
9001 	    IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
9002 	    0, sizeof(cmd), &cmd);
9003 	if (err)
9004 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
9005 		    DEVNAME(sc), err);
9006 
9007 	return err;
9008 }
9009 
9010 int
9011 iwx_init_hw(struct iwx_softc *sc)
9012 {
9013 	struct ieee80211com *ic = &sc->sc_ic;
9014 	int err, i;
9015 
9016 	err = iwx_run_init_mvm_ucode(sc, 0);
9017 	if (err)
9018 		return err;
9019 
9020 	if (!iwx_nic_lock(sc))
9021 		return EBUSY;
9022 
9023 	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
9024 	if (err) {
9025 		printf("%s: could not init tx ant config (error %d)\n",
9026 		    DEVNAME(sc), err);
9027 		goto err;
9028 	}
9029 
9030 	if (sc->sc_tx_with_siso_diversity) {
9031 		err = iwx_send_phy_cfg_cmd(sc);
9032 		if (err) {
9033 			printf("%s: could not send phy config (error %d)\n",
9034 			    DEVNAME(sc), err);
9035 			goto err;
9036 		}
9037 	}
9038 
9039 	err = iwx_send_bt_init_conf(sc);
9040 	if (err) {
9041 		printf("%s: could not init bt coex (error %d)\n",
9042 		    DEVNAME(sc), err);
9043 		goto err;
9044 	}
9045 
9046 	err = iwx_send_soc_conf(sc);
9047 	if (err)
9048 		goto err;
9049 
9050 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
9051 		err = iwx_send_dqa_cmd(sc);
9052 		if (err)
9053 			goto err;
9054 	}
9055 
9056 	for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
9057 		/*
9058 		 * The channel used here isn't relevant as it's
9059 		 * going to be overwritten in the other flows.
9060 		 * For now use the first channel we have.
9061 		 */
9062 		sc->sc_phyctxt[i].id = i;
9063 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
9064 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
9065 		    IWX_FW_CTXT_ACTION_ADD, 0, IEEE80211_HTOP0_SCO_SCN,
9066 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
9067 		if (err) {
9068 			printf("%s: could not add phy context %d (error %d)\n",
9069 			    DEVNAME(sc), i, err);
9070 			goto err;
9071 		}
9072 		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
9073 		    IWX_RLC_CONFIG_CMD) == 2) {
9074 			err = iwx_phy_send_rlc(sc, &sc->sc_phyctxt[i], 1, 1);
9075 			if (err) {
9076 				printf("%s: could not configure RLC for PHY "
9077 				    "%d (error %d)\n", DEVNAME(sc), i, err);
9078 				goto err;
9079 			}
9080 		}
9081 	}
9082 
9083 	err = iwx_config_ltr(sc);
9084 	if (err) {
9085 		printf("%s: PCIe LTR configuration failed (error %d)\n",
9086 		    DEVNAME(sc), err);
9087 	}
9088 
9089 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
9090 		err = iwx_send_temp_report_ths_cmd(sc);
9091 		if (err)
9092 			goto err;
9093 	}
9094 
9095 	err = iwx_power_update_device(sc);
9096 	if (err) {
9097 		printf("%s: could not send power command (error %d)\n",
9098 		    DEVNAME(sc), err);
9099 		goto err;
9100 	}
9101 
9102 	if (sc->sc_nvm.lar_enabled) {
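		/* "ZZ" acts as a wildcard; firmware replies with its current MCC. */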
9103 		err = iwx_send_update_mcc_cmd(sc, "ZZ");
9104 		if (err) {
9105 			printf("%s: could not init LAR (error %d)\n",
9106 			    DEVNAME(sc), err);
9107 			goto err;
9108 		}
9109 	}
9110 
9111 	err = iwx_config_umac_scan_reduced(sc);
9112 	if (err) {
9113 		printf("%s: could not configure scan (error %d)\n",
9114 		    DEVNAME(sc), err);
9115 		goto err;
9116 	}
9117 
9118 	err = iwx_disable_beacon_filter(sc);
9119 	if (err) {
9120 		printf("%s: could not disable beacon filter (error %d)\n",
9121 		    DEVNAME(sc), err);
9122 		goto err;
9123 	}
9124 
9125 err:
9126 	iwx_nic_unlock(sc);
9127 	return err;
9128 }
9129 
9130 /* Allow multicast from our BSSID. */
9131 int
9132 iwx_allow_mcast(struct iwx_softc *sc)
9133 {
9134 	struct ieee80211com *ic = &sc->sc_ic;
9135 	struct iwx_node *in = (void *)ic->ic_bss;
9136 	struct iwx_mcast_filter_cmd *cmd;
9137 	size_t size;
9138 	int err;
9139 
9140 	size = roundup(sizeof(*cmd), 4);
9141 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
9142 	if (cmd == NULL)
9143 		return ENOMEM;
9144 	cmd->filter_own = 1;
9145 	cmd->port_id = 0;
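	/*
	 * pass_all accepts all multicast frames; the address list
	 * (count = 0) is then ignored.
	 */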
9146 	cmd->count = 0;
9147 	cmd->pass_all = 1;
9148 	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
9149 
9150 	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
9151 	    0, size, cmd);
9152 	free(cmd, M_DEVBUF, size);
9153 	return err;
9154 }
9155 
9156 int
9157 iwx_init(struct ifnet *ifp)
9158 {
9159 	struct iwx_softc *sc = ifp->if_softc;
9160 	struct ieee80211com *ic = &sc->sc_ic;
9161 	int err, generation;
9162 
9163 	rw_assert_wrlock(&sc->ioctl_rwl);
9164 
9165 	generation = ++sc->sc_generation;
9166 
9167 	err = iwx_preinit(sc);
9168 	if (err)
9169 		return err;
9170 
9171 	err = iwx_start_hw(sc);
9172 	if (err) {
9173 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9174 		return err;
9175 	}
9176 
9177 	err = iwx_init_hw(sc);
9178 	if (err) {
9179 		if (generation == sc->sc_generation)
9180 			iwx_stop_device(sc);
9181 		return err;
9182 	}
9183 
9184 	if (sc->sc_nvm.sku_cap_11n_enable)
9185 		iwx_setup_ht_rates(sc);
9186 	if (sc->sc_nvm.sku_cap_11ac_enable)
9187 		iwx_setup_vht_rates(sc);
9188 
9189 	KASSERT(sc->task_refs.r_refs == 0);
9190 	refcnt_init(&sc->task_refs);
9191 	ifq_clr_oactive(&ifp->if_snd);
9192 	ifp->if_flags |= IFF_RUNNING;
9193 
9194 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
9195 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
9196 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
9197 		return 0;
9198 	}
9199 
9200 	ieee80211_begin_scan(ifp);
9201 
9202 	/*
9203 	 * ieee80211_begin_scan() ends up scheduling iwx_newstate_task().
9204 	 * Wait until the transition to SCAN state has completed.
9205 	 */
9206 	do {
9207 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwxinit",
9208 		    SEC_TO_NSEC(1));
9209 		if (generation != sc->sc_generation)
9210 			return ENXIO;
9211 		if (err) {
9212 			iwx_stop(ifp);
9213 			return err;
9214 		}
9215 	} while (ic->ic_state != IEEE80211_S_SCAN);
9216 
9217 	return 0;
9218 }
9219 
9220 void
9221 iwx_start(struct ifnet *ifp)
9222 {
9223 	struct iwx_softc *sc = ifp->if_softc;
9224 	struct ieee80211com *ic = &sc->sc_ic;
9225 	struct ieee80211_node *ni;
9226 	struct ether_header *eh;
9227 	struct mbuf *m;
9228 
9229 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
9230 		return;
9231 
9232 	for (;;) {
9233 		/* why isn't this done per-queue? */
9234 		if (sc->qfullmsk != 0) {
9235 			ifq_set_oactive(&ifp->if_snd);
9236 			break;
9237 		}
9238 
9239 		/* Don't queue additional frames while flushing Tx queues. */
9240 		if (sc->sc_flags & IWX_FLAG_TXFLUSH)
9241 			break;
9242 
9243 		/* need to send management frames even if we're not RUNning */
9244 		m = mq_dequeue(&ic->ic_mgtq);
9245 		if (m) {
9246 			ni = m->m_pkthdr.ph_cookie;
9247 			goto sendit;
9248 		}
9249 
9250 		if (ic->ic_state != IEEE80211_S_RUN ||
9251 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
9252 			break;
9253 
9254 		m = ifq_dequeue(&ifp->if_snd);
9255 		if (!m)
9256 			break;
9257 		if (m->m_len < sizeof (*eh) &&
9258 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
9259 			ifp->if_oerrors++;
9260 			continue;
9261 		}
9262 #if NBPFILTER > 0
9263 		if (ifp->if_bpf != NULL)
9264 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
9265 #endif
9266 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
9267 			ifp->if_oerrors++;
9268 			continue;
9269 		}
9270 
9271  sendit:
9272 #if NBPFILTER > 0
9273 		if (ic->ic_rawbpf != NULL)
9274 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
9275 #endif
9276 		if (iwx_tx(sc, m, ni) != 0) {
9277 			ieee80211_release_node(ic, ni);
9278 			ifp->if_oerrors++;
9279 			continue;
9280 		}
9281 
9282 		if (ifp->if_flags & IFF_UP)
9283 			ifp->if_timer = 1;
9284 	}
9285 
9286 	return;
9287 }
9288 
9289 void
9290 iwx_stop(struct ifnet *ifp)
9291 {
9292 	struct iwx_softc *sc = ifp->if_softc;
9293 	struct ieee80211com *ic = &sc->sc_ic;
9294 	struct iwx_node *in = (void *)ic->ic_bss;
9295 	int i, s = splnet();
9296 
9297 	rw_assert_wrlock(&sc->ioctl_rwl);
9298 
9299 	sc->sc_flags |= IWX_FLAG_SHUTDOWN; /* Disallow new tasks. */
9300 
9301 	/* Cancel scheduled tasks and let any stale tasks finish up. */
9302 	task_del(systq, &sc->init_task);
9303 	iwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
9304 	iwx_del_task(sc, systq, &sc->ba_task);
9305 	iwx_del_task(sc, systq, &sc->setkey_task);
9306 	memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
9307 	sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
9308 	iwx_del_task(sc, systq, &sc->mac_ctxt_task);
9309 	iwx_del_task(sc, systq, &sc->phy_ctxt_task);
9310 	iwx_del_task(sc, systq, &sc->bgscan_done_task);
9311 	KASSERT(sc->task_refs.r_refs >= 1);
9312 	refcnt_finalize(&sc->task_refs, "iwxstop");
9313 
9314 	iwx_stop_device(sc);
9315 
9316 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
9317 	sc->bgscan_unref_arg = NULL;
9318 	sc->bgscan_unref_arg_size = 0;
9319 
9320 	/* Reset soft state. */
9321 
9322 	sc->sc_generation++;
9323 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
9324 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
9325 		sc->sc_cmd_resp_pkt[i] = NULL;
9326 		sc->sc_cmd_resp_len[i] = 0;
9327 	}
9328 	ifp->if_flags &= ~IFF_RUNNING;
9329 	ifq_clr_oactive(&ifp->if_snd);
9330 
9331 	in->in_phyctxt = NULL;
9332 	in->in_flags = 0;
9333 	IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
9334 
9335 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
9336 	sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
9337 	sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
9338 	sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
9339 	sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
9340 	sc->sc_flags &= ~IWX_FLAG_HW_ERR;
9341 	sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
9342 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
9343 
9344 	sc->sc_rx_ba_sessions = 0;
9345 	sc->ba_rx.start_tidmask = 0;
9346 	sc->ba_rx.stop_tidmask = 0;
9347 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
9348 	sc->ba_tx.start_tidmask = 0;
9349 	sc->ba_tx.stop_tidmask = 0;
9350 
9351 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
9352 	sc->ns_nstate = IEEE80211_S_INIT;
9353 
9354 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
9355 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
9356 		iwx_clear_reorder_buffer(sc, rxba);
9357 	}
9358 	memset(sc->sc_tx_timer, 0, sizeof(sc->sc_tx_timer));
9359 	ifp->if_timer = 0;
9360 
9361 	splx(s);
9362 }
9363 
9364 void
9365 iwx_watchdog(struct ifnet *ifp)
9366 {
9367 	struct iwx_softc *sc = ifp->if_softc;
9368 	int i;
9369 
9370 	ifp->if_timer = 0;
9371 
9372 	/*
9373 	 * We maintain a separate timer for each Tx queue because
9374 	 * Tx aggregation queues can get "stuck" while other queues
9375 	 * keep working. The Linux driver uses a similar workaround.
9376 	 */
9377 	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
9378 		if (sc->sc_tx_timer[i] > 0) {
9379 			if (--sc->sc_tx_timer[i] == 0) {
9380 				printf("%s: device timeout\n", DEVNAME(sc));
9381 				if (ifp->if_flags & IFF_DEBUG) {
9382 					iwx_nic_error(sc);
9383 					iwx_dump_driver_status(sc);
9384 				}
9385 				if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
9386 					task_add(systq, &sc->init_task);
9387 				ifp->if_oerrors++;
9388 				return;
9389 			}
9390 			ifp->if_timer = 1;
9391 		}
9392 	}
9393 
9394 	ieee80211_watchdog(ifp);
9395 }
9396 
9397 int
9398 iwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
9399 {
9400 	struct iwx_softc *sc = ifp->if_softc;
9401 	int s, err = 0, generation = sc->sc_generation;
9402 
9403 	/*
9404 	 * Prevent processes from entering this function while another
9405 	 * process is tsleep'ing in it.
9406 	 */
9407 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
9408 	if (err == 0 && generation != sc->sc_generation) {
9409 		rw_exit(&sc->ioctl_rwl);
9410 		return ENXIO;
9411 	}
9412 	if (err)
9413 		return err;
9414 	s = splnet();
9415 
9416 	switch (cmd) {
9417 	case SIOCSIFADDR:
9418 		ifp->if_flags |= IFF_UP;
9419 		/* FALLTHROUGH */
9420 	case SIOCSIFFLAGS:
9421 		if (ifp->if_flags & IFF_UP) {
9422 			if (!(ifp->if_flags & IFF_RUNNING)) {
9423 				/* Force reload of firmware image from disk. */
9424 				sc->sc_fw.fw_status = IWX_FW_STATUS_NONE;
9425 				err = iwx_init(ifp);
9426 			}
9427 		} else {
9428 			if (ifp->if_flags & IFF_RUNNING)
9429 				iwx_stop(ifp);
9430 		}
9431 		break;
9432 
9433 	default:
9434 		err = ieee80211_ioctl(ifp, cmd, data);
9435 	}
9436 
9437 	if (err == ENETRESET) {
9438 		err = 0;
9439 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
9440 		    (IFF_UP | IFF_RUNNING)) {
9441 			iwx_stop(ifp);
9442 			err = iwx_init(ifp);
9443 		}
9444 	}
9445 
9446 	splx(s);
9447 	rw_exit(&sc->ioctl_rwl);
9448 
9449 	return err;
9450 }
9451 
9452 /*
9453  * Note: This structure is read from the device with IO accesses,
9454  * and the reading already does the endian conversion. As it is
9455  * read with uint32_t-sized accesses, any members with a different size
9456  * need to be ordered correctly though!
9457  */
9458 struct iwx_error_event_table {
9459 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
9460 	uint32_t error_id;		/* type of error */
9461 	uint32_t trm_hw_status0;	/* TRM HW status */
9462 	uint32_t trm_hw_status1;	/* TRM HW status */
9463 	uint32_t blink2;		/* branch link */
9464 	uint32_t ilink1;		/* interrupt link */
9465 	uint32_t ilink2;		/* interrupt link */
9466 	uint32_t data1;		/* error-specific data */
9467 	uint32_t data2;		/* error-specific data */
9468 	uint32_t data3;		/* error-specific data */
9469 	uint32_t bcon_time;		/* beacon timer */
9470 	uint32_t tsf_low;		/* network timestamp function timer */
9471 	uint32_t tsf_hi;		/* network timestamp function timer */
9472 	uint32_t gp1;		/* GP1 timer register */
9473 	uint32_t gp2;		/* GP2 timer register */
9474 	uint32_t fw_rev_type;	/* firmware revision type */
9475 	uint32_t major;		/* uCode version major */
9476 	uint32_t minor;		/* uCode version minor */
9477 	uint32_t hw_ver;		/* HW Silicon version */
9478 	uint32_t brd_ver;		/* HW board version */
9479 	uint32_t log_pc;		/* log program counter */
9480 	uint32_t frame_ptr;		/* frame pointer */
9481 	uint32_t stack_ptr;		/* stack pointer */
9482 	uint32_t hcmd;		/* last host command header */
9483 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
9484 				 * rxtx_flag */
9485 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
9486 				 * host_flag */
9487 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
9488 				 * enc_flag */
9489 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
9490 				 * time_flag */
9491 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
9492 				 * wico interrupt */
9493 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
9494 	uint32_t wait_event;		/* wait event() caller address */
9495 	uint32_t l2p_control;	/* L2pControlField */
9496 	uint32_t l2p_duration;	/* L2pDurationField */
9497 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
9498 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
9499 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
9500 				 * (LMPM_PMG_SEL) */
9501 	uint32_t u_timestamp;	/* date and time of the
9502 				 * compilation */
9503 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
9504 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
9505 
9506 /*
9507  * UMAC error struct - relevant starting from family 8000 chip.
9508  * Note: This structure is read from the device with IO accesses,
9509  * and the reading already does the endian conversion. As it is
9510  * read with u32-sized accesses, any members with a different size
9511  * need to be ordered correctly though!
9512  */
9513 struct iwx_umac_error_event_table {
9514 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
9515 	uint32_t error_id;	/* type of error */
9516 	uint32_t blink1;	/* branch link */
9517 	uint32_t blink2;	/* branch link */
9518 	uint32_t ilink1;	/* interrupt link */
9519 	uint32_t ilink2;	/* interrupt link */
9520 	uint32_t data1;		/* error-specific data */
9521 	uint32_t data2;		/* error-specific data */
9522 	uint32_t data3;		/* error-specific data */
9523 	uint32_t umac_major;
9524 	uint32_t umac_minor;
9525 	uint32_t frame_pointer;	/* core register 27*/
9526 	uint32_t stack_pointer;	/* core register 28 */
9527 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
9528 	uint32_t nic_isr_pref;	/* ISR status register */
9529 } __packed;
9530 
9531 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
9532 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
9533 
9534 void
9535 iwx_nic_umac_error(struct iwx_softc *sc)
9536 {
9537 	struct iwx_umac_error_event_table table;
9538 	uint32_t base;
9539 
9540 	base = sc->sc_uc.uc_umac_error_event_table;
9541 
9542 	if (base < 0x400000) {
9543 		printf("%s: Invalid error log pointer 0x%08x\n",
9544 		    DEVNAME(sc), base);
9545 		return;
9546 	}
9547 
9548 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
9549 		printf("%s: reading errlog failed\n", DEVNAME(sc));
9550 		return;
9551 	}
9552 
9553 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
9554 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
9555 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
9556 			sc->sc_flags, table.valid);
9557 	}
9558 
9559 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
9560 		iwx_desc_lookup(table.error_id));
9561 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
9562 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
9563 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
9564 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
9565 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
9566 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
9567 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
9568 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
9569 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
9570 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
9571 	    table.frame_pointer);
9572 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
9573 	    table.stack_pointer);
9574 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
9575 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
9576 	    table.nic_isr_pref);
9577 }
9578 
9579 #define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
9580 static struct {
9581 	const char *name;
9582 	uint8_t num;
9583 } advanced_lookup[] = {
9584 	{ "NMI_INTERRUPT_WDG", 0x34 },
9585 	{ "SYSASSERT", 0x35 },
9586 	{ "UCODE_VERSION_MISMATCH", 0x37 },
9587 	{ "BAD_COMMAND", 0x38 },
9588 	{ "BAD_COMMAND", 0x39 },
9589 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
9590 	{ "FATAL_ERROR", 0x3D },
9591 	{ "NMI_TRM_HW_ERR", 0x46 },
9592 	{ "NMI_INTERRUPT_TRM", 0x4C },
9593 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
9594 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
9595 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
9596 	{ "NMI_INTERRUPT_HOST", 0x66 },
9597 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
9598 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
9599 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
9600 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
9601 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
9602 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
9603 	{ "ADVANCED_SYSASSERT", 0 },
9604 };
9605 
9606 const char *
9607 iwx_desc_lookup(uint32_t num)
9608 {
9609 	int i;
9610 
9611 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
9612 		if (advanced_lookup[i].num ==
9613 		    (num & ~IWX_FW_SYSASSERT_CPU_MASK))
9614 			return advanced_lookup[i].name;
9615 
9616 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
9617 	return advanced_lookup[i].name;
9618 }
9619 
9620 /*
9621  * Support for dumping the error log seemed like a good idea ...
9622  * but it's mostly hex junk and the only sensible thing is the
9623  * hw/ucode revision (which we know anyway).  Since it's here,
9624  * I'll just leave it in, just in case e.g. the Intel guys want to
9625  * help us decipher some "ADVANCED_SYSASSERT" later.
9626  */
9627 void
9628 iwx_nic_error(struct iwx_softc *sc)
9629 {
9630 	struct iwx_error_event_table table;
9631 	uint32_t base;
9632 
9633 	printf("%s: dumping device error log\n", DEVNAME(sc));
9634 	base = sc->sc_uc.uc_lmac_error_event_table[0];
9635 	if (base < 0x400000) {
9636 		printf("%s: Invalid error log pointer 0x%08x\n",
9637 		    DEVNAME(sc), base);
9638 		return;
9639 	}
9640 
9641 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
9642 		printf("%s: reading errlog failed\n", DEVNAME(sc));
9643 		return;
9644 	}
9645 
9646 	if (!table.valid) {
9647 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
9648 		return;
9649 	}
9650 
9651 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
9652 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
9653 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
9654 		    sc->sc_flags, table.valid);
9655 	}
9656 
9657 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
9658 	    iwx_desc_lookup(table.error_id));
9659 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
9660 	    table.trm_hw_status0);
9661 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
9662 	    table.trm_hw_status1);
9663 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
9664 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
9665 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
9666 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
9667 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
9668 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
9669 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
9670 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
9671 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
9672 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
9673 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
9674 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
9675 	    table.fw_rev_type);
9676 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
9677 	    table.major);
9678 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
9679 	    table.minor);
9680 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
9681 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
9682 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
9683 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
9684 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
9685 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
9686 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
9687 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
9688 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
9689 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
9690 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
9691 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
9692 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
9693 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
9694 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
9695 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
9696 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
9697 
9698 	if (sc->sc_uc.uc_umac_error_event_table)
9699 		iwx_nic_umac_error(sc);
9700 }
9701 
9702 void
9703 iwx_dump_driver_status(struct iwx_softc *sc)
9704 {
9705 	int i;
9706 
9707 	printf("driver status:\n");
9708 	for (i = 0; i < nitems(sc->txq); i++) {
9709 		struct iwx_tx_ring *ring = &sc->txq[i];
9710 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
9711 		    "cur_hw=%-3d queued=%-3d\n",
9712 		    i, ring->qid, ring->cur, ring->cur_hw,
9713 		    ring->queued);
9714 	}
9715 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
9716 	printf("  802.11 state %s\n",
9717 	    ieee80211_state_name[sc->sc_ic.ic_state]);
9718 }
9719 
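/*
 * Sync the DMA map for a command response which follows the Rx packet
 * header, and point the given variable at the start of the payload.
 */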
9720 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
9721 do {									\
9722 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
9723 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
9724 	_var_ = (void *)((_pkt_)+1);					\
9725 } while (/*CONSTCOND*/0)
9726 
9727 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
9728 do {									\
9729 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
9730 	    (_len_), BUS_DMASYNC_POSTREAD);				\
9731 	_ptr_ = (void *)((_pkt_)+1);					\
9732 } while (/*CONSTCOND*/0)
9733 
9734 int
9735 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
9736 {
9737 	int qid, idx, code;
9738 
9739 	qid = pkt->hdr.qid & ~0x80;
9740 	idx = pkt->hdr.idx;
9741 	code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
9742 
9743 	return (!(qid == 0 && idx == 0 && code == 0) &&
9744 	    pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
9745 }
9746 
9747 void
9748 iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf_list *ml)
9749 {
9750 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
9751 	struct iwx_rx_packet *pkt, *nextpkt;
9752 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
9753 	struct mbuf *m0, *m;
9754 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
9755 	int qid, idx, code, handled = 1;
9756 
9757 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
9758 	    BUS_DMASYNC_POSTREAD);
9759 
9760 	m0 = data->m;
9761 	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
9762 		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
9763 		qid = pkt->hdr.qid;
9764 		idx = pkt->hdr.idx;
9765 
9766 		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
9767 
9768 		if (!iwx_rx_pkt_valid(pkt))
9769 			break;
9770 
9771 		/*
9772 		 * XXX Intel inside (tm)
9773 		 * Any commands in the LONG_GROUP could actually be in the
9774 		 * LEGACY group. Firmware API versions >= 50 reject commands
9775 		 * in group 0, forcing us to use this hack.
9776 		 */
9777 		if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
9778 			struct iwx_tx_ring *ring = &sc->txq[qid];
9779 			struct iwx_tx_data *txdata = &ring->data[idx];
9780 			if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
9781 				code = iwx_cmd_opcode(code);
9782 		}
9783 
9784 		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
9785 		if (len < minsz || len > (IWX_RBUF_SIZE - offset))
9786 			break;
9787 
9788 		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
9789 			/* Take mbuf m0 off the RX ring. */
9790 			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
9791 				ifp->if_ierrors++;
9792 				break;
9793 			}
9794 			KASSERT(data->m != m0);
9795 		}
9796 
9797 		switch (code) {
9798 		case IWX_REPLY_RX_PHY_CMD:
9799 			iwx_rx_rx_phy_cmd(sc, pkt, data);
9800 			break;
9801 
9802 		case IWX_REPLY_RX_MPDU_CMD: {
9803 			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
9804 			nextoff = offset +
9805 			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
9806 			nextpkt = (struct iwx_rx_packet *)
9807 			    (m0->m_data + nextoff);
9808 			/* AX210 devices ship only one packet per Rx buffer. */
9809 			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 ||
9810 			    nextoff + minsz >= IWX_RBUF_SIZE ||
9811 			    !iwx_rx_pkt_valid(nextpkt)) {
9812 				/* No need to copy last frame in buffer. */
9813 				if (offset > 0)
9814 					m_adj(m0, offset);
9815 				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen, ml);
9816 				m0 = NULL; /* stack owns m0 now; abort loop */
9817 			} else {
9818 				/*
9819 				 * Create an mbuf which points to the current
9820 				 * packet. Always copy from offset zero to
9821 				 * preserve m_pkthdr.
9822 				 */
9823 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
9824 				if (m == NULL) {
9825 					ifp->if_ierrors++;
9826 					m_freem(m0);
9827 					m0 = NULL;
9828 					break;
9829 				}
9830 				m_adj(m, offset);
9831 				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen, ml);
9832 			}
9833  			break;
9834 		}
9835 
9836 		case IWX_BAR_FRAME_RELEASE:
9837 			iwx_rx_bar_frame_release(sc, pkt, ml);
9838 			break;
9839 
9840 		case IWX_TX_CMD:
9841 			iwx_rx_tx_cmd(sc, pkt, data);
9842 			break;
9843 
9844 		case IWX_BA_NOTIF:
9845 			iwx_rx_compressed_ba(sc, pkt);
9846 			break;
9847 
9848 		case IWX_MISSED_BEACONS_NOTIFICATION:
9849 			iwx_rx_bmiss(sc, pkt, data);
9850 			break;
9851 
9852 		case IWX_MFUART_LOAD_NOTIFICATION:
9853 			break;
9854 
9855 		case IWX_ALIVE: {
9856 			struct iwx_alive_resp_v4 *resp4;
9857 			struct iwx_alive_resp_v5 *resp5;
9858 			struct iwx_alive_resp_v6 *resp6;
9859 
9860 			DPRINTF(("%s: firmware alive\n", __func__));
9861 			sc->sc_uc.uc_ok = 0;
9862 
9863 			/*
9864 			 * For v5 and above, we can check the version, for older
9865 			 * versions we need to check the size.
9866 			 */
9867 			 if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
9868 			    IWX_ALIVE) == 6) {
9869 				SYNC_RESP_STRUCT(resp6, pkt);
9870 				if (iwx_rx_packet_payload_len(pkt) !=
9871 				    sizeof(*resp6)) {
9872 					sc->sc_uc.uc_intr = 1;
9873 					wakeup(&sc->sc_uc);
9874 					break;
9875 				}
9876 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9877 				    resp6->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9878 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9879 				    resp6->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9880 				sc->sc_uc.uc_log_event_table = le32toh(
9881 				    resp6->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9882 				sc->sc_uc.uc_umac_error_event_table = le32toh(
9883 				    resp6->umac_data.dbg_ptrs.error_info_addr);
9884 				sc->sc_sku_id[0] =
9885 				    le32toh(resp6->sku_id.data[0]);
9886 				sc->sc_sku_id[1] =
9887 				    le32toh(resp6->sku_id.data[1]);
9888 				sc->sc_sku_id[2] =
9889 				    le32toh(resp6->sku_id.data[2]);
9890 				if (resp6->status == IWX_ALIVE_STATUS_OK)
9891 					sc->sc_uc.uc_ok = 1;
9892 			 } else if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
9893 			    IWX_ALIVE) == 5) {
9894 				SYNC_RESP_STRUCT(resp5, pkt);
9895 				if (iwx_rx_packet_payload_len(pkt) !=
9896 				    sizeof(*resp5)) {
9897 					sc->sc_uc.uc_intr = 1;
9898 					wakeup(&sc->sc_uc);
9899 					break;
9900 				}
9901 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9902 				    resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9903 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9904 				    resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9905 				sc->sc_uc.uc_log_event_table = le32toh(
9906 				    resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9907 				sc->sc_uc.uc_umac_error_event_table = le32toh(
9908 				    resp5->umac_data.dbg_ptrs.error_info_addr);
9909 				sc->sc_sku_id[0] =
9910 				    le32toh(resp5->sku_id.data[0]);
9911 				sc->sc_sku_id[1] =
9912 				    le32toh(resp5->sku_id.data[1]);
9913 				sc->sc_sku_id[2] =
9914 				    le32toh(resp5->sku_id.data[2]);
9915 				if (resp5->status == IWX_ALIVE_STATUS_OK)
9916 					sc->sc_uc.uc_ok = 1;
9917 			} else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
9918 				SYNC_RESP_STRUCT(resp4, pkt);
9919 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9920 				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9921 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9922 				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9923 				sc->sc_uc.uc_log_event_table = le32toh(
9924 				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9925 				sc->sc_uc.uc_umac_error_event_table = le32toh(
9926 				    resp4->umac_data.dbg_ptrs.error_info_addr);
9927 				if (resp4->status == IWX_ALIVE_STATUS_OK)
9928 					sc->sc_uc.uc_ok = 1;
9929 			}
9930 
9931 			sc->sc_uc.uc_intr = 1;
9932 			wakeup(&sc->sc_uc);
9933 			break;
9934 		}
9935 
9936 		case IWX_STATISTICS_NOTIFICATION: {
9937 			struct iwx_notif_statistics *stats;
9938 			SYNC_RESP_STRUCT(stats, pkt);
9939 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
9940 			sc->sc_noise = iwx_get_noise(&stats->rx.general);
9941 			break;
9942 		}
9943 
9944 		case IWX_DTS_MEASUREMENT_NOTIFICATION:
9945 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9946 				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
9947 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9948 				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
9949 			break;
9950 
9951 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9952 		    IWX_CT_KILL_NOTIFICATION): {
9953 			struct iwx_ct_kill_notif *notif;
9954 			SYNC_RESP_STRUCT(notif, pkt);
9955 			printf("%s: device at critical temperature (%u degC), "
9956 			    "stopping device\n",
9957 			    DEVNAME(sc), le16toh(notif->temperature));
9958 			sc->sc_flags |= IWX_FLAG_HW_ERR;
9959 			task_add(systq, &sc->init_task);
9960 			break;
9961 		}
9962 
9963 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
9964 		    IWX_SCD_QUEUE_CONFIG_CMD):
9965 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
9966 		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD):
9967 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
9968 		    IWX_SESSION_PROTECTION_CMD):
9969 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
9970 		    IWX_NVM_GET_INFO):
9971 		case IWX_ADD_STA_KEY:
9972 		case IWX_PHY_CONFIGURATION_CMD:
9973 		case IWX_TX_ANT_CONFIGURATION_CMD:
9974 		case IWX_ADD_STA:
9975 		case IWX_MAC_CONTEXT_CMD:
9976 		case IWX_REPLY_SF_CFG_CMD:
9977 		case IWX_POWER_TABLE_CMD:
9978 		case IWX_LTR_CONFIG:
9979 		case IWX_PHY_CONTEXT_CMD:
9980 		case IWX_BINDING_CONTEXT_CMD:
9981 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
9982 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
9983 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
9984 		case IWX_REPLY_BEACON_FILTERING_CMD:
9985 		case IWX_MAC_PM_POWER_TABLE:
9986 		case IWX_TIME_QUOTA_CMD:
9987 		case IWX_REMOVE_STA:
9988 		case IWX_TXPATH_FLUSH:
9989 		case IWX_BT_CONFIG:
9990 		case IWX_MCC_UPDATE_CMD:
9991 		case IWX_TIME_EVENT_CMD:
9992 		case IWX_STATISTICS_CMD:
9993 		case IWX_SCD_QUEUE_CFG: {
9994 			size_t pkt_len;
9995 
9996 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
9997 				break;
9998 
9999 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
10000 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
10001 
10002 			pkt_len = sizeof(pkt->len_n_flags) +
10003 			    iwx_rx_packet_len(pkt);
10004 
10005 			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
10006 			    pkt_len < sizeof(*pkt) ||
10007 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
10008 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
10009 				    sc->sc_cmd_resp_len[idx]);
10010 				sc->sc_cmd_resp_pkt[idx] = NULL;
10011 				break;
10012 			}
10013 
10014 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
10015 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
10016 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
10017 			break;
10018 		}
10019 
10020 		case IWX_INIT_COMPLETE_NOTIF:
10021 			sc->sc_init_complete |= IWX_INIT_COMPLETE;
10022 			wakeup(&sc->sc_init_complete);
10023 			break;
10024 
10025 		case IWX_SCAN_COMPLETE_UMAC: {
10026 			struct iwx_umac_scan_complete *notif;
10027 			SYNC_RESP_STRUCT(notif, pkt);
10028 			iwx_endscan(sc);
10029 			break;
10030 		}
10031 
10032 		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
10033 			struct iwx_umac_scan_iter_complete_notif *notif;
10034 			SYNC_RESP_STRUCT(notif, pkt);
10035 			iwx_endscan(sc);
10036 			break;
10037 		}
10038 
10039 		case IWX_MCC_CHUB_UPDATE_CMD: {
10040 			struct iwx_mcc_chub_notif *notif;
10041 			SYNC_RESP_STRUCT(notif, pkt);
10042 			iwx_mcc_update(sc, notif);
10043 			break;
10044 		}
10045 
10046 		case IWX_REPLY_ERROR: {
10047 			struct iwx_error_resp *resp;
10048 			SYNC_RESP_STRUCT(resp, pkt);
10049 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
10050 				DEVNAME(sc), le32toh(resp->error_type),
10051 				resp->cmd_id);
10052 			break;
10053 		}
10054 
10055 		case IWX_TIME_EVENT_NOTIFICATION: {
10056 			struct iwx_time_event_notif *notif;
10057 			uint32_t action;
10058 			SYNC_RESP_STRUCT(notif, pkt);
10059 
10060 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
10061 				break;
10062 			action = le32toh(notif->action);
10063 			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
10064 				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
10065 			break;
10066 		}
10067 
10068 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
10069 		    IWX_SESSION_PROTECTION_NOTIF): {
10070 			struct iwx_session_prot_notif *notif;
10071 			uint32_t status, start, conf_id;
10072 
10073 			SYNC_RESP_STRUCT(notif, pkt);
10074 
10075 			status = le32toh(notif->status);
10076 			start = le32toh(notif->start);
10077 			conf_id = le32toh(notif->conf_id);
10078 			/* Check for end of successful PROTECT_CONF_ASSOC. */
10079 			if (status == 1 && start == 0 &&
10080 			    conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
10081 				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
10082 			break;
10083 		}
10084 
10085 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
10086 		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
10087 			break;
10088 
10089 		/*
10090 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
10091 		 * messages. Just ignore them for now.
10092 		 */
10093 		case IWX_DEBUG_LOG_MSG:
10094 			break;
10095 
10096 		case IWX_MCAST_FILTER_CMD:
10097 			break;
10098 
10099 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
10100 			break;
10101 
10102 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
10103 			break;
10104 
10105 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
10106 			break;
10107 
10108 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
10109 		    IWX_NVM_ACCESS_COMPLETE):
10110 			break;
10111 
10112 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
10113 			break; /* happens in monitor mode; ignore for now */
10114 
10115 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
10116 			break;
10117 
10118 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
10119 		    IWX_TLC_MNG_UPDATE_NOTIF): {
10120 			struct iwx_tlc_update_notif *notif;
10121 			SYNC_RESP_STRUCT(notif, pkt);
10122 			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
10123 				iwx_rs_update(sc, notif);
10124 			break;
10125 		}
10126 
10127 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RLC_CONFIG_CMD):
10128 			break;
10129 
10130 		/* undocumented notification from iwx-ty-a0-gf-a0-77 image */
10131 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 0xf8):
10132 			break;
10133 
10134 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
10135 		    IWX_PNVM_INIT_COMPLETE):
10136 			sc->sc_init_complete |= IWX_PNVM_COMPLETE;
10137 			wakeup(&sc->sc_init_complete);
10138 			break;
10139 
10140 		default:
10141 			handled = 0;
10142 			printf("%s: unhandled firmware response 0x%x/0x%x "
10143 			    "rx ring %d[%d]\n",
10144 			    DEVNAME(sc), code, pkt->len_n_flags,
10145 			    (qid & ~0x80), idx);
10146 			break;
10147 		}
10148 
10149 		/*
10150 		 * uCode sets bit 0x80 when it originates the notification,
10151 		 * i.e. when the notification is not a direct response to a
10152 		 * command sent by the driver.
10153 		 * For example, uCode issues IWX_REPLY_RX when it sends a
10154 		 * received frame to the driver.
10155 		 */
10156 		if (handled && !(qid & (1 << 7))) {
10157 			iwx_cmd_done(sc, qid, idx, code);
10158 		}
10159 
10160 		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
10161 
10162 		/* AX210 devices ship only one packet per Rx buffer. */
10163 		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
10164 			break;
10165 	}
10166 
10167 	if (m0 && m0 != data->m)
10168 		m_freem(m0);
10169 }
10170 
10171 void
10172 iwx_notif_intr(struct iwx_softc *sc)
10173 {
10174 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
10175 	uint16_t hw;
10176 
10177 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
10178 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
10179 
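	/* On AX210 the closed Rx buffer index lives in a 16-bit status word. */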
10180 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
10181 		uint16_t *status = sc->rxq.stat_dma.vaddr;
10182 		hw = le16toh(*status) & 0xfff;
10183 	} else
10184 		hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
10185 	hw &= (IWX_RX_MQ_RING_COUNT - 1);
10186 	while (sc->rxq.cur != hw) {
10187 		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
10188 		iwx_rx_pkt(sc, data, &ml);
10189 		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
10190 	}
10191 	if_input(&sc->sc_ic.ic_if, &ml);
10192 
10193 	/*
10194 	 * Tell the firmware what we have processed.
10195 	 * Seems like the hardware gets upset unless we align the write by 8??
10196 	 */
10197 	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
10198 	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
10199 }
10200 
10201 int
10202 iwx_intr(void *arg)
10203 {
10204 	struct iwx_softc *sc = arg;
10205 	struct ieee80211com *ic = &sc->sc_ic;
10206 	struct ifnet *ifp = IC2IFP(ic);
10207 	int handled = 0;
10208 	int r1, r2, rv = 0;
10209 
10210 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
10211 
10212 	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
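	/*
	 * With ICT enabled, the device writes interrupt causes into a
	 * table in host memory rather than into the INT CSR.
	 */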
10213 		uint32_t *ict = sc->ict_dma.vaddr;
10214 		int tmp;
10215 
10216 		tmp = htole32(ict[sc->ict_cur]);
10217 		if (!tmp)
10218 			goto out_ena;
10219 
10220 		/*
10221 		 * ok, there was something.  keep plowing until we have all.
10222 		 */
10223 		r1 = r2 = 0;
10224 		while (tmp) {
10225 			r1 |= tmp;
10226 			ict[sc->ict_cur] = 0;
10227 			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
10228 			tmp = htole32(ict[sc->ict_cur]);
10229 		}
10230 
10231 		/* this is where the fun begins.  don't ask */
10232 		if (r1 == 0xffffffff)
10233 			r1 = 0;
10234 
10235 		/* i am not expected to understand this */
10236 		if (r1 & 0xc0000)
10237 			r1 |= 0x8000;
10238 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
10239 	} else {
10240 		r1 = IWX_READ(sc, IWX_CSR_INT);
10241 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
10242 			goto out;
10243 		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
10244 	}
10245 	if (r1 == 0 && r2 == 0) {
10246 		goto out_ena;
10247 	}
10248 
10249 	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);
10250 
10251 	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
10252 		int i;
10253 
10254 		/* Firmware has now configured the RFH. */
10255 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
10256 			iwx_update_rx_desc(sc, &sc->rxq, i);
10257 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
10258 	}
10259 
10260 	handled |= (r1 & (IWX_CSR_INT_BIT_ALIVE /*| IWX_CSR_INT_BIT_SCD*/));
10261 
10262 	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
10263 		handled |= IWX_CSR_INT_BIT_RF_KILL;
10264 		iwx_check_rfkill(sc);
10265 		task_add(systq, &sc->init_task);
10266 		rv = 1;
10267 		goto out_ena;
10268 	}
10269 
10270 	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
10271 		if (ifp->if_flags & IFF_DEBUG) {
10272 			iwx_nic_error(sc);
10273 			iwx_dump_driver_status(sc);
10274 		}
10275 		printf("%s: fatal firmware error\n", DEVNAME(sc));
10276 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
10277 			task_add(systq, &sc->init_task);
10278 		rv = 1;
10279 		goto out;
10280 
10281 	}
10282 
10283 	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
10284 		handled |= IWX_CSR_INT_BIT_HW_ERR;
10285 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
10286 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
10287 			sc->sc_flags |= IWX_FLAG_HW_ERR;
10288 			task_add(systq, &sc->init_task);
10289 		}
10290 		rv = 1;
10291 		goto out;
10292 	}
10293 
10294 	/* firmware chunk loaded */
10295 	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
10296 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);
10297 		handled |= IWX_CSR_INT_BIT_FH_TX;
10298 
10299 		sc->sc_fw_chunk_done = 1;
10300 		wakeup(&sc->sc_fw);
10301 	}
10302 
10303 	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
10304 	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
10305 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
10306 			handled |= (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX);
10307 			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
10308 		}
10309 		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
10310 			handled |= IWX_CSR_INT_BIT_RX_PERIODIC;
10311 			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
10312 		}
10313 
10314 		/* Disable periodic interrupt; we use it as just a one-shot. */
10315 		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);
10316 
10317 		/*
10318 		 * Enable periodic interrupt in 8 msec only if we received
10319 		 * real RX interrupt (instead of just periodic int), to catch
10320 		 * any dangling Rx interrupt.  If it was just the periodic
10321 		 * interrupt, there was no dangling Rx activity, and no need
10322 		 * to extend the periodic interrupt; one-shot is enough.
10323 		 */
10324 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
10325 			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
10326 			    IWX_CSR_INT_PERIODIC_ENA);
10327 
10328 		iwx_notif_intr(sc);
10329 	}
10330 
10331 	rv = 1;
10332 
10333  out_ena:
10334 	iwx_restore_interrupts(sc);
10335  out:
10336 	return rv;
10337 }
10338 
10339 int
10340 iwx_intr_msix(void *arg)
10341 {
10342 	struct iwx_softc *sc = arg;
10343 	struct ieee80211com *ic = &sc->sc_ic;
10344 	struct ifnet *ifp = IC2IFP(ic);
10345 	uint32_t inta_fh, inta_hw;
10346 	int vector = 0;
10347 
10348 	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
10349 	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
10350 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
10351 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
10352 	inta_fh &= sc->sc_fh_mask;
10353 	inta_hw &= sc->sc_hw_mask;
10354 
10355 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
10356 	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
10357 		iwx_notif_intr(sc);
10358 	}
10359 
10360 	/* firmware chunk loaded */
10361 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
10362 		sc->sc_fw_chunk_done = 1;
10363 		wakeup(&sc->sc_fw);
10364 	}
10365 
10366 	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
10367 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
10368 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
10369 		if (ifp->if_flags & IFF_DEBUG) {
10370 			iwx_nic_error(sc);
10371 			iwx_dump_driver_status(sc);
10372 		}
10373 		printf("%s: fatal firmware error\n", DEVNAME(sc));
10374 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
10375 			task_add(systq, &sc->init_task);
10376 		return 1;
10377 	}
10378 
10379 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
10380 		iwx_check_rfkill(sc);
10381 		task_add(systq, &sc->init_task);
10382 	}
10383 
10384 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
10385 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
10386 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
10387 			sc->sc_flags |= IWX_FLAG_HW_ERR;
10388 			task_add(systq, &sc->init_task);
10389 		}
10390 		return 1;
10391 	}
10392 
10393 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
10394 		int i;
10395 
10396 		/* Firmware has now configured the RFH. */
10397 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
10398 			iwx_update_rx_desc(sc, &sc->rxq, i);
10399 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
10400 	}
10401 
10402 	/*
10403 	 * Before raising the interrupt, the HW disables it to prevent
10404 	 * a nested interrupt. This is done by setting the corresponding
10405 	 * bit in the mask register. After handling the interrupt, the bit
10406 	 * must be re-enabled by clearing it. The register is defined as
10407 	 * write-1-clear (W1C), meaning that the bit is cleared by
10408 	 * writing 1 to it.
10409 	 */
10410 	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
10411 	return 1;
10412 }
10413 
10414 typedef void *iwx_match_t;
10415 
10416 static const struct pci_matchid iwx_devices[] = {
10417 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_1 },
10418 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_2 },
10419 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_3 },
10420 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_4 },
10421 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_5 },
10422 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_6 },
10423 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_7 },
10424 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_8 },
10425 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_9 },
10426 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_10 },
10427 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_11 },
10428 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_12 },
10429 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_13 },
10430 	/* _14 is an MA device, not yet supported */
10431 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_15 },
10432 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_16 },
10433 };
10434 
10436 int
10437 iwx_match(struct device *parent, iwx_match_t match __unused, void *aux)
10438 {
10439 	struct pci_attach_args *pa = aux;
10440 	return pci_matchbyid(pa, iwx_devices, nitems(iwx_devices));
10441 }
10442 
10443 /*
10444  * The device info table below contains device-specific config overrides.
10445  * The most important parameter derived from this table is the name of the
10446  * firmware image to load.
10447  *
10448  * The Linux iwlwifi driver uses an "old" and a "new" device info table.
10449  * The "old" table matches devices based on PCI vendor/product IDs only.
10450  * The "new" table extends this with various device parameters derived
10451  * from MAC type and RF type.
10452  *
10453  * In iwlwifi "old" and "new" tables share the same array, where "old"
10454  * entries contain dummy values for data defined only for "new" entries.
10455  * As of 2022, Linux developers are still in the process of moving entries
10456  * from "old" to "new" style and it looks like this effort has stalled
10457  * in some work-in-progress state for quite a while. Linux commits moving
10458  * entries from "old" to "new" have at times been reverted due to regressions.
10459  * Part of this complexity comes from iwlwifi supporting both iwm(4) and iwx(4)
10460  * devices in the same driver.
10461  *
10462  * Our table below contains mostly "new" entries declared in iwlwifi
10463  * with the _IWL_DEV_INFO() macro (with a leading underscore).
10464  * Other devices are matched based on PCI vendor/product ID as usual,
10465  * unless matching specific PCI subsystem vendor/product IDs is required.
10466  *
10467  * Some "old"-style entries are required to identify the firmware image to use.
10468  * Others might be used to print a specific marketing name into Linux dmesg,
10469  * but we can't be sure whether the corresponding devices would be matched
10470  * correctly in the absence of their entries. So we include them just in case.
10471  */
10472 
10473 struct iwx_dev_info {
10474 	uint16_t device;
10475 	uint16_t subdevice;
10476 	uint16_t mac_type;
10477 	uint16_t rf_type;
10478 	uint8_t mac_step;
10479 	uint8_t rf_id;
10480 	uint8_t no_160;
10481 	uint8_t cores;
10482 	uint8_t cdb;
10483 	uint8_t jacket;
10484 	const struct iwx_device_cfg *cfg;
10485 };
10486 
10487 #define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
10488 		      _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
10489 	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg),  \
10490 	  .mac_type = _mac_type, .rf_type = _rf_type,	   \
10491 	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id,		   \
10492 	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }
10493 
10494 #define IWX_DEV_INFO(_device, _subdevice, _cfg) \
10495 	_IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY,	   \
10496 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY,  \
10497 		      IWX_CFG_ANY, IWX_CFG_ANY, _cfg)
10498 
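/*
 * For illustration: an ID-only entry such as
 *	IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0)
 * expands to an _IWX_DEV_INFO() entry whose fields other than device,
 * subdevice, and cfg are all IWX_CFG_ANY, i.e. it matches on the PCI
 * product and subsystem IDs alone.
 */
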
10499 /*
10500  * When adding entries to this table keep in mind that entries must
10501  * be listed in the same order as in the Linux driver. Code walks this
10502  * table backwards and uses the first matching entry it finds.
10503  * Device firmware must be available in fw_update(8).
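 * For example, a device which matches both an ID-only IWX_DEV_INFO()
 * entry near the top and a MAC/RF-based _IWX_DEV_INFO() entry further
 * down will end up using the entry further down.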
10504  */
10505 static const struct iwx_dev_info iwx_dev_info_table[] = {
10506 	/* So with HR */
10507 	IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
10508 	IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
10509 	IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
10510 	IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
10511 	IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
10512 	IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
10513 	IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
10514 	IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
10515 	IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
10516 	IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
10517 	IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
10518 	IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
10519 	IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
10520 	IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
10521 	IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10522 	IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10523 	IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10524 	IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10525 	IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
10526 	IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
10527 	IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
10528 	IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
10529 	IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
10530 	IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
10531 	IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
10532 	IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
10533 	IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
10534 	IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10535 	IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10536 	IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
10537 	IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
10538 	IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
10539 	IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10540 	IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10541 
10542 	/* So with GF2 */
10543 	IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10544 	IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10545 	IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10546 	IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10547 	IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10548 	IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10549 	IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10550 	IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10551 	IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10552 	IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10553 	IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10554 	IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10555 
10556 	/* Qu with Jf, C step */
10557 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10558 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10559 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10560 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10561 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
10562 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10563 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10564 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10565 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10566 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
10567 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10568 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10569 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10570 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10571 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
10572 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10573 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10574 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10575 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10576 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
10577 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10578 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10579 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10580 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10581 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
10582 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10583 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10584 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10585 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10586 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
10587 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
10588 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10589 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10590 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10591 		      IWX_CFG_ANY,
10592 		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
10593 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
10594 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10595 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10596 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10597 		      IWX_CFG_ANY,
10598 		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */
10599 
10600 	/* QuZ with Jf */
10601 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10602 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10603 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10604 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10605 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
10606 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10607 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10608 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10609 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10610 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
10611 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10612 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10613 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10614 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10615 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
10616 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10617 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10618 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10619 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10620 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
10621 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
10622 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10623 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10624 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10625 		      IWX_CFG_ANY,
10626 		      iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
10627 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
10628 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10629 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10630 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10631 		      IWX_CFG_ANY,
10632 		      iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */
10633 
10634 	/* Qu with Hr, B step */
10635 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10636 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
10637 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10638 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10639 		      iwx_qu_b0_hr1_b0), /* AX101 */
10640 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10641 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
10642 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10643 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10644 		      iwx_qu_b0_hr_b0), /* AX203 */
10645 
10646 	/* Qu with Hr, C step */
10647 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10648 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10649 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10650 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10651 		      iwx_qu_c0_hr1_b0), /* AX101 */
10652 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10653 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10654 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10655 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10656 		      iwx_qu_c0_hr_b0), /* AX203 */
10657 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10658 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10659 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10660 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10661 		      iwx_qu_c0_hr_b0), /* AX201 */
10662 
10663 	/* QuZ with Hr */
10664 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10665 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10666 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10667 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10668 		      iwx_quz_a0_hr1_b0), /* AX101 */
10669 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10670 		      IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
10671 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10672 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10673 		      iwx_cfg_quz_a0_hr_b0), /* AX203 */
10674 
10675 	/* SoF with JF2 */
10676 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10677 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10678 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10679 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10680 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
10681 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10682 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10683 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10684 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10685 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
10686 
10687 	/* SoF with JF */
10688 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10689 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10690 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10691 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10692 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
10693 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10694 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10695 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10696 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10697 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
10698 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10699 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10700 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10701 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10702 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_name */
10703 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10704 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10705 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10706 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10707 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
10708 
10709 	/* So with Hr */
10710 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10711 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10712 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10713 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10714 		      iwx_cfg_so_a0_hr_b0), /* AX203 */
10715 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10716 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10717 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10718 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10719 		      iwx_cfg_so_a0_hr_b0), /* AX101 */
10720 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10721 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10722 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10723 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10724 		      iwx_cfg_so_a0_hr_b0), /* AX201 */
10725 
10726 	/* So-F with Hr */
10727 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10728 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10729 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10730 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10731 		      iwx_cfg_so_a0_hr_b0), /* AX203 */
10732 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10733 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10734 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10735 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10736 		      iwx_cfg_so_a0_hr_b0), /* AX101 */
10737 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10738 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10739 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10740 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10741 		      iwx_cfg_so_a0_hr_b0), /* AX201 */
10742 
10743 	/* So-F with GF */
10744 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10745 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10746 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
10747 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10748 		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
10749 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10750 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10751 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
10752 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
10753 		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */
10754 
10755 	/* So with GF */
10756 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10757 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10758 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
10759 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10760 		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
10761 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10762 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10763 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
10764 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
10765 		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */
10766 
10767 	/* So with JF2 */
10768 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10769 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10770 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10771 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10772 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
10773 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10774 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10775 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10776 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10777 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
10778 
10779 	/* So with JF */
10780 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10781 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10782 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10783 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10784 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
10785 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10786 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10787 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10788 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10789 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
10790 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10791 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10792 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10793 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10794 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* iwl9461 */
10795 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10796 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10797 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10798 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10799 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
10800 };
10801 
10802 int
10803 iwx_preinit(struct iwx_softc *sc)
10804 {
10805 	struct ieee80211com *ic = &sc->sc_ic;
10806 	struct ifnet *ifp = IC2IFP(ic);
10807 	int err;
10808 
10809 	err = iwx_prepare_card_hw(sc);
10810 	if (err) {
10811 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10812 		return err;
10813 	}
10814 
10815 	if (sc->attached) {
10816 		/* Update MAC in case the upper layers changed it. */
10817 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
10818 		    ((struct arpcom *)ifp)->ac_enaddr);
10819 		return 0;
10820 	}
10821 
10822 	err = iwx_start_hw(sc);
10823 	if (err) {
10824 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10825 		return err;
10826 	}
10827 
10828 	err = iwx_run_init_mvm_ucode(sc, 1);
10829 	iwx_stop_device(sc);
10830 	if (err)
10831 		return err;
10832 
10833 	/* Print version info and MAC address on first successful fw load. */
10834 	sc->attached = 1;
10835 	if (sc->sc_pnvm_ver) {
10836 		printf("%s: hw rev 0x%x, fw %s, pnvm %08x, "
10837 		    "address %s\n",
10838 		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
10839 		    sc->sc_fwver, sc->sc_pnvm_ver,
10840 		    ether_sprintf(sc->sc_nvm.hw_addr));
10841 	} else {
10842 		printf("%s: hw rev 0x%x, fw %s, address %s\n",
10843 		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
10844 		    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
10845 	}
10846 
10847 	if (sc->sc_nvm.sku_cap_11n_enable)
10848 		iwx_setup_ht_rates(sc);
10849 	if (sc->sc_nvm.sku_cap_11ac_enable)
10850 		iwx_setup_vht_rates(sc);
10851 
10852 	/* not all hardware can do 5GHz band */
10853 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
10854 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
10855 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
10856 
10857 	/* Configure channel information obtained from firmware. */
10858 	ieee80211_channel_init(ifp);
10859 
10860 	/* Configure MAC address. */
10861 	err = if_setlladdr(ifp, ic->ic_myaddr);
10862 	if (err)
10863 		printf("%s: could not set MAC address (error %d)\n",
10864 		    DEVNAME(sc), err);
10865 
10866 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
10867 
10868 	return 0;
10869 }
10870 
10871 void
10872 iwx_attach_hook(struct device *self)
10873 {
10874 	struct iwx_softc *sc = (void *)self;
10875 
10876 	KASSERT(!cold);
10877 
10878 	iwx_preinit(sc);
10879 }
10880 
10881 const struct iwx_device_cfg *
10882 iwx_find_device_cfg(struct iwx_softc *sc)
10883 {
10884 	pcireg_t sreg;
10885 	pci_product_id_t sdev_id;
10886 	uint16_t mac_type, rf_type;
10887 	uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
10888 	int i;
10889 
10890 	sreg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_SUBSYS_ID_REG);
10891 	sdev_id = PCI_PRODUCT(sreg);
10892 	mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
10893 	mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
10894 	rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
10895 	cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
10896 	jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);
10897 
10898 	rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
10899 	no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
10900 	cores = IWX_SUBDEVICE_CORES(sdev_id);
10901 
10902 	for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
10903 		const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];
10904 
10905 		if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
10906 		    dev_info->device != sc->sc_pid)
10907 			continue;
10908 
10909 		if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
10910 		    dev_info->subdevice != sdev_id)
10911 			continue;
10912 
10913 		if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
10914 		    dev_info->mac_type != mac_type)
10915 			continue;
10916 
10917 		if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
10918 		    dev_info->mac_step != mac_step)
10919 			continue;
10920 
10921 		if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
10922 		    dev_info->rf_type != rf_type)
10923 			continue;
10924 
10925 		if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
10926 		    dev_info->cdb != cdb)
10927 			continue;
10928 
10929 		if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
10930 		    dev_info->jacket != jacket)
10931 			continue;
10932 
10933 		if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
10934 		    dev_info->rf_id != rf_id)
10935 			continue;
10936 
10937 		if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
10938 		    dev_info->no_160 != no_160)
10939 			continue;
10940 
10941 		if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
10942 		    dev_info->cores != cores)
10943 			continue;
10944 
10945 		return dev_info->cfg;
10946 	}
10947 
10948 	return NULL;
10949 }
10950 
10952 void
10953 iwx_attach(struct device *parent, struct device *self, void *aux)
10954 {
10955 	struct iwx_softc *sc = (void *)self;
10956 	struct pci_attach_args *pa = aux;
10957 	pci_intr_handle_t ih;
10958 	pcireg_t reg, memtype;
10959 	struct ieee80211com *ic = &sc->sc_ic;
10960 	struct ifnet *ifp = &ic->ic_if;
10961 	const char *intrstr;
10962 	const struct iwx_device_cfg *cfg;
10963 	int err;
10964 	int txq_i, i, j;
10965 	size_t ctxt_info_size;
10966 
10967 	sc->sc_pid = PCI_PRODUCT(pa->pa_id);
10968 	sc->sc_pct = pa->pa_pc;
10969 	sc->sc_pcitag = pa->pa_tag;
10970 	sc->sc_dmat = pa->pa_dmat;
10971 
10972 	rw_init(&sc->ioctl_rwl, "iwxioctl");
10973 
10974 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
10975 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
10976 	if (err == 0) {
10977 		printf("%s: PCIe capability structure not found!\n",
10978 		    DEVNAME(sc));
10979 		return;
10980 	}
10981 
10982 	/*
10983 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
10984 	 * PCI Tx retries from interfering with C3 CPU state.
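	 * (Register 0x41 is the second byte of the 32-bit configuration
	 * word at offset 0x40, hence the ~0xff00 mask below.)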
10985 	 */
10986 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
10987 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
10988 
10989 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
10990 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
10991 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
10992 	if (err) {
10993 		printf("%s: can't map mem space\n", DEVNAME(sc));
10994 		return;
10995 	}
10996 
10997 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
10998 		sc->sc_msix = 1;
10999 	} else if (pci_intr_map_msi(pa, &ih)) {
11000 		if (pci_intr_map(pa, &ih)) {
11001 			printf("%s: can't map interrupt\n", DEVNAME(sc));
11002 			return;
11003 		}
11004 		/*
		 * Hardware bug workaround: interrupts might not be
		 * delivered while the PCI INTx-disable bit is set,
		 * so make sure it is clear.
		 */
11005 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
11006 		    PCI_COMMAND_STATUS_REG);
11007 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
11008 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
11009 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
11010 		    PCI_COMMAND_STATUS_REG, reg);
11011 	}
11012 
11013 	intrstr = pci_intr_string(sc->sc_pct, ih);
11014 	if (sc->sc_msix)
11015 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11016 		    iwx_intr_msix, sc, DEVNAME(sc));
11017 	else
11018 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11019 		    iwx_intr, sc, DEVNAME(sc));
11020 
11021 	if (sc->sc_ih == NULL) {
11022 		printf("\n");
11023 		printf("%s: can't establish interrupt", DEVNAME(sc));
11024 		if (intrstr != NULL)
11025 			printf(" at %s", intrstr);
11026 		printf("\n");
11027 		return;
11028 	}
11029 	printf(", %s\n", intrstr);
11030 
11031 	/* Clear pending interrupts. */
11032 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
11033 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
11034 	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
11035 
11036 	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
11037 	sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
11038 
11039 	/*
11040 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
11041 	 * changed: the revision step now also includes bits 0-1 (there is no
11042 	 * more "dash" value). To keep hw_rev backwards compatible, we store
11043 	 * it in the old format.
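	 * For example, assuming IWX_CSR_HW_REV_STEP() extracts bits 2-3,
	 * a new-format step of 1 in bits 0-1 ends up stored as 0x4 in the
	 * old dash position.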
11044 	 */
11045 	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
11046 			(IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
11047 
11048 	switch (PCI_PRODUCT(pa->pa_id)) {
11049 	case PCI_PRODUCT_INTEL_WL_22500_1:
11050 		sc->sc_fwname = IWX_CC_A_FW;
11051 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
11052 		sc->sc_integrated = 0;
11053 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
11054 		sc->sc_low_latency_xtal = 0;
11055 		sc->sc_xtal_latency = 0;
11056 		sc->sc_tx_with_siso_diversity = 0;
11057 		sc->sc_uhb_supported = 0;
11058 		break;
11059 	case PCI_PRODUCT_INTEL_WL_22500_2:
11060 	case PCI_PRODUCT_INTEL_WL_22500_5:
11061 		/* These devices should be QuZ only. */
11062 		if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
11063 			printf("%s: unsupported AX201 adapter\n", DEVNAME(sc));
11064 			return;
11065 		}
11066 		sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
11067 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
11068 		sc->sc_integrated = 1;
11069 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
11070 		sc->sc_low_latency_xtal = 0;
11071 		sc->sc_xtal_latency = 500;
11072 		sc->sc_tx_with_siso_diversity = 0;
11073 		sc->sc_uhb_supported = 0;
11074 		break;
11075 	case PCI_PRODUCT_INTEL_WL_22500_3:
11076 		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
11077 			sc->sc_fwname = IWX_QU_C_HR_B_FW;
11078 		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
11079 			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
11080 		else
11081 			sc->sc_fwname = IWX_QU_B_HR_B_FW;
11082 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
11083 		sc->sc_integrated = 1;
11084 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
11085 		sc->sc_low_latency_xtal = 0;
11086 		sc->sc_xtal_latency = 500;
11087 		sc->sc_tx_with_siso_diversity = 0;
11088 		sc->sc_uhb_supported = 0;
11089 		break;
11090 	case PCI_PRODUCT_INTEL_WL_22500_4:
11091 	case PCI_PRODUCT_INTEL_WL_22500_7:
11092 	case PCI_PRODUCT_INTEL_WL_22500_8:
11093 		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
11094 			sc->sc_fwname = IWX_QU_C_HR_B_FW;
11095 		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
11096 			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
11097 		else
11098 			sc->sc_fwname = IWX_QU_B_HR_B_FW;
11099 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
11100 		sc->sc_integrated = 1;
11101 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
11102 		sc->sc_low_latency_xtal = 0;
11103 		sc->sc_xtal_latency = 1820;
11104 		sc->sc_tx_with_siso_diversity = 0;
11105 		sc->sc_uhb_supported = 0;
11106 		break;
11107 	case PCI_PRODUCT_INTEL_WL_22500_6:
11108 		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
11109 			sc->sc_fwname = IWX_QU_C_HR_B_FW;
11110 		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
11111 			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
11112 		else
11113 			sc->sc_fwname = IWX_QU_B_HR_B_FW;
11114 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
11115 		sc->sc_integrated = 1;
11116 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
11117 		sc->sc_low_latency_xtal = 1;
11118 		sc->sc_xtal_latency = 12000;
11119 		sc->sc_tx_with_siso_diversity = 0;
11120 		sc->sc_uhb_supported = 0;
11121 		break;
11122 	case PCI_PRODUCT_INTEL_WL_22500_9:
11123 	case PCI_PRODUCT_INTEL_WL_22500_10:
11124 	case PCI_PRODUCT_INTEL_WL_22500_11:
11125 	case PCI_PRODUCT_INTEL_WL_22500_12:
11126 	case PCI_PRODUCT_INTEL_WL_22500_13:
11127 	/* _14 is an MA device, not yet supported */
11128 	case PCI_PRODUCT_INTEL_WL_22500_15:
11129 	case PCI_PRODUCT_INTEL_WL_22500_16:
11130 		sc->sc_fwname = IWX_SO_A_GF_A_FW;
11131 		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
11132 		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
11133 		sc->sc_integrated = 0;
11134 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
11135 		sc->sc_low_latency_xtal = 0;
11136 		sc->sc_xtal_latency = 0;
11137 		sc->sc_tx_with_siso_diversity = 0;
11138 		sc->sc_uhb_supported = 1;
11139 		break;
11140 	default:
11141 		printf("%s: unknown adapter type\n", DEVNAME(sc));
11142 		return;
11143 	}
11144 
11145 	cfg = iwx_find_device_cfg(sc);
11146 	if (cfg) {
11147 		sc->sc_fwname = cfg->fw_name;
11148 		sc->sc_pnvm_name = cfg->pnvm_name;
11149 		sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
11150 		sc->sc_uhb_supported = cfg->uhb_supported;
11151 		if (cfg->xtal_latency) {
11152 			sc->sc_xtal_latency = cfg->xtal_latency;
11153 			sc->sc_low_latency_xtal = cfg->low_latency_xtal;
11154 		}
11155 	}
11156 
11157 	sc->mac_addr_from_csr = 0x380; /* differs on BZ hw generation */
11158 
11159 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
11160 		sc->sc_umac_prph_offset = 0x300000;
11161 		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX_GEN3;
11162 	} else
11163 		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX;
11164 
11165 	/* Allocate DMA memory for loading firmware. */
11166 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
11167 		ctxt_info_size = sizeof(struct iwx_context_info_gen3);
11168 	else
11169 		ctxt_info_size = sizeof(struct iwx_context_info);
11170 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
11171 	    ctxt_info_size, 0);
11172 	if (err) {
11173 		printf("%s: could not allocate memory for loading firmware\n",
11174 		    DEVNAME(sc));
11175 		return;
11176 	}
11177 
11178 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
11179 		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_scratch_dma,
11180 		    sizeof(struct iwx_prph_scratch), 0);
11181 		if (err) {
11182 			printf("%s: could not allocate prph scratch memory\n",
11183 			    DEVNAME(sc));
11184 			goto fail1;
11185 		}
11186 
11187 		/*
11188 		 * Allocate prph information. The driver doesn't use this.
11189 		 * We use the second half of this page to give the device
11190 		 * some dummy TR/CR tail pointers - which shouldn't be
11191 		 * necessary as we don't use this, but the hardware still
11192 		 * reads/writes there and we can't let it go do that with
11193 		 * a NULL pointer.
11194 		 */
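		/*
		 * The KASSERT below ensures the prph info structure fits
		 * within the first half of the page and thus cannot
		 * overlap the dummy tail pointer area in the second half.
		 */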
11195 		KASSERT(sizeof(struct iwx_prph_info) < PAGE_SIZE / 2);
11196 		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma,
11197 		    PAGE_SIZE, 0);
11198 		if (err) {
11199 			printf("%s: could not allocate prph info memory\n",
11200 			    DEVNAME(sc));
11201 			goto fail1;
11202 		}
11203 	}
11204 
11205 	/* Allocate interrupt cause table (ICT). */
11206 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
11207 	    IWX_ICT_SIZE, 1 << IWX_ICT_PADDR_SHIFT);
11208 	if (err) {
11209 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
11210 		goto fail1;
11211 	}
11212 
11213 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
11214 		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
11215 		if (err) {
11216 			printf("%s: could not allocate TX ring %d\n",
11217 			    DEVNAME(sc), txq_i);
11218 			goto fail4;
11219 		}
11220 	}
11221 
11222 	err = iwx_alloc_rx_ring(sc, &sc->rxq);
11223 	if (err) {
11224 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
11225 		goto fail4;
11226 	}
11227 
11228 	sc->sc_nswq = taskq_create("iwxns", 1, IPL_NET, 0);
11229 	if (sc->sc_nswq == NULL)
11230 		goto fail4;
11231 
11232 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
11233 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
11234 	ic->ic_state = IEEE80211_S_INIT;
11235 
11236 	/* Set device capabilities. */
11237 	ic->ic_caps =
11238 	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
11239 	    IEEE80211_C_ADDBA_OFFLOAD | /* device sends ADDBA/DELBA frames */
11240 	    IEEE80211_C_WEP |		/* WEP */
11241 	    IEEE80211_C_RSN |		/* WPA/RSN */
11242 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
11243 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
11244 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
11245 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
11246 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
11247 
11248 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
11249 	ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
11250 	ic->ic_htcaps |=
11251 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
11252 	ic->ic_htxcaps = 0;
11253 	ic->ic_txbfcaps = 0;
11254 	ic->ic_aselcaps = 0;
11255 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
11256 
11257 	ic->ic_vhtcaps = IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895 |
11258 	    (IEEE80211_VHTCAP_MAX_AMPDU_LEN_64K <<
11259 	    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT) |
11260 	    (IEEE80211_VHTCAP_CHAN_WIDTH_80 <<
11261 	     IEEE80211_VHTCAP_CHAN_WIDTH_SHIFT) | IEEE80211_VHTCAP_SGI80 |
11262 	    IEEE80211_VHTCAP_RX_ANT_PATTERN | IEEE80211_VHTCAP_TX_ANT_PATTERN;
11263 
11264 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
11265 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
11266 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
11267 
11268 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
11269 		sc->sc_phyctxt[i].id = i;
11270 		sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
11271 		sc->sc_phyctxt[i].vht_chan_width =
11272 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT;
11273 	}
11274 
11275 	/* IBSS channel undefined for now. */
11276 	ic->ic_ibss_chan = &ic->ic_channels[1];
11277 
11278 	ic->ic_max_rssi = IWX_MAX_DBM - IWX_MIN_DBM;
11279 
11280 	ifp->if_softc = sc;
11281 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
11282 	ifp->if_ioctl = iwx_ioctl;
11283 	ifp->if_start = iwx_start;
11284 	ifp->if_watchdog = iwx_watchdog;
11285 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
11286 
11287 	if_attach(ifp);
11288 	ieee80211_ifattach(ifp);
11289 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
11290 
11291 #if NBPFILTER > 0
11292 	iwx_radiotap_attach(sc);
11293 #endif
11294 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
11295 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
11296 		rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
11297 		rxba->sc = sc;
11298 		timeout_set(&rxba->session_timer, iwx_rx_ba_session_expired,
11299 		    rxba);
11300 		timeout_set(&rxba->reorder_buf.reorder_timer,
11301 		    iwx_reorder_timer_expired, &rxba->reorder_buf);
11302 		for (j = 0; j < nitems(rxba->entries); j++)
11303 			ml_init(&rxba->entries[j].frames);
11304 	}
11305 	task_set(&sc->init_task, iwx_init_task, sc);
11306 	task_set(&sc->newstate_task, iwx_newstate_task, sc);
11307 	task_set(&sc->ba_task, iwx_ba_task, sc);
11308 	task_set(&sc->setkey_task, iwx_setkey_task, sc);
11309 	task_set(&sc->mac_ctxt_task, iwx_mac_ctxt_task, sc);
11310 	task_set(&sc->phy_ctxt_task, iwx_phy_ctxt_task, sc);
11311 	task_set(&sc->bgscan_done_task, iwx_bgscan_done_task, sc);
11312 
11313 	ic->ic_node_alloc = iwx_node_alloc;
11314 	ic->ic_bgscan_start = iwx_bgscan;
11315 	ic->ic_bgscan_done = iwx_bgscan_done;
11316 	ic->ic_set_key = iwx_set_key;
11317 	ic->ic_delete_key = iwx_delete_key;
11318 
11319 	/* Override 802.11 state transition machine. */
11320 	sc->sc_newstate = ic->ic_newstate;
11321 	ic->ic_newstate = iwx_newstate;
11322 	ic->ic_updatechan = iwx_updatechan;
11323 	ic->ic_updateprot = iwx_updateprot;
11324 	ic->ic_updateslot = iwx_updateslot;
11325 	ic->ic_updateedca = iwx_updateedca;
11326 	ic->ic_updatedtim = iwx_updatedtim;
11327 	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
11328 	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
11329 	ic->ic_ampdu_tx_start = iwx_ampdu_tx_start;
11330 	ic->ic_ampdu_tx_stop = NULL;
11331 	/*
11332 	 * We cannot read the MAC address without loading the
11333 	 * firmware from disk. Postpone until mountroot is done.
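	 * iwx_attach_hook() will then run iwx_preinit(), which loads the
	 * firmware and reads the MAC address, once the root filesystem
	 * is accessible.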
11334 	 */
11335 	config_mountroot(self, iwx_attach_hook);
11336 
11337 	return;
11338 
11339 fail4:	while (--txq_i >= 0)
11340 		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
11341 	iwx_free_rx_ring(sc, &sc->rxq);
11342 	if (sc->ict_dma.vaddr != NULL)
11343 		iwx_dma_contig_free(&sc->ict_dma);
11344 
11345 fail1:	iwx_dma_contig_free(&sc->ctxt_info_dma);
11346 	iwx_dma_contig_free(&sc->prph_scratch_dma);
11347 	iwx_dma_contig_free(&sc->prph_info_dma);
11348 	return;
11349 }
11350 
11351 #if NBPFILTER > 0
11352 void
11353 iwx_radiotap_attach(struct iwx_softc *sc)
11354 {
11355 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
11356 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
11357 
11358 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
11359 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
11360 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);
11361 
11362 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
11363 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
11364 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
11365 }
11366 #endif
11367 
11368 void
11369 iwx_init_task(void *arg1)
11370 {
11371 	struct iwx_softc *sc = arg1;
11372 	struct ifnet *ifp = &sc->sc_ic.ic_if;
11373 	int s = splnet();
11374 	int generation = sc->sc_generation;
11375 	int fatal = (sc->sc_flags & (IWX_FLAG_HW_ERR | IWX_FLAG_RFKILL));
11376 
11377 	rw_enter_write(&sc->ioctl_rwl);
11378 	if (generation != sc->sc_generation) {
11379 		rw_exit(&sc->ioctl_rwl);
11380 		splx(s);
11381 		return;
11382 	}
11383 
11384 	if (ifp->if_flags & IFF_RUNNING)
11385 		iwx_stop(ifp);
11386 	else
11387 		sc->sc_flags &= ~IWX_FLAG_HW_ERR;
11388 
11389 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
11390 		iwx_init(ifp);
11391 
11392 	rw_exit(&sc->ioctl_rwl);
11393 	splx(s);
11394 }
11395 
11396 void
11397 iwx_resume(struct iwx_softc *sc)
11398 {
11399 	pcireg_t reg;
11400 
11401 	/*
11402 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
11403 	 * PCI Tx retries from interfering with C3 CPU state.
11404 	 */
11405 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
11406 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
11407 
11408 	if (!sc->sc_msix) {
11409 		/*
		 * Hardware bug workaround: interrupts might not be
		 * delivered while the PCI INTx-disable bit is set,
		 * so make sure it is clear.
		 */
11410 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
11411 		    PCI_COMMAND_STATUS_REG);
11412 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
11413 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
11414 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
11415 		    PCI_COMMAND_STATUS_REG, reg);
11416 	}
11417 
11418 	iwx_disable_interrupts(sc);
11419 }
11420 
11421 int
11422 iwx_wakeup(struct iwx_softc *sc)
11423 {
11424 	struct ieee80211com *ic = &sc->sc_ic;
11425 	struct ifnet *ifp = &sc->sc_ic.ic_if;
11426 	int err;
11427 
11428 	rw_enter_write(&sc->ioctl_rwl);
11429 
11430 	err = iwx_start_hw(sc);
11431 	if (err) {
11432 		rw_exit(&sc->ioctl_rwl);
11433 		return err;
11434 	}
11435 
11436 	err = iwx_init_hw(sc);
11437 	if (err) {
11438 		iwx_stop_device(sc);
11439 		rw_exit(&sc->ioctl_rwl);
11440 		return err;
11441 	}
11442 
11443 	refcnt_init(&sc->task_refs);
11444 	ifq_clr_oactive(&ifp->if_snd);
11445 	ifp->if_flags |= IFF_RUNNING;
11446 
11447 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
11448 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
11449 	else
11450 		ieee80211_begin_scan(ifp);
11451 
11452 	rw_exit(&sc->ioctl_rwl);
11453 	return 0;
11454 }
11455 
11456 int
11457 iwx_activate(struct device *self, int act)
11458 {
11459 	struct iwx_softc *sc = (struct iwx_softc *)self;
11460 	struct ifnet *ifp = &sc->sc_ic.ic_if;
11461 	int err = 0;
11462 
11463 	switch (act) {
11464 	case DVACT_QUIESCE:
11465 		if (ifp->if_flags & IFF_RUNNING) {
11466 			rw_enter_write(&sc->ioctl_rwl);
11467 			iwx_stop(ifp);
11468 			rw_exit(&sc->ioctl_rwl);
11469 		}
11470 		break;
11471 	case DVACT_RESUME:
11472 		iwx_resume(sc);
11473 		break;
11474 	case DVACT_WAKEUP:
11475 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
11476 			err = iwx_wakeup(sc);
11477 			if (err)
11478 				printf("%s: could not initialize hardware\n",
11479 				    DEVNAME(sc));
11480 		}
11481 		break;
11482 	}
11483 
11484 	return 0;
11485 }
11486 
11487 struct cfdriver iwx_cd = {
11488 	NULL, "iwx", DV_IFNET
11489 };
11490 
11491 const struct cfattach iwx_ca = {
11492 	sizeof(struct iwx_softc), iwx_match, iwx_attach,
11493 	NULL, iwx_activate
11494 };
11495