/*	$OpenBSD: if_iwx.c,v 1.152 2023/01/24 16:18:22 stsp Exp $	*/

/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/endian.h>

#include <sys/refcnt.h>
#include <sys/task.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
#undef DPRINTF /* defined in ieee80211_priv.h */

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

#define IC2IFP(_ic_) (&(_ic_)->ic_if)

#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))

#ifdef IWX_DEBUG
#define DPRINTF(x)	do { if (iwx_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwx_debug >= (n)) printf x; } while (0)
int iwx_debug = 1;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#include <dev/pci/if_iwxreg.h>
#include <dev/pci/if_iwxvar.h>

const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};

#define IWX_NUM_2GHZ_CHANNELS	14
#define IWX_NUM_5GHZ_CHANNELS	37
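
/*
 * Note: channel numbers in the 6-7 GHz range above overlap with 2.4 GHz
 * channel numbers (both ranges start at 1). An entry's index within
 * iwx_nvm_channels_uhb[], relative to IWX_NUM_2GHZ_CHANNELS and
 * IWX_NUM_5GHZ_CHANNELS, determines which band it belongs to.
 */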

const struct iwx_rate {
	uint16_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWX_RIDX_CCK	0
#define IWX_RIDX_OFDM	4
#define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
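
/*
 * In the table above, 'rate' is given in units of 500 kbit/s, as in
 * net80211 rate sets (e.g. 2 = 1 Mbit/s CCK, 12 = 6 Mbit/s OFDM).
 * MIMO MCS rates have no legacy equivalent and use IWX_RATE_INVM_PLCP
 * as their legacy PLCP; likewise, CCK rates have no valid HT PLCP.
 */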

/* Convert an MCS index into an iwx_rates[] index. */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};

uint8_t	iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
uint8_t	iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
int	iwx_is_mimo_ht_plcp(uint8_t);
int	iwx_is_mimo_mcs(int);
int	iwx_store_cscheme(struct iwx_softc *, uint8_t *, size_t);
int	iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
int	iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
int	iwx_apply_debug_destination(struct iwx_softc *);
void	iwx_set_ltr(struct iwx_softc *);
int	iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
int	iwx_ctxt_info_gen3_init(struct iwx_softc *,
	    const struct iwx_fw_sects *);
void	iwx_ctxt_info_free_fw_img(struct iwx_softc *);
void	iwx_ctxt_info_free_paging(struct iwx_softc *);
int	iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
	    struct iwx_context_info_dram *);
void	iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
int	iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
	    uint8_t *, size_t);
int	iwx_set_default_calib(struct iwx_softc *, const void *);
void	iwx_fw_info_free(struct iwx_fw_info *);
int	iwx_read_firmware(struct iwx_softc *);
uint32_t iwx_prph_addr_mask(struct iwx_softc *);
uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
void	iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
void	iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
uint32_t iwx_read_umac_prph_unlocked(struct iwx_softc *, uint32_t);
uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
void	iwx_write_umac_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
void	iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
int	iwx_write_mem(struct iwx_softc *, uint32_t, const void *, int);
int	iwx_write_mem32(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
int	iwx_nic_lock(struct iwx_softc *);
void	iwx_nic_assert_locked(struct iwx_softc *);
void	iwx_nic_unlock(struct iwx_softc *);
int	iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
	    uint32_t);
int	iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *, bus_size_t,
	    bus_size_t);
void	iwx_dma_contig_free(struct iwx_dma_info *);
int	iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
void	iwx_disable_rx_dma(struct iwx_softc *);
void	iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
void	iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
int	iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
void	iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_enable_rfkill_int(struct iwx_softc *);
int	iwx_check_rfkill(struct iwx_softc *);
void	iwx_enable_interrupts(struct iwx_softc *);
void	iwx_enable_fwload_interrupt(struct iwx_softc *);
void	iwx_restore_interrupts(struct iwx_softc *);
void	iwx_disable_interrupts(struct iwx_softc *);
void	iwx_ict_reset(struct iwx_softc *);
int	iwx_set_hw_ready(struct iwx_softc *);
int	iwx_prepare_card_hw(struct iwx_softc *);
int	iwx_force_power_gating(struct iwx_softc *);
void	iwx_apm_config(struct iwx_softc *);
int	iwx_apm_init(struct iwx_softc *);
void	iwx_apm_stop(struct iwx_softc *);
int	iwx_allow_mcast(struct iwx_softc *);
void	iwx_init_msix_hw(struct iwx_softc *);
void	iwx_conf_msix_hw(struct iwx_softc *, int);
int	iwx_clear_persistence_bit(struct iwx_softc *);
int	iwx_start_hw(struct iwx_softc *);
void	iwx_stop_device(struct iwx_softc *);
void	iwx_nic_config(struct iwx_softc *);
int	iwx_nic_rx_init(struct iwx_softc *);
int	iwx_nic_init(struct iwx_softc *);
int	iwx_enable_txq(struct iwx_softc *, int, int, int, int);
int	iwx_disable_txq(struct iwx_softc *, int, int, uint8_t);
void	iwx_post_alive(struct iwx_softc *);
int	iwx_schedule_session_protection(struct iwx_softc *, struct iwx_node *,
	    uint32_t);
void	iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
void	iwx_init_channel_map(struct iwx_softc *, uint16_t *, uint32_t *, int);
void	iwx_setup_ht_rates(struct iwx_softc *);
void	iwx_setup_vht_rates(struct iwx_softc *);
int	iwx_mimo_enabled(struct iwx_softc *);
void	iwx_mac_ctxt_task(void *);
void	iwx_phy_ctxt_task(void *);
void	iwx_updatechan(struct ieee80211com *);
void	iwx_updateprot(struct ieee80211com *);
void	iwx_updateslot(struct ieee80211com *);
void	iwx_updateedca(struct ieee80211com *);
void	iwx_updatedtim(struct ieee80211com *);
void	iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
	    uint16_t);
void	iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
int	iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
int	iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwx_rx_ba_session_expired(void *);
void	iwx_rx_bar_frame_release(struct iwx_softc *, struct iwx_rx_packet *,
	    struct mbuf_list *);
void	iwx_reorder_timer_expired(void *);
void	iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
	    uint16_t, uint16_t, int, int);
void	iwx_sta_tx_agg_start(struct iwx_softc *, struct ieee80211_node *,
	    uint8_t);
void	iwx_ba_task(void *);

void	iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
int	iwx_is_valid_mac_addr(const uint8_t *);
void	iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
int	iwx_nvm_get(struct iwx_softc *);
int	iwx_load_firmware(struct iwx_softc *);
int	iwx_start_fw(struct iwx_softc *);
int	iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
int	iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
void	iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
int	iwx_load_pnvm(struct iwx_softc *);
int	iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
int	iwx_send_phy_cfg_cmd(struct iwx_softc *);
int	iwx_load_ucode_wait_alive(struct iwx_softc *);
int	iwx_send_dqa_cmd(struct iwx_softc *);
int	iwx_run_init_mvm_ucode(struct iwx_softc *, int);
int	iwx_config_ltr(struct iwx_softc *);
void	iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int);
int	iwx_rx_addbuf(struct iwx_softc *, int, int);
int	iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
void	iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
int	iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
int	iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t,
	    struct ieee80211_rxinfo *);
int	iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
	    struct ieee80211_node *, struct ieee80211_rxinfo *);
void	iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t, int, int,
	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
void	iwx_txd_done(struct iwx_softc *, struct iwx_tx_data *);
void	iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
void	iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
void	iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
int	iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
uint8_t	iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
int	iwx_phy_ctxt_cmd_uhb_v3(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint8_t, uint8_t);
int	iwx_phy_ctxt_cmd_v3(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint8_t, uint8_t);
int	iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
int	iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
int	iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
	    const void *);
int	iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
	    uint32_t *);
int	iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
	    const void *, uint32_t *);
void	iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
void	iwx_cmd_done(struct iwx_softc *, int, int, int);
const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
	    struct ieee80211_frame *, uint16_t *, uint32_t *);
void	iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *, int,
	    uint16_t, uint16_t);
int	iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *);
int	iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
int	iwx_wait_tx_queues_empty(struct iwx_softc *);
int	iwx_drain_sta(struct iwx_softc *, struct iwx_node *, int);
int	iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
int	iwx_beacon_filter_send_cmd(struct iwx_softc *,
	    struct iwx_beacon_filter_cmd *);
int	iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
void	iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_power_cmd *);
int	iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
int	iwx_power_update_device(struct iwx_softc *);
int	iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
int	iwx_disable_beacon_filter(struct iwx_softc *);
int	iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
int	iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
int	iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
int	iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
int	iwx_config_umac_scan_reduced(struct iwx_softc *);
uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
void	iwx_scan_umac_dwell_v10(struct iwx_softc *,
	    struct iwx_scan_general_params_v10 *, int);
void	iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
	    struct iwx_scan_general_params_v10 *, uint16_t, int);
void	iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
	    struct iwx_scan_channel_params_v6 *, uint32_t, int, int);
int	iwx_umac_scan_v14(struct iwx_softc *, int);
void	iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
uint8_t	iwx_ridx2rate(struct ieee80211_rateset *, int);
int	iwx_rval2ridx(int);
void	iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *, int *);
void	iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_ctx_cmd *, uint32_t);
void	iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_data_sta *, int);
int	iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t, int);
int	iwx_clear_statistics(struct iwx_softc *);
void	iwx_add_task(struct iwx_softc *, struct taskq *, struct task *);
void	iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
int	iwx_scan(struct iwx_softc *);
int	iwx_bgscan(struct ieee80211com *);
void	iwx_bgscan_done(struct ieee80211com *,
	    struct ieee80211_node_switch_bss_arg *, size_t);
void	iwx_bgscan_done_task(void *);
int	iwx_umac_scan_abort(struct iwx_softc *);
int	iwx_scan_abort(struct iwx_softc *);
int	iwx_enable_mgmt_queue(struct iwx_softc *);
int	iwx_rs_rval2idx(uint8_t);
uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *, int);
uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
int	iwx_rs_init(struct iwx_softc *, struct iwx_node *);
int	iwx_enable_data_tx_queues(struct iwx_softc *);
int	iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
	    uint8_t);
int	iwx_auth(struct iwx_softc *);
int	iwx_deauth(struct iwx_softc *);
int	iwx_run(struct iwx_softc *);
int	iwx_run_stop(struct iwx_softc *);
struct ieee80211_node *iwx_node_alloc(struct ieee80211com *);
int	iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
	    struct ieee80211_key *);
void	iwx_setkey_task(void *);
void	iwx_delete_key(struct ieee80211com *,
	    struct ieee80211_node *, struct ieee80211_key *);
int	iwx_media_change(struct ifnet *);
void	iwx_newstate_task(void *);
int	iwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
void	iwx_endscan(struct iwx_softc *);
void	iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
	    struct ieee80211_node *);
int	iwx_sf_config(struct iwx_softc *, int);
int	iwx_send_bt_init_conf(struct iwx_softc *);
int	iwx_send_soc_conf(struct iwx_softc *);
int	iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
int	iwx_send_temp_report_ths_cmd(struct iwx_softc *);
int	iwx_init_hw(struct iwx_softc *);
int	iwx_init(struct ifnet *);
void	iwx_start(struct ifnet *);
void	iwx_stop(struct ifnet *);
void	iwx_watchdog(struct ifnet *);
int	iwx_ioctl(struct ifnet *, u_long, caddr_t);
const char *iwx_desc_lookup(uint32_t);
void	iwx_nic_error(struct iwx_softc *);
void	iwx_dump_driver_status(struct iwx_softc *);
void	iwx_nic_umac_error(struct iwx_softc *);
int	iwx_detect_duplicate(struct iwx_softc *, struct mbuf *,
	    struct iwx_rx_mpdu_desc *, struct ieee80211_rxinfo *);
int	iwx_is_sn_less(uint16_t, uint16_t, uint16_t);
void	iwx_release_frames(struct iwx_softc *, struct ieee80211_node *,
	    struct iwx_rxba_data *, struct iwx_reorder_buffer *, uint16_t,
	    struct mbuf_list *);
int	iwx_oldsn_workaround(struct iwx_softc *, struct ieee80211_node *,
	    int, struct iwx_reorder_buffer *, uint32_t, uint32_t);
int	iwx_rx_reorder(struct iwx_softc *, struct mbuf *, int,
	    struct iwx_rx_mpdu_desc *, int, int, uint32_t,
	    struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t,
	    struct mbuf_list *);
int	iwx_rx_pkt_valid(struct iwx_rx_packet *);
void	iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
	    struct mbuf_list *);
void	iwx_notif_intr(struct iwx_softc *);
int	iwx_intr(void *);
int	iwx_intr_msix(void *);
int	iwx_match(struct device *, void *, void *);
int	iwx_preinit(struct iwx_softc *);
void	iwx_attach_hook(struct device *);
const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
void	iwx_attach(struct device *, struct device *, void *);
void	iwx_init_task(void *);
int	iwx_activate(struct device *, int);
void	iwx_resume(struct iwx_softc *);
int	iwx_wakeup(struct iwx_softc *);

#if NBPFILTER > 0
void	iwx_radiotap_attach(struct iwx_softc *);
#endif

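/*
 * The firmware advertises the version of each command and notification it
 * supports in an IWX_UCODE_TLV_CMD_VERSIONS TLV (parsed by
 * iwx_read_firmware() below). These helpers look up a (group, command)
 * pair in that table.
 */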
uint8_t
iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwx_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->cmd_ver;
	}

	return IWX_FW_CMD_VER_UNKNOWN;
}

uint8_t
iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwx_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->notif_ver;
	}

	return IWX_FW_CMD_VER_UNKNOWN;
}

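/*
 * An HT PLCP value with any bit set in the NSS (number of spatial streams)
 * mask denotes a rate using more than one spatial stream, i.e. MIMO.
 */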
int
iwx_is_mimo_ht_plcp(uint8_t ht_plcp)
{
	return (ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP &&
	    (ht_plcp & IWX_RATE_HT_MCS_NSS_MSK));
}

int
iwx_is_mimo_mcs(int mcs)
{
	int ridx = iwx_mcs2ridx[mcs];
	return iwx_is_mimo_ht_plcp(iwx_rates[ridx].ht_plcp);
}

int
iwx_store_cscheme(struct iwx_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwx_fw_cscheme_list *l = (void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

int
iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
    const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
{
	int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 0);
	if (err) {
		printf("%s: could not allocate context info DMA memory\n",
		    DEVNAME(sc));
		return err;
	}

	memcpy(dram->vaddr, sec->fws_data, sec->fws_len);

	return 0;
}

void
iwx_ctxt_info_free_paging(struct iwx_softc *sc)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i;

	if (!dram->paging)
		return;

	/* free paging */
	for (i = 0; i < dram->paging_cnt; i++)
		iwx_dma_contig_free(&dram->paging[i]);

	free(dram->paging, M_DEVBUF, dram->paging_cnt * sizeof(*dram->paging));
	dram->paging_cnt = 0;
	dram->paging = NULL;
}

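/*
 * Firmware images lay out their sections as: LMAC sections, a CPU1/CPU2
 * separator, UMAC sections, a paging separator, and paging sections.
 * Count sections from 'start' up to the next separator or end of image.
 */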
int
iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
{
	int i = 0;

	while (start < fws->fw_count &&
	       fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
	       fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, fw_cnt = 0;

	KASSERT(dram->paging == NULL);

	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
	/* add 2 due to separators */
	dram->paging_cnt = iwx_get_num_sections(fws,
	    dram->lmac_cnt + dram->umac_cnt + 2);

	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
	    sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->fw) {
		printf("%s: could not allocate memory for firmware sections\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->paging) {
		printf("%s: could not allocate memory for firmware paging\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	/* initialize lmac sections */
	for (i = 0; i < dram->lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
		    &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
			htole64(dram->fw[fw_cnt].paddr);
		DPRINTF(("%s: firmware LMAC section %d at 0x%llx size %lld\n", __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size));
		fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < dram->umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
			htole64(dram->fw[fw_cnt].paddr);
		DPRINTF(("%s: firmware UMAC section %d at 0x%llx size %lld\n", __func__, i,
			(unsigned long long)dram->fw[fw_cnt].paddr,
			(unsigned long long)dram->fw[fw_cnt].size));
		fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory is kept separate from dram->fw because its lifetime
	 * differs from that of the umac and lmac sections: firmware section
	 * memory may be released once the firmware is alive, while paging
	 * memory can only be freed when the device goes down.
	 * The firmware image is therefore indexed differently here: fw_cnt
	 * no longer advances, so the loop counter is added to it instead.
	 */
	for (i = 0; i < dram->paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		DPRINTF(("%s: firmware paging section %d at 0x%llx size %lld\n", __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size));
	}

	return 0;
}

void
iwx_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	/*
	 * Starting with major version 35 the Linux driver prints the minor
	 * version in hexadecimal.
	 */
	if (major >= 35)
		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
	else
		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
}

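/*
 * Allocate a DMA buffer for the firmware monitor, trying power-of-two
 * sizes from 2^max_power bytes down to 2^min_power bytes until an
 * allocation succeeds.
 */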
int
iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
    uint8_t min_power)
{
	struct iwx_dma_info *fw_mon = &sc->fw_mon;
	uint32_t size = 0;
	uint8_t power;
	int err;

	if (fw_mon->size)
		return 0;

	for (power = max_power; power >= min_power; power--) {
		size = (1 << power);

		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
		if (err)
			continue;

		DPRINTF(("%s: allocated 0x%08x bytes for firmware monitor.\n",
			 DEVNAME(sc), size));
		break;
	}

	if (err) {
		fw_mon->size = 0;
		return err;
	}

	if (power != max_power)
		DPRINTF(("%s: Sorry - debug buffer is only %luK while you requested %luK\n",
			DEVNAME(sc), (unsigned long)(1 << (power - 10)),
			(unsigned long)(1 << (max_power - 10))));

	return 0;
}

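/*
 * The firmware debug TLV appears to encode the monitor buffer size as a
 * power-of-two exponent biased by 11, i.e. a 2KB minimum; 2^26 (64MB)
 * is the largest buffer this driver will attempt to allocate.
 */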
int
iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (max_power > 26) {
		DPRINTF(("%s: External buffer size for monitor is too big %d, "
		    "check the FW TLV\n", DEVNAME(sc), max_power));
		return 0;
	}

	if (sc->fw_mon.size)
		return 0;

	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
}

int
iwx_apply_debug_destination(struct iwx_softc *sc)
{
	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
	int i, err;
	uint8_t mon_mode, size_power, base_shift, end_shift;
	uint32_t base_reg, end_reg;

	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
	mon_mode = dest_v1->monitor_mode;
	size_power = dest_v1->size_power;
	base_reg = le32toh(dest_v1->base_reg);
	end_reg = le32toh(dest_v1->end_reg);
	base_shift = dest_v1->base_shift;
	end_shift = dest_v1->end_shift;

	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));

	if (mon_mode == EXTERNAL_MODE) {
		err = iwx_alloc_fw_monitor(sc, size_power);
		if (err)
			return err;
	}

	if (!iwx_nic_lock(sc))
		return EBUSY;

	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
		uint32_t addr, val;
		uint8_t op;

		addr = le32toh(dest_v1->reg_ops[i].addr);
		val = le32toh(dest_v1->reg_ops[i].val);
		op = dest_v1->reg_ops[i].op;

		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
		switch (op) {
		case CSR_ASSIGN:
			IWX_WRITE(sc, addr, val);
			break;
		case CSR_SETBIT:
			IWX_SETBITS(sc, addr, (1 << val));
			break;
		case CSR_CLEARBIT:
			IWX_CLRBITS(sc, addr, (1 << val));
			break;
		case PRPH_ASSIGN:
			iwx_write_prph(sc, addr, val);
			break;
		case PRPH_SETBIT:
			err = iwx_set_bits_prph(sc, addr, (1 << val));
			if (err) {
				/* Don't leak the NIC lock on error. */
				iwx_nic_unlock(sc);
				return err;
			}
			break;
		case PRPH_CLEARBIT:
			err = iwx_clear_bits_prph(sc, addr, (1 << val));
			if (err) {
				iwx_nic_unlock(sc);
				return err;
			}
			break;
		case PRPH_BLOCKBIT:
			if (iwx_read_prph(sc, addr) & (1 << val))
				goto monitor;
			break;
		default:
			DPRINTF(("%s: FW debug - unknown OP %d\n",
			    DEVNAME(sc), op));
			break;
		}
	}

monitor:
	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
		/* base_reg and end_reg were already byte-swapped above. */
		iwx_write_prph(sc, base_reg,
		    sc->fw_mon.paddr >> base_shift);
		iwx_write_prph(sc, end_reg,
		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
		    >> end_shift);
	}

	iwx_nic_unlock(sc);
	return 0;
}

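/*
 * Program the LTR (PCIe Latency Tolerance Reporting) value used while
 * the firmware boots; see the comment on the ~250 usec value below.
 */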
void
iwx_set_ltr(struct iwx_softc *sc)
{
	uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
	    ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
	    (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);

	/*
	 * To work around hardware latency issues during the boot process,
	 * initialize the LTR to ~250 usec (see ltr_val above).
	 * The firmware initializes this again later (to a smaller value).
	 */
	if (!sc->sc_integrated) {
		IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
	} else if (sc->sc_integrated &&
		   sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
		iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
		    IWX_HPM_MAC_LRT_ENABLE_ALL);
		iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
	}
}

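/*
 * Fill in the context info structure which self-booting firmware reads
 * via DMA at startup on gen2 devices: version information, RX/TX ring
 * addresses, firmware section addresses, and an optional debug
 * destination. Then kick off the firmware self load.
 */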
int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info, 0, sizeof(*ctxt_info));

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF);
	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
			(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
			 IWX_CTXT_INFO_RB_CB_SIZE_POS) |
			(IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure debug destination, if one exists. */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}

	iwx_set_ltr(sc);
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}

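/*
 * Gen3 devices (AX210 and later) boot differently: most configuration
 * lives in a separate "prph scratch" area, the context info structure
 * mainly carries DMA base addresses, and an image loader (IML) shipped
 * inside the firmware file must be made available to the device as well.
 */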
int
iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info_gen3 *ctxt_info_gen3;
	struct iwx_prph_scratch *prph_scratch;
	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	uint16_t cb_size;
	uint32_t control_flags, scratch_size;
	uint64_t paddr;
	int err;

	if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
		printf("%s: no image loader found in firmware file\n",
		    DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return EINVAL;
	}

	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
	    sc->sc_fw.iml_len, 0);
	if (err) {
		printf("%s: could not allocate DMA memory for "
		    "firmware image loader\n", DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return ENOMEM;
	}

	prph_scratch = sc->prph_scratch_dma.vaddr;
	memset(prph_scratch, 0, sizeof(*prph_scratch));
	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
	prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);

	control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
	    IWX_PRPH_SCRATCH_MTR_MODE |
	    (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
	if (sc->sc_imr_enabled)
		control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
	prph_sc_ctrl->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
	    htole64(sc->rxq.free_desc_dma.paddr);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
	if (err) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
	ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
	ctxt_info_gen3->prph_scratch_base_addr =
	    htole64(sc->prph_scratch_dma.paddr);
	scratch_size = sizeof(*prph_scratch);
	ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
	    htole64(sc->rxq.stat_dma.paddr);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
	cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
	ctxt_info_gen3->mtr_size = htole16(cb_size);
	cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
	ctxt_info_gen3->mcr_size = htole16(cb_size);

	memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);

	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);

	paddr = sc->iml_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
	IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);

	IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
		    IWX_CSR_AUTO_FUNC_BOOT_ENA);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}
	iwx_set_ltr(sc);
	iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */
	return 0;
}

void
iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i;

	if (!dram->fw)
		return;

	for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
		iwx_dma_contig_free(&dram->fw[i]);

	free(dram->fw, M_DEVBUF,
	    (dram->lmac_cnt + dram->umac_cnt) * sizeof(*dram->fw));
	dram->lmac_cnt = 0;
	dram->umac_cnt = 0;
	dram->fw = NULL;
}

int
iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwx_fw_sects *fws;
	struct iwx_fw_onesect *fwone;

	if (type >= IWX_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	DPRINTF(("%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count));
	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32 bits are the device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

#define IWX_DEFAULT_SCAN_CHANNELS	40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWX_MAX_SCAN_CHANNELS		67 /* as of iwx-cc-a0-62 firmware */

struct iwx_tlv_calib_data {
	uint32_t ucode_type;
	struct iwx_tlv_calib_ctrl calib;
} __packed;

int
iwx_set_default_calib(struct iwx_softc *sc, const void *data)
{
	const struct iwx_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWX_UCODE_TYPE_MAX)
		return EINVAL;

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

void
iwx_fw_info_free(struct iwx_fw_info *fw)
{
	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
	fw->fw_rawdata = NULL;
	fw->fw_rawsize = 0;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
	free(fw->iml, M_DEVBUF, fw->iml_len);
	fw->iml = NULL;
	fw->iml_len = 0;
}

#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000

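/*
 * Load the firmware image via loadfirmware(9) and parse its TLV records,
 * storing ucode sections, capability flags, and debug parameters in the
 * softc. Concurrent callers sleep until the first parse attempt finishes.
 */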
1174 int
1175 iwx_read_firmware(struct iwx_softc *sc)
1176 {
1177 	struct ieee80211com *ic = &sc->sc_ic;
1178 	struct iwx_fw_info *fw = &sc->sc_fw;
1179 	struct iwx_tlv_ucode_header *uhdr;
1180 	struct iwx_ucode_tlv tlv;
1181 	uint32_t tlv_type;
1182 	uint8_t *data;
1183 	int err;
1184 	size_t len;
1185 
1186 	if (fw->fw_status == IWX_FW_STATUS_DONE)
1187 		return 0;
1188 
1189 	while (fw->fw_status == IWX_FW_STATUS_INPROGRESS)
1190 		tsleep_nsec(&sc->sc_fw, 0, "iwxfwp", INFSLP);
1191 	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
1192 
1193 	if (fw->fw_rawdata != NULL)
1194 		iwx_fw_info_free(fw);
1195 
1196 	err = loadfirmware(sc->sc_fwname,
1197 	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
1198 	if (err) {
1199 		printf("%s: could not read firmware %s (error %d)\n",
1200 		    DEVNAME(sc), sc->sc_fwname, err);
1201 		goto out;
1202 	}
1203 
1204 	if (ic->ic_if.if_flags & IFF_DEBUG)
1205 		printf("%s: using firmware %s\n", DEVNAME(sc), sc->sc_fwname);
1206 
1207 	sc->sc_capaflags = 0;
1208 	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
1209 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
1210 	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
1211 	sc->n_cmd_versions = 0;
1212 
1213 	uhdr = (void *)fw->fw_rawdata;
1214 	if (*(uint32_t *)fw->fw_rawdata != 0
1215 	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
1216 		printf("%s: invalid firmware %s\n",
1217 		    DEVNAME(sc), sc->sc_fwname);
1218 		err = EINVAL;
1219 		goto out;
1220 	}
1221 
1222 	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1223 	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
1224 	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
1225 	    IWX_UCODE_API(le32toh(uhdr->ver)));
1226 
1227 	data = uhdr->data;
1228 	len = fw->fw_rawsize - sizeof(*uhdr);
1229 
1230 	while (len >= sizeof(tlv)) {
1231 		size_t tlv_len;
1232 		void *tlv_data;
1233 
1234 		memcpy(&tlv, data, sizeof(tlv));
1235 		tlv_len = le32toh(tlv.length);
1236 		tlv_type = le32toh(tlv.type);
1237 
1238 		len -= sizeof(tlv);
1239 		data += sizeof(tlv);
1240 		tlv_data = data;
1241 
1242 		if (len < tlv_len) {
1243 			printf("%s: firmware too short: %zu bytes\n",
1244 			    DEVNAME(sc), len);
1245 			err = EINVAL;
1246 			goto parse_out;
1247 		}
1248 
1249 		switch (tlv_type) {
1250 		case IWX_UCODE_TLV_PROBE_MAX_LEN:
1251 			if (tlv_len < sizeof(uint32_t)) {
1252 				err = EINVAL;
1253 				goto parse_out;
1254 			}
1255 			sc->sc_capa_max_probe_len
1256 			    = le32toh(*(uint32_t *)tlv_data);
1257 			if (sc->sc_capa_max_probe_len >
1258 			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
1259 				err = EINVAL;
1260 				goto parse_out;
1261 			}
1262 			break;
1263 		case IWX_UCODE_TLV_PAN:
1264 			if (tlv_len) {
1265 				err = EINVAL;
1266 				goto parse_out;
1267 			}
1268 			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
1269 			break;
1270 		case IWX_UCODE_TLV_FLAGS:
1271 			if (tlv_len < sizeof(uint32_t)) {
1272 				err = EINVAL;
1273 				goto parse_out;
1274 			}
1275 			/*
1276 			 * Apparently there can be many flags, but Linux driver
1277 			 * parses only the first one, and so do we.
1278 			 *
1279 			 * XXX: why does this override IWX_UCODE_TLV_PAN?
1280 			 * Intentional or a bug?  Observations from
1281 			 * current firmware file:
1282 			 *  1) TLV_PAN is parsed first
1283 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
1284 			 * ==> this resets TLV_PAN to itself... hnnnk
1285 			 */
1286 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
1287 			break;
1288 		case IWX_UCODE_TLV_CSCHEME:
1289 			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
1290 			if (err)
1291 				goto parse_out;
1292 			break;
1293 		case IWX_UCODE_TLV_NUM_OF_CPU: {
1294 			uint32_t num_cpu;
1295 			if (tlv_len != sizeof(uint32_t)) {
1296 				err = EINVAL;
1297 				goto parse_out;
1298 			}
1299 			num_cpu = le32toh(*(uint32_t *)tlv_data);
1300 			if (num_cpu < 1 || num_cpu > 2) {
1301 				err = EINVAL;
1302 				goto parse_out;
1303 			}
1304 			break;
1305 		}
1306 		case IWX_UCODE_TLV_SEC_RT:
1307 			err = iwx_firmware_store_section(sc,
1308 			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
1309 			if (err)
1310 				goto parse_out;
1311 			break;
1312 		case IWX_UCODE_TLV_SEC_INIT:
1313 			err = iwx_firmware_store_section(sc,
1314 			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
1315 			if (err)
1316 				goto parse_out;
1317 			break;
1318 		case IWX_UCODE_TLV_SEC_WOWLAN:
1319 			err = iwx_firmware_store_section(sc,
1320 			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
1321 			if (err)
1322 				goto parse_out;
1323 			break;
1324 		case IWX_UCODE_TLV_DEF_CALIB:
1325 			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
1326 				err = EINVAL;
1327 				goto parse_out;
1328 			}
1329 			err = iwx_set_default_calib(sc, tlv_data);
1330 			if (err)
1331 				goto parse_out;
1332 			break;
1333 		case IWX_UCODE_TLV_PHY_SKU:
1334 			if (tlv_len != sizeof(uint32_t)) {
1335 				err = EINVAL;
1336 				goto parse_out;
1337 			}
1338 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
1339 			break;
1340 
1341 		case IWX_UCODE_TLV_API_CHANGES_SET: {
1342 			struct iwx_ucode_api *api;
1343 			int idx, i;
1344 			if (tlv_len != sizeof(*api)) {
1345 				err = EINVAL;
1346 				goto parse_out;
1347 			}
1348 			api = (struct iwx_ucode_api *)tlv_data;
1349 			idx = le32toh(api->api_index);
1350 			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
1351 				err = EINVAL;
1352 				goto parse_out;
1353 			}
1354 			for (i = 0; i < 32; i++) {
1355 				if ((le32toh(api->api_flags) & (1 << i)) == 0)
1356 					continue;
1357 				setbit(sc->sc_ucode_api, i + (32 * idx));
1358 			}
1359 			break;
1360 		}
1361 
1362 		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
1363 			struct iwx_ucode_capa *capa;
1364 			int idx, i;
1365 			if (tlv_len != sizeof(*capa)) {
1366 				err = EINVAL;
1367 				goto parse_out;
1368 			}
1369 			capa = (struct iwx_ucode_capa *)tlv_data;
1370 			idx = le32toh(capa->api_index);
1371 			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
1372 				goto parse_out;
1373 			}
1374 			for (i = 0; i < 32; i++) {
1375 				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
1376 					continue;
1377 				setbit(sc->sc_enabled_capa, i + (32 * idx));
1378 			}
1379 			break;
1380 		}
1381 
1382 		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
1383 		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
1384 			/* ignore, not used by current driver */
1385 			break;
1386 
1387 		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
1388 			err = iwx_firmware_store_section(sc,
1389 			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
1390 			    tlv_len);
1391 			if (err)
1392 				goto parse_out;
1393 			break;
1394 
1395 		case IWX_UCODE_TLV_PAGING:
1396 			if (tlv_len != sizeof(uint32_t)) {
1397 				err = EINVAL;
1398 				goto parse_out;
1399 			}
1400 			break;
1401 
1402 		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
1403 			if (tlv_len != sizeof(uint32_t)) {
1404 				err = EINVAL;
1405 				goto parse_out;
1406 			}
1407 			sc->sc_capa_n_scan_channels =
1408 			  le32toh(*(uint32_t *)tlv_data);
1409 			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
1410 				err = ERANGE;
1411 				goto parse_out;
1412 			}
1413 			break;
1414 
1415 		case IWX_UCODE_TLV_FW_VERSION:
1416 			if (tlv_len != sizeof(uint32_t) * 3) {
1417 				err = EINVAL;
1418 				goto parse_out;
1419 			}
1420 
1421 			iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1422 			    le32toh(((uint32_t *)tlv_data)[0]),
1423 			    le32toh(((uint32_t *)tlv_data)[1]),
1424 			    le32toh(((uint32_t *)tlv_data)[2]));
1425 			break;
1426 
1427 		case IWX_UCODE_TLV_FW_DBG_DEST: {
1428 			struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
1429 
1430 			fw->dbg_dest_ver = (uint8_t *)tlv_data;
1431 			if (*fw->dbg_dest_ver != 0) {
1432 				err = EINVAL;
1433 				goto parse_out;
1434 			}
1435 
1436 			if (fw->dbg_dest_tlv_init)
1437 				break;
1438 			fw->dbg_dest_tlv_init = true;
1439 
1440 			dest_v1 = (void *)tlv_data;
1441 			fw->dbg_dest_tlv_v1 = dest_v1;
1442 			fw->n_dest_reg = tlv_len -
1443 			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
1444 			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
1445 			DPRINTF(("%s: found debug dest; n_dest_reg=%d\n", __func__, fw->n_dest_reg));
1446 			break;
1447 		}
1448 
1449 		case IWX_UCODE_TLV_FW_DBG_CONF: {
1450 			struct iwx_fw_dbg_conf_tlv *conf = (void *)tlv_data;
1451 
1452 			if (!fw->dbg_dest_tlv_init ||
1453 			    conf->id >= nitems(fw->dbg_conf_tlv) ||
1454 			    fw->dbg_conf_tlv[conf->id] != NULL)
1455 				break;
1456 
1457 			DPRINTF(("Found debug configuration: %d\n", conf->id));
1458 			fw->dbg_conf_tlv[conf->id] = conf;
1459 			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
1460 			break;
1461 		}
1462 
1463 		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
1464 			struct iwx_umac_debug_addrs *dbg_ptrs =
1465 				(void *)tlv_data;
1466 
1467 			if (tlv_len != sizeof(*dbg_ptrs)) {
1468 				err = EINVAL;
1469 				goto parse_out;
1470 			}
1471 			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1472 				break;
1473 			sc->sc_uc.uc_umac_error_event_table =
1474 				le32toh(dbg_ptrs->error_info_addr) &
1475 				~IWX_FW_ADDR_CACHE_CONTROL;
1476 			sc->sc_uc.error_event_table_tlv_status |=
1477 				IWX_ERROR_EVENT_TABLE_UMAC;
1478 			break;
1479 		}
1480 
1481 		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
1482 			struct iwx_lmac_debug_addrs *dbg_ptrs =
1483 				(void *)tlv_data;
1484 
1485 			if (tlv_len != sizeof(*dbg_ptrs)) {
1486 				err = EINVAL;
1487 				goto parse_out;
1488 			}
1489 			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1490 				break;
1491 			sc->sc_uc.uc_lmac_error_event_table[0] =
1492 				le32toh(dbg_ptrs->error_event_table_ptr) &
1493 				~IWX_FW_ADDR_CACHE_CONTROL;
1494 			sc->sc_uc.error_event_table_tlv_status |=
1495 				IWX_ERROR_EVENT_TABLE_LMAC1;
1496 			break;
1497 		}
1498 
1499 		case IWX_UCODE_TLV_FW_MEM_SEG:
1500 			break;
1501 
1502 		case IWX_UCODE_TLV_IML:
1503 			if (sc->sc_fw.iml != NULL) {
1504 				free(fw->iml, M_DEVBUF, fw->iml_len);
1505 				fw->iml_len = 0;
1506 			}
1507 			sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF,
1508 			    M_WAIT | M_CANFAIL | M_ZERO);
1509 			if (sc->sc_fw.iml == NULL) {
1510 				err = ENOMEM;
1511 				goto parse_out;
1512 			}
1513 			memcpy(sc->sc_fw.iml, tlv_data, tlv_len);
1514 			sc->sc_fw.iml_len = tlv_len;
1515 			break;
1516 
1517 		case IWX_UCODE_TLV_CMD_VERSIONS:
1518 			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
1519 				tlv_len /= sizeof(struct iwx_fw_cmd_version);
1520 				tlv_len *= sizeof(struct iwx_fw_cmd_version);
1521 			}
1522 			if (sc->n_cmd_versions != 0) {
1523 				err = EINVAL;
1524 				goto parse_out;
1525 			}
1526 			if (tlv_len > sizeof(sc->cmd_versions)) {
1527 				err = EINVAL;
1528 				goto parse_out;
1529 			}
1530 			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
1531 			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
1532 			break;
1533 
1534 		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
1535 			break;
1536 
1537 		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
1538 		case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
1539 		case IWX_UCODE_TLV_FW_NUM_STATIONS:
1540 			break;
1541 
1542 		/* undocumented TLVs found in iwx-cc-a0-46 image */
1543 		case 58:
1544 		case 0x1000003:
1545 		case 0x1000004:
1546 			break;
1547 
1548 		/* undocumented TLVs found in iwx-cc-a0-48 image */
1549 		case 0x1000000:
1550 		case 0x1000002:
1551 			break;
1552 
1553 		case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
1554 		case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
1555 		case IWX_UCODE_TLV_TYPE_HCMD:
1556 		case IWX_UCODE_TLV_TYPE_REGIONS:
1557 		case IWX_UCODE_TLV_TYPE_TRIGGERS:
1558 		case IWX_UCODE_TLV_TYPE_CONF_SET:
1559 			break;
1560 
1561 		/* undocumented TLV found in iwx-cc-a0-67 image */
1562 		case 0x100000b:
1563 			break;
1564 
1565 		default:
1566 			err = EINVAL;
1567 			goto parse_out;
1568 		}
1569 
1570 		/*
1571 		 * Check for size_t overflow and ignore missing padding at
1572 		 * end of firmware file.
1573 		 */
1574 		if (roundup(tlv_len, 4) > len)
1575 			break;
1576 
1577 		len -= roundup(tlv_len, 4);
1578 		data += roundup(tlv_len, 4);
1579 	}
1580 
1581 	KASSERT(err == 0);
1582 
1583  parse_out:
1584 	if (err) {
1585 		printf("%s: firmware parse error %d, "
1586 		    "section type %d\n", DEVNAME(sc), err, tlv_type);
1587 	}
1588 
1589  out:
1590 	if (err) {
1591 		fw->fw_status = IWX_FW_STATUS_NONE;
1592 		if (fw->fw_rawdata != NULL)
1593 			iwx_fw_info_free(fw);
1594 	} else
1595 		fw->fw_status = IWX_FW_STATUS_DONE;
1596 	wakeup(&sc->sc_fw);
1597 
1598 	return err;
1599 }
1600 
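/*
 * Access to "periphery" (PRPH) registers is indirect: the target
 * address is written to IWX_HBUS_TARG_PRPH_{R,W}ADDR and data is
 * then transferred via IWX_HBUS_TARG_PRPH_{R,W}DAT.  AX210 and
 * newer devices use a wider 24-bit PRPH address space than older
 * devices, hence the different address masks.
 */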
1601 uint32_t
1602 iwx_prph_addr_mask(struct iwx_softc *sc)
1603 {
1604 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1605 		return 0x00ffffff;
1606 	else
1607 		return 0x000fffff;
1608 }
1609 
1610 uint32_t
1611 iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1612 {
1613 	uint32_t mask = iwx_prph_addr_mask(sc);
1614 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24)));
1615 	IWX_BARRIER_READ_WRITE(sc);
1616 	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
1617 }
1618 
1619 uint32_t
1620 iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
1621 {
1622 	iwx_nic_assert_locked(sc);
1623 	return iwx_read_prph_unlocked(sc, addr);
1624 }
1625 
1626 void
1627 iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1628 {
1629 	uint32_t mask = iwx_prph_addr_mask(sc);
1630 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
1631 	IWX_BARRIER_WRITE(sc);
1632 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
1633 }
1634 
1635 void
1636 iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1637 {
1638 	iwx_nic_assert_locked(sc);
1639 	iwx_write_prph_unlocked(sc, addr, val);
1640 }
1641 
1642 void
1643 iwx_write_prph64(struct iwx_softc *sc, uint64_t addr, uint64_t val)
1644 {
1645 	iwx_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1646 	iwx_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1647 }
1648 
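/*
 * UMAC periphery registers are reached by adding a device-specific
 * offset, sc->sc_umac_prph_offset, to the nominal register address.
 */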
1649 uint32_t
1650 iwx_read_umac_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1651 {
1652 	return iwx_read_prph_unlocked(sc, addr + sc->sc_umac_prph_offset);
1653 }
1654 
1655 uint32_t
1656 iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr)
1657 {
1658 	return iwx_read_prph(sc, addr + sc->sc_umac_prph_offset);
1659 }
1660 
1661 void
1662 iwx_write_umac_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1663 {
1664 	iwx_write_prph_unlocked(sc, addr + sc->sc_umac_prph_offset, val);
1665 }
1666 
1667 void
1668 iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1669 {
1670 	iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val);
1671 }
1672 
1673 int
1674 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1675 {
1676 	int offs, err = 0;
1677 	uint32_t *vals = buf;
1678 
1679 	if (iwx_nic_lock(sc)) {
1680 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1681 		for (offs = 0; offs < dwords; offs++)
1682 			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1683 		iwx_nic_unlock(sc);
1684 	} else {
1685 		err = EBUSY;
1686 	}
1687 	return err;
1688 }
1689 
1690 int
1691 iwx_write_mem(struct iwx_softc *sc, uint32_t addr, const void *buf, int dwords)
1692 {
1693 	int offs;
1694 	const uint32_t *vals = buf;
1695 
1696 	if (iwx_nic_lock(sc)) {
1697 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WADDR, addr);
1698 		/* WADDR auto-increments */
1699 		for (offs = 0; offs < dwords; offs++) {
1700 			uint32_t val = vals ? vals[offs] : 0;
1701 			IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WDAT, val);
1702 		}
1703 		iwx_nic_unlock(sc);
1704 	} else {
1705 		return EBUSY;
1706 	}
1707 	return 0;
1708 }
1709 
1710 int
1711 iwx_write_mem32(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1712 {
1713 	return iwx_write_mem(sc, addr, &val, 1);
1714 }
1715 
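/*
 * Busy-wait until the bits selected by 'mask' in register 'reg'
 * match 'bits', polling in 10 microsecond steps.  The timeout
 * 'timo' is given in microseconds.  Returns 1 on success and 0 if
 * the timeout was exceeded.
 */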
1716 int
1717 iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1718     int timo)
1719 {
1720 	for (;;) {
1721 		if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1722 			return 1;
1723 		}
1724 		if (timo < 10) {
1725 			return 0;
1726 		}
1727 		timo -= 10;
1728 		DELAY(10);
1729 	}
1730 }
1731 
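/*
 * Acquire the device's "MAC access" request, which keeps the NIC
 * awake so that its registers may be accessed safely.  The lock is
 * counted in sc->sc_nic_locks and may be acquired recursively; the
 * request is only released once the last holder calls
 * iwx_nic_unlock().  Returns 1 on success and 0 on failure.
 *
 * The usual access pattern, as used throughout this file:
 *
 *	if (iwx_nic_lock(sc)) {
 *		val = iwx_read_prph(sc, reg);
 *		iwx_nic_unlock(sc);
 *	}
 */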
1732 int
1733 iwx_nic_lock(struct iwx_softc *sc)
1734 {
1735 	if (sc->sc_nic_locks > 0) {
1736 		iwx_nic_assert_locked(sc);
1737 		sc->sc_nic_locks++;
1738 		return 1; /* already locked */
1739 	}
1740 
1741 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1742 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1743 
1744 	DELAY(2);
1745 
1746 	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
1747 	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1748 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1749 	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1750 		sc->sc_nic_locks++;
1751 		return 1;
1752 	}
1753 
1754 	printf("%s: acquiring device failed\n", DEVNAME(sc));
1755 	return 0;
1756 }
1757 
1758 void
1759 iwx_nic_assert_locked(struct iwx_softc *sc)
1760 {
1761 	if (sc->sc_nic_locks <= 0)
1762 		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1763 }
1764 
1765 void
1766 iwx_nic_unlock(struct iwx_softc *sc)
1767 {
1768 	if (sc->sc_nic_locks > 0) {
1769 		if (--sc->sc_nic_locks == 0)
1770 			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1771 			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1772 	} else
1773 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1774 }
1775 
1776 int
1777 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1778     uint32_t mask)
1779 {
1780 	uint32_t val;
1781 
1782 	if (iwx_nic_lock(sc)) {
1783 		val = iwx_read_prph(sc, reg) & mask;
1784 		val |= bits;
1785 		iwx_write_prph(sc, reg, val);
1786 		iwx_nic_unlock(sc);
1787 		return 0;
1788 	}
1789 	return EBUSY;
1790 }
1791 
1792 int
1793 iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1794 {
1795 	return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
1796 }
1797 
1798 int
1799 iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1800 {
1801 	return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
1802 }
1803 
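/*
 * Allocate a single physically contiguous, coherent DMA buffer of
 * 'size' bytes with the given alignment, map it into kernel virtual
 * memory, and load it into a DMA map.  On failure any partially
 * created state is released via iwx_dma_contig_free().
 *
 * A minimal usage sketch; 'foo_dma' is a hypothetical iwx_dma_info
 * instance, not part of this driver:
 *
 *	struct iwx_dma_info foo_dma;
 *	err = iwx_dma_contig_alloc(sc->sc_dmat, &foo_dma, 4096, 4096);
 *	if (err)
 *		return err;
 *	...
 *	iwx_dma_contig_free(&foo_dma);
 */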
1804 int
1805 iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
1806     bus_size_t size, bus_size_t alignment)
1807 {
1808 	int nsegs, err;
1809 	caddr_t va;
1810 
1811 	dma->tag = tag;
1812 	dma->size = size;
1813 
1814 	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1815 	    &dma->map);
1816 	if (err)
1817 		goto fail;
1818 
1819 	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1820 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1821 	if (err)
1822 		goto fail;
1823 
1824 	if (nsegs > 1) {
1825 		err = ENOMEM;
1826 		goto fail;
1827 	}
1828 
1829 	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1830 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1831 	if (err)
1832 		goto fail;
1833 	dma->vaddr = va;
1834 
1835 	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1836 	    BUS_DMA_NOWAIT);
1837 	if (err)
1838 		goto fail;
1839 
1840 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1841 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1842 
1843 	return 0;
1844 
1845 fail:	iwx_dma_contig_free(dma);
1846 	return err;
1847 }
1848 
1849 void
1850 iwx_dma_contig_free(struct iwx_dma_info *dma)
1851 {
1852 	if (dma->map != NULL) {
1853 		if (dma->vaddr != NULL) {
1854 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1855 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1856 			bus_dmamap_unload(dma->tag, dma->map);
1857 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1858 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1859 			dma->vaddr = NULL;
1860 		}
1861 		bus_dmamap_destroy(dma->tag, dma->map);
1862 		dma->map = NULL;
1863 	}
1864 }
1865 
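/*
 * Each RX ring uses three DMA regions: a "free" descriptor ring
 * through which the driver hands receive buffers to the device, a
 * "used" (completion) descriptor ring through which the device
 * returns them, and a status area holding the hardware write
 * pointer.  Descriptor formats and sizes differ between AX210 and
 * older device families.
 */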
1866 int
1867 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1868 {
1869 	bus_size_t size;
1870 	int i, err;
1871 
1872 	ring->cur = 0;
1873 
1874 	/* Allocate RX descriptors (256-byte aligned). */
1875 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1876 		size = sizeof(struct iwx_rx_transfer_desc);
1877 	else
1878 		size = sizeof(uint64_t);
1879 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
1880 	    size * IWX_RX_MQ_RING_COUNT, 256);
1881 	if (err) {
1882 		printf("%s: could not allocate RX ring DMA memory\n",
1883 		    DEVNAME(sc));
1884 		goto fail;
1885 	}
1886 	ring->desc = ring->free_desc_dma.vaddr;
1887 
1888 	/* Allocate RX status area (16-byte aligned). */
1889 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1890 		size = sizeof(uint16_t);
1891 	else
1892 		size = sizeof(*ring->stat);
1893 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
1894 	if (err) {
1895 		printf("%s: could not allocate RX status DMA memory\n",
1896 		    DEVNAME(sc));
1897 		goto fail;
1898 	}
1899 	ring->stat = ring->stat_dma.vaddr;
1900 
1901 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1902 		size = sizeof(struct iwx_rx_completion_desc);
1903 	else
1904 		size = sizeof(uint32_t);
1905 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1906 	    size * IWX_RX_MQ_RING_COUNT, 256);
1907 	if (err) {
1908 		printf("%s: could not allocate RX ring DMA memory\n",
1909 		    DEVNAME(sc));
1910 		goto fail;
1911 	}
1912 
1913 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1914 		struct iwx_rx_data *data = &ring->data[i];
1915 
1916 		memset(data, 0, sizeof(*data));
1917 		err = bus_dmamap_create(sc->sc_dmat, IWX_RBUF_SIZE, 1,
1918 		    IWX_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1919 		    &data->map);
1920 		if (err) {
1921 			printf("%s: could not create RX buf DMA map\n",
1922 			    DEVNAME(sc));
1923 			goto fail;
1924 		}
1925 
1926 		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
1927 		if (err)
1928 			goto fail;
1929 	}
1930 	return 0;
1931 
1932 fail:	iwx_free_rx_ring(sc, ring);
1933 	return err;
1934 }
1935 
1936 void
1937 iwx_disable_rx_dma(struct iwx_softc *sc)
1938 {
1939 	int ntries;
1940 
1941 	if (iwx_nic_lock(sc)) {
1942 		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1943 			iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
1944 		else
1945 			iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
1946 		for (ntries = 0; ntries < 1000; ntries++) {
1947 			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
1948 				if (iwx_read_umac_prph(sc,
1949 				    IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE)
1950 					break;
1951 			} else {
1952 				if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
1953 				    IWX_RXF_DMA_IDLE)
1954 					break;
1955 			}
1956 			DELAY(10);
1957 		}
1958 		iwx_nic_unlock(sc);
1959 	}
1960 }
1961 
1962 void
1963 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1964 {
1965 	ring->cur = 0;
1966 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1967 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1968 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
1969 		uint16_t *status = sc->rxq.stat_dma.vaddr;
1970 		*status = 0;
1971 	} else
1972 		memset(ring->stat, 0, sizeof(*ring->stat));
1973 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1974 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1976 }
1977 
1978 void
1979 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1980 {
1981 	int i;
1982 
1983 	iwx_dma_contig_free(&ring->free_desc_dma);
1984 	iwx_dma_contig_free(&ring->stat_dma);
1985 	iwx_dma_contig_free(&ring->used_desc_dma);
1986 
1987 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1988 		struct iwx_rx_data *data = &ring->data[i];
1989 
1990 		if (data->m != NULL) {
1991 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1992 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1993 			bus_dmamap_unload(sc->sc_dmat, data->map);
1994 			m_freem(data->m);
1995 			data->m = NULL;
1996 		}
1997 		if (data->map != NULL)
1998 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1999 	}
2000 }
2001 
2002 int
2003 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
2004 {
2005 	bus_addr_t paddr;
2006 	bus_size_t size;
2007 	int i, err;
2008 	size_t bc_tbl_size;
2009 	bus_size_t bc_align;
2010 
2011 	ring->qid = qid;
2012 	ring->queued = 0;
2013 	ring->cur = 0;
2014 	ring->cur_hw = 0;
2015 	ring->tail = 0;
2016 	ring->tail_hw = 0;
2017 
2018 	/* Allocate TX descriptors (256-byte aligned). */
2019 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
2020 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
2021 	if (err) {
2022 		printf("%s: could not allocate TX ring DMA memory\n",
2023 		    DEVNAME(sc));
2024 		goto fail;
2025 	}
2026 	ring->desc = ring->desc_dma.vaddr;
2027 
2028 	/*
2029 	 * The hardware supports up to 512 Tx rings, which is more
2030 	 * than we currently need.
2031 	 *
2032 	 * In DQA mode we use 1 command queue + 1 default queue for
2033 	 * management, control, and non-QoS data frames.
2034 	 * The command queue is sc->txq[0]; our default queue is sc->txq[1].
2035 	 *
2036 	 * Tx aggregation requires additional queues, one queue per TID for
2037 	 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
2038 	 * Firmware may assign its own internal IDs for these queues
2039 	 * depending on which TID gets aggregation enabled first.
2040 	 * The driver maintains a table mapping driver-side queue IDs
2041 	 * to firmware-side queue IDs.
2042 	 */
2043 
2044 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2045 		bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
2046 		    IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
2047 		bc_align = 128;
2048 	} else {
2049 		bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
2050 		bc_align = 64;
2051 	}
2052 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
2053 	    bc_align);
2054 	if (err) {
2055 		printf("%s: could not allocate byte count table DMA memory\n",
2056 		    DEVNAME(sc));
2057 		goto fail;
2058 	}
2059 
2060 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
2061 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
2062 	    IWX_FIRST_TB_SIZE_ALIGN);
2063 	if (err) {
2064 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
2065 		goto fail;
2066 	}
2067 	ring->cmd = ring->cmd_dma.vaddr;
2068 
2069 	paddr = ring->cmd_dma.paddr;
2070 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2071 		struct iwx_tx_data *data = &ring->data[i];
2072 		size_t mapsize;
2073 
2074 		data->cmd_paddr = paddr;
2075 		paddr += sizeof(struct iwx_device_cmd);
2076 
2077 		/* FW commands may require more mapped space than packets. */
2078 		if (qid == IWX_DQA_CMD_QUEUE)
2079 			mapsize = (sizeof(struct iwx_cmd_header) +
2080 			    IWX_MAX_CMD_PAYLOAD_SIZE);
2081 		else
2082 			mapsize = MCLBYTES;
2083 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
2084 		    IWX_TFH_NUM_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
2085 		    &data->map);
2086 		if (err) {
2087 			printf("%s: could not create TX buf DMA map\n",
2088 			    DEVNAME(sc));
2089 			goto fail;
2090 		}
2091 	}
2092 	KASSERT(paddr == ring->cmd_dma.paddr + size);
2093 	return 0;
2094 
2095 fail:	iwx_free_tx_ring(sc, ring);
2096 	return err;
2097 }
2098 
2099 void
2100 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2101 {
2102 	int i;
2103 
2104 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2105 		struct iwx_tx_data *data = &ring->data[i];
2106 
2107 		if (data->m != NULL) {
2108 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2109 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2110 			bus_dmamap_unload(sc->sc_dmat, data->map);
2111 			m_freem(data->m);
2112 			data->m = NULL;
2113 		}
2114 	}
2115 
2116 	/* Clear byte count table. */
2117 	memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
2118 
2119 	/* Clear TX descriptors. */
2120 	memset(ring->desc, 0, ring->desc_dma.size);
2121 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
2122 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
2123 	sc->qfullmsk &= ~(1 << ring->qid);
2124 	sc->qenablemsk &= ~(1 << ring->qid);
2125 	for (i = 0; i < nitems(sc->aggqid); i++) {
2126 		if (sc->aggqid[i] == ring->qid) {
2127 			sc->aggqid[i] = 0;
2128 			break;
2129 		}
2130 	}
2131 	ring->queued = 0;
2132 	ring->cur = 0;
2133 	ring->cur_hw = 0;
2134 	ring->tail = 0;
2135 	ring->tail_hw = 0;
2136 	ring->tid = 0;
2137 }
2138 
2139 void
2140 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2141 {
2142 	int i;
2143 
2144 	iwx_dma_contig_free(&ring->desc_dma);
2145 	iwx_dma_contig_free(&ring->cmd_dma);
2146 	iwx_dma_contig_free(&ring->bc_tbl);
2147 
2148 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2149 		struct iwx_tx_data *data = &ring->data[i];
2150 
2151 		if (data->m != NULL) {
2152 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2153 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2154 			bus_dmamap_unload(sc->sc_dmat, data->map);
2155 			m_freem(data->m);
2156 			data->m = NULL;
2157 		}
2158 		if (data->map != NULL)
2159 			bus_dmamap_destroy(sc->sc_dmat, data->map);
2160 	}
2161 }
2162 
2163 void
2164 iwx_enable_rfkill_int(struct iwx_softc *sc)
2165 {
2166 	if (!sc->sc_msix) {
2167 		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
2168 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2169 	} else {
2170 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2171 		    sc->sc_fh_init_mask);
2172 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2173 		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
2174 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
2175 	}
2176 
2177 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2178 	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
2179 }
2180 
2181 int
2182 iwx_check_rfkill(struct iwx_softc *sc)
2183 {
2184 	uint32_t v;
2185 	int rv;
2186 
2187 	/*
2188 	 * "documentation" is not really helpful here:
2189 	 *  27:	HW_RF_KILL_SW
2190 	 *	Indicates state of (platform's) hardware RF-Kill switch
2191 	 *
2192 	 * But apparently when it's off, it's on ...
2193 	 */
2194 	v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
2195 	rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
2196 	if (rv) {
2197 		sc->sc_flags |= IWX_FLAG_RFKILL;
2198 	} else {
2199 		sc->sc_flags &= ~IWX_FLAG_RFKILL;
2200 	}
2201 
2202 	return rv;
2203 }
2204 
2205 void
2206 iwx_enable_interrupts(struct iwx_softc *sc)
2207 {
2208 	if (!sc->sc_msix) {
2209 		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
2210 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2211 	} else {
2212 		/*
2213 		 * sc_fh_mask and sc_hw_mask keep track of all unmasked causes.
2214 		 * Unlike MSI, with MSI-X a cause is enabled while its mask bit
2214 		 * is unset.
2215 		 */
2216 		sc->sc_hw_mask = sc->sc_hw_init_mask;
2217 		sc->sc_fh_mask = sc->sc_fh_init_mask;
2218 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2219 		    ~sc->sc_fh_mask);
2220 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2221 		    ~sc->sc_hw_mask);
2222 	}
2223 }
2224 
2225 void
2226 iwx_enable_fwload_interrupt(struct iwx_softc *sc)
2227 {
2228 	if (!sc->sc_msix) {
2229 		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
2230 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2231 	} else {
2232 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2233 		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
2234 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
2235 		/*
2236 		 * Leave all the FH causes enabled to get the ALIVE
2237 		 * notification.
2238 		 */
2239 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2240 		    ~sc->sc_fh_init_mask);
2241 		sc->sc_fh_mask = sc->sc_fh_init_mask;
2242 	}
2243 }
2244 
2245 void
2246 iwx_restore_interrupts(struct iwx_softc *sc)
2247 {
2248 	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2249 }
2250 
2251 void
2252 iwx_disable_interrupts(struct iwx_softc *sc)
2253 {
2254 	if (!sc->sc_msix) {
2255 		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
2256 
2257 		/* acknowledge all interrupts */
2258 		IWX_WRITE(sc, IWX_CSR_INT, ~0);
2259 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
2260 	} else {
2261 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2262 		    sc->sc_fh_init_mask);
2263 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2264 		    sc->sc_hw_init_mask);
2265 	}
2266 }
2267 
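/*
 * The ICT (interrupt cause table) is a DMA region in host memory
 * into which the device writes interrupt cause bits, saving the
 * interrupt handler a register read.  Reset it to a clean state
 * and point the device at its physical address.
 */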
2268 void
2269 iwx_ict_reset(struct iwx_softc *sc)
2270 {
2271 	iwx_disable_interrupts(sc);
2272 
2273 	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
2274 	sc->ict_cur = 0;
2275 
2276 	/* Set physical address of ICT (4KB aligned). */
2277 	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
2278 	    IWX_CSR_DRAM_INT_TBL_ENABLE
2279 	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
2280 	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
2281 	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);
2282 
2283 	/* Switch to ICT interrupt mode in driver. */
2284 	sc->sc_flags |= IWX_FLAG_USE_ICT;
2285 
2286 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
2287 	iwx_enable_interrupts(sc);
2288 }
2289 
2290 #define IWX_HW_READY_TIMEOUT 50
2291 int
2292 iwx_set_hw_ready(struct iwx_softc *sc)
2293 {
2294 	int ready;
2295 
2296 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2297 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2298 
2299 	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
2300 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2301 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2302 	    IWX_HW_READY_TIMEOUT);
2303 	if (ready)
2304 		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
2305 		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);
2306 
2307 	return ready;
2308 }
2309 #undef IWX_HW_READY_TIMEOUT
2310 
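/*
 * Bring the device into a state where the host may access it,
 * asserting the PREPARE handshake and polling for NIC_READY.
 * Returns 0 once the hardware is ready, or ETIMEDOUT.
 */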
2311 int
2312 iwx_prepare_card_hw(struct iwx_softc *sc)
2313 {
2314 	int t = 0;
2315 	int ntries;
2316 
2317 	if (iwx_set_hw_ready(sc))
2318 		return 0;
2319 
2320 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2321 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2322 	DELAY(1000);
2323 
2324 	for (ntries = 0; ntries < 10; ntries++) {
2325 		/* If HW is not ready, prepare the conditions to check again */
2326 		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2327 		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);
2328 
2329 		do {
2330 			if (iwx_set_hw_ready(sc))
2331 				return 0;
2332 			DELAY(200);
2333 			t += 200;
2334 		} while (t < 150000);
2335 		DELAY(25000);
2336 	}
2337 
2338 	return ETIMEDOUT;
2339 }
2340 
2341 int
2342 iwx_force_power_gating(struct iwx_softc *sc)
2343 {
2344 	int err;
2345 
2346 	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2347 	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2348 	if (err)
2349 		return err;
2350 	DELAY(20);
2351 	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2352 	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
2353 	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
2354 	if (err)
2355 		return err;
2356 	DELAY(20);
2357 	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2358 	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2359 	return err;
2360 }
2361 
2362 void
2363 iwx_apm_config(struct iwx_softc *sc)
2364 {
2365 	pcireg_t lctl, cap;
2366 
2367 	/*
2368 	 * L0S states have been found to be unstable with our devices
2369 	 * and in newer hardware they are not officially supported at
2370 	 * all, so we must always set the L0S_DISABLED bit.
2371 	 */
2372 	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
2373 
2374 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2375 	    sc->sc_cap_off + PCI_PCIE_LCSR);
2376 	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
2377 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2378 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
2379 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
2380 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
2381 	    DEVNAME(sc),
2382 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
2383 	    sc->sc_ltr_enabled ? "En" : "Dis"));
2384 }
2385 
2386 /*
2387  * Start up NIC's basic functionality after it has been reset
2388  * e.g. after platform boot or shutdown.
2389  * NOTE:  This does not load uCode nor start the embedded processor
2390  */
2391 int
2392 iwx_apm_init(struct iwx_softc *sc)
2393 {
2394 	int err = 0;
2395 
2396 	/*
2397 	 * Disable L0s without affecting L1;
2398 	 *  don't wait for ICH L0s (ICH bug W/A)
2399 	 */
2400 	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
2401 	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
2402 
2403 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
2404 	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
2405 
2406 	/*
2407 	 * Enable HAP INTA (interrupt from management bus) to
2408 	 * wake device's PCI Express link L1a -> L0s
2409 	 */
2410 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2411 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
2412 
2413 	iwx_apm_config(sc);
2414 
2415 	/*
2416 	 * Set "initialization complete" bit to move adapter from
2417 	 * D0U* --> D0A* (powered-up active) state.
2418 	 */
2419 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2420 
2421 	/*
2422 	 * Wait for clock stabilization; once stabilized, access to
2423 	 * device-internal resources is supported, e.g. iwx_write_prph()
2424 	 * and accesses to uCode SRAM.
2425 	 */
2426 	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2427 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2428 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2429 		printf("%s: timeout waiting for clock stabilization\n",
2430 		    DEVNAME(sc));
2431 		err = ETIMEDOUT;
2432 		goto out;
2433 	}
2434  out:
2435 	if (err)
2436 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
2437 	return err;
2438 }
2439 
2440 void
2441 iwx_apm_stop(struct iwx_softc *sc)
2442 {
2443 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2444 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2445 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2446 	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
2447 	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
2448 	DELAY(1000);
2449 	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2450 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2451 	DELAY(5000);
2452 
2453 	/* stop device's busmaster DMA activity */
2454 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);
2455 
2456 	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
2457 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
2458 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
2459 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
2460 
2461 	/*
2462 	 * Clear "initialization complete" bit to move adapter from
2463 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
2464 	 */
2465 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2466 	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2467 }
2468 
2469 void
2470 iwx_init_msix_hw(struct iwx_softc *sc)
2471 {
2472 	iwx_conf_msix_hw(sc, 0);
2473 
2474 	if (!sc->sc_msix)
2475 		return;
2476 
2477 	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
2478 	sc->sc_fh_mask = sc->sc_fh_init_mask;
2479 	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
2480 	sc->sc_hw_mask = sc->sc_hw_init_mask;
2481 }
2482 
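/*
 * Program the MSI-X IVAR tables.  This driver uses a single MSI-X
 * vector, so both RX queues and all non-RX interrupt causes are
 * mapped to vector 0 and their mask bits are cleared.  In MSI mode
 * this merely re-enables MSI, since newer chips default to MSI-X.
 */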
2483 void
2484 iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
2485 {
2486 	int vector = 0;
2487 
2488 	if (!sc->sc_msix) {
2489 		/* Newer chips default to MSIX. */
2490 		if (!stopped && iwx_nic_lock(sc)) {
2491 			iwx_write_umac_prph(sc, IWX_UREG_CHICK,
2492 			    IWX_UREG_CHICK_MSI_ENABLE);
2493 			iwx_nic_unlock(sc);
2494 		}
2495 		return;
2496 	}
2497 
2498 	if (!stopped && iwx_nic_lock(sc)) {
2499 		iwx_write_umac_prph(sc, IWX_UREG_CHICK,
2500 		    IWX_UREG_CHICK_MSIX_ENABLE);
2501 		iwx_nic_unlock(sc);
2502 	}
2503 
2504 	/* Disable all interrupts */
2505 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
2506 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
2507 
2508 	/* Map fallback-queue (command/mgmt) to a single vector */
2509 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
2510 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2511 	/* Map RSS queue (data) to the same vector */
2512 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
2513 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2514 
2515 	/* Enable interrupts for the RX queue causes */
2516 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2517 	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
2518 
2519 	/* Map non-RX causes to the same vector */
2520 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
2521 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2522 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
2523 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2524 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
2525 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2526 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
2527 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2528 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
2529 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2530 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
2531 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2532 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE),
2533 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2534 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
2535 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2536 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
2537 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2538 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
2539 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2540 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
2541 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2542 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
2543 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2544 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
2545 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2546 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
2547 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2548 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
2549 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2550 
2551 	/* Enable interrupts for the non-RX causes */
2552 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2553 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2554 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2555 	    IWX_MSIX_FH_INT_CAUSES_S2D |
2556 	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
2557 	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2558 	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
2559 	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2560 	    IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE |
2561 	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2562 	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2563 	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2564 	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2565 	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
2566 	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
2567 	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2568 	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
2569 }
2570 
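/*
 * A leftover "persistence" bit in the HPM_DEBUG register prevents
 * a clean device reset on 22000-series hardware.  Clear it unless
 * the register range is write-protected, in which case EPERM is
 * returned.
 */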
2571 int
2572 iwx_clear_persistence_bit(struct iwx_softc *sc)
2573 {
2574 	uint32_t hpm, wprot;
2575 
2576 	hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
2577 	if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
2578 		wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
2579 		if (wprot & IWX_PREG_WFPM_ACCESS) {
2580 			printf("%s: cannot clear persistence bit\n",
2581 			    DEVNAME(sc));
2582 			return EPERM;
2583 		}
2584 		iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
2585 		    hpm & ~IWX_PERSISTENCE_BIT);
2586 	}
2587 
2588 	return 0;
2589 }
2590 
2591 int
2592 iwx_start_hw(struct iwx_softc *sc)
2593 {
2594 	int err;
2595 
2596 	err = iwx_prepare_card_hw(sc);
2597 	if (err)
2598 		return err;
2599 
2600 	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
2601 		err = iwx_clear_persistence_bit(sc);
2602 		if (err)
2603 			return err;
2604 	}
2605 
2606 	/* Reset the entire device */
2607 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2608 	DELAY(5000);
2609 
2610 	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 &&
2611 	    sc->sc_integrated) {
2612 		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2613 		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2614 		DELAY(20);
2615 		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2616 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2617 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2618 			printf("%s: timeout waiting for clock stabilization\n",
2619 			    DEVNAME(sc));
2620 			return ETIMEDOUT;
2621 		}
2622 
2623 		err = iwx_force_power_gating(sc);
2624 		if (err)
2625 			return err;
2626 
2627 		/* Reset the entire device */
2628 		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2629 		DELAY(5000);
2630 	}
2631 
2632 	err = iwx_apm_init(sc);
2633 	if (err)
2634 		return err;
2635 
2636 	iwx_init_msix_hw(sc);
2637 
2638 	iwx_enable_rfkill_int(sc);
2639 	iwx_check_rfkill(sc);
2640 
2641 	return 0;
2642 }
2643 
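/*
 * Stop the device and leave it in a low-power state: tear down
 * block ack sessions and Tx/Rx rings, stop DMA and APM, reset the
 * on-board processor, and re-program the IVAR table, which the
 * reset erases.
 */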
2644 void
2645 iwx_stop_device(struct iwx_softc *sc)
2646 {
2647 	struct ieee80211com *ic = &sc->sc_ic;
2648 	struct ieee80211_node *ni = ic->ic_bss;
2649 	int i;
2650 
2651 	iwx_disable_interrupts(sc);
2652 	sc->sc_flags &= ~IWX_FLAG_USE_ICT;
2653 
2654 	iwx_disable_rx_dma(sc);
2655 	iwx_reset_rx_ring(sc, &sc->rxq);
2656 	for (i = 0; i < nitems(sc->txq); i++)
2657 		iwx_reset_tx_ring(sc, &sc->txq[i]);
2658 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
2659 		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
2660 		if (ba->ba_state != IEEE80211_BA_AGREED)
2661 			continue;
2662 		ieee80211_delba_request(ic, ni, 0, 1, i);
2663 	}
2664 
2665 	/* Make sure (redundantly) we've released our request to stay awake */
2666 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2667 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2668 	if (sc->sc_nic_locks > 0)
2669 		printf("%s: %d active NIC locks forcefully cleared\n",
2670 		    DEVNAME(sc), sc->sc_nic_locks);
2671 	sc->sc_nic_locks = 0;
2672 
2673 	/* Stop the device, and put it in low power state */
2674 	iwx_apm_stop(sc);
2675 
2676 	/* Reset the on-board processor. */
2677 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2678 	DELAY(5000);
2679 
2680 	/*
2681 	 * Upon stop, the IVAR table gets erased, so msi-x won't
2682 	 * work. This causes a bug in RF-KILL flows, since the interrupt
2683 	 * that enables radio won't fire on the correct irq, and the
2684 	 * driver won't be able to handle the interrupt.
2685 	 * Configure the IVAR table again after reset.
2686 	 */
2687 	iwx_conf_msix_hw(sc, 1);
2688 
2689 	/*
2690 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2691 	 * Clear the interrupt again.
2692 	 */
2693 	iwx_disable_interrupts(sc);
2694 
2695 	/* Even though we stop the HW we still want the RF kill interrupt. */
2696 	iwx_enable_rfkill_int(sc);
2697 	iwx_check_rfkill(sc);
2698 
2699 	iwx_prepare_card_hw(sc);
2700 
2701 	iwx_ctxt_info_free_paging(sc);
2702 	iwx_dma_contig_free(&sc->pnvm_dma);
2703 }
2704 
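/*
 * Propagate the radio configuration from the firmware PHY config,
 * and the MAC step/dash values from the hardware revision register,
 * into IWX_CSR_HW_IF_CONFIG_REG.
 */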
2705 void
2706 iwx_nic_config(struct iwx_softc *sc)
2707 {
2708 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2709 	uint32_t mask, val, reg_val = 0;
2710 
2711 	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2712 	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2713 	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2714 	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
2715 	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2716 	    IWX_FW_PHY_CFG_RADIO_DASH_POS;
2717 
2718 	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2719 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2720 	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2721 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2722 
2723 	/* radio configuration */
2724 	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2725 	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2726 	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2727 
2728 	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2729 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2730 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2731 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2732 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2733 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2734 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2735 
2736 	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2737 	val &= ~mask;
2738 	val |= reg_val;
2739 	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2740 }
2741 
2742 int
2743 iwx_nic_rx_init(struct iwx_softc *sc)
2744 {
2745 	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
2746 
2747 	/*
2748 	 * We don't configure the RFH; the firmware will do that.
2749 	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
2750 	 */
2751 	return 0;
2752 }
2753 
2754 int
2755 iwx_nic_init(struct iwx_softc *sc)
2756 {
2757 	int err;
2758 
2759 	iwx_apm_init(sc);
2760 	if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
2761 		iwx_nic_config(sc);
2762 
2763 	err = iwx_nic_rx_init(sc);
2764 	if (err)
2765 		return err;
2766 
2767 	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2768 
2769 	return 0;
2770 }
2771 
2772 /* Map a TID to an ieee80211_edca_ac category. */
2773 const uint8_t iwx_tid_to_ac[IWX_MAX_TID_COUNT] = {
2774 	EDCA_AC_BE,
2775 	EDCA_AC_BK,
2776 	EDCA_AC_BK,
2777 	EDCA_AC_BE,
2778 	EDCA_AC_VI,
2779 	EDCA_AC_VI,
2780 	EDCA_AC_VO,
2781 	EDCA_AC_VO,
2782 };
2783 
2784 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2785 const uint8_t iwx_ac_to_tx_fifo[] = {
2786 	IWX_GEN2_EDCA_TX_FIFO_BE,
2787 	IWX_GEN2_EDCA_TX_FIFO_BK,
2788 	IWX_GEN2_EDCA_TX_FIFO_VI,
2789 	IWX_GEN2_EDCA_TX_FIFO_VO,
2790 };
2791 
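/*
 * Enable a Tx queue via the SCD_QUEUE_CFG host command, passing the
 * DMA addresses of the queue's TFD ring and byte count table to
 * firmware.  Firmware replies with the queue ID it assigned; since
 * this driver does not implement dynamic queue ID assignment, a
 * mismatch with the requested qid is treated as an error.
 */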
2792 int
2793 iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
2794     int num_slots)
2795 {
2796 	struct iwx_tx_queue_cfg_cmd cmd;
2797 	struct iwx_rx_packet *pkt;
2798 	struct iwx_tx_queue_cfg_rsp *resp;
2799 	struct iwx_host_cmd hcmd = {
2800 		.id = IWX_SCD_QUEUE_CFG,
2801 		.flags = IWX_CMD_WANT_RESP,
2802 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2803 	};
2804 	struct iwx_tx_ring *ring = &sc->txq[qid];
2805 	int err, fwqid;
2806 	uint32_t wr_idx;
2807 	size_t resp_len;
2808 
2809 	iwx_reset_tx_ring(sc, ring);
2810 
2811 	memset(&cmd, 0, sizeof(cmd));
2812 	cmd.sta_id = sta_id;
2813 	cmd.tid = tid;
2814 	cmd.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
2815 	cmd.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2816 	cmd.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
2817 	cmd.tfdq_addr = htole64(ring->desc_dma.paddr);
2818 
2819 	hcmd.data[0] = &cmd;
2820 	hcmd.len[0] = sizeof(cmd);
2821 
2822 	err = iwx_send_cmd(sc, &hcmd);
2823 	if (err)
2824 		return err;
2825 
2826 	pkt = hcmd.resp_pkt;
2827 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2828 		DPRINTF(("SCD_QUEUE_CFG command failed\n"));
2829 		err = EIO;
2830 		goto out;
2831 	}
2832 
2833 	resp_len = iwx_rx_packet_payload_len(pkt);
2834 	if (resp_len != sizeof(*resp)) {
2835 		DPRINTF(("SCD_QUEUE_CFG returned %zu bytes, expected %zu bytes\n", resp_len, sizeof(*resp)));
2836 		err = EIO;
2837 		goto out;
2838 	}
2839 
2840 	resp = (void *)pkt->data;
2841 	fwqid = le16toh(resp->queue_number);
2842 	wr_idx = le16toh(resp->write_pointer);
2843 
2844 	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
2845 	if (fwqid != qid) {
2846 		DPRINTF(("requested qid %d but %d was assigned\n", qid, fwqid));
2847 		err = EIO;
2848 		goto out;
2849 	}
2850 
2851 	if (wr_idx != ring->cur_hw) {
2852 		DPRINTF(("fw write index is %d but ring is %d\n",
2853 		    wr_idx, ring->cur_hw));
2854 		err = EIO;
2855 		goto out;
2856 	}
2857 
2858 	sc->qenablemsk |= (1 << qid);
2859 	ring->tid = tid;
2860 out:
2861 	iwx_free_resp(sc, &hcmd);
2862 	return err;
2863 }
2864 
2865 int
2866 iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
2867 {
2868 	struct iwx_tx_queue_cfg_cmd cmd;
2869 	struct iwx_rx_packet *pkt;
2870 	struct iwx_tx_queue_cfg_rsp *resp;
2871 	struct iwx_host_cmd hcmd = {
2872 		.id = IWX_SCD_QUEUE_CFG,
2873 		.flags = IWX_CMD_WANT_RESP,
2874 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2875 	};
2876 	struct iwx_tx_ring *ring = &sc->txq[qid];
2877 	int err;
2878 
2879 	memset(&cmd, 0, sizeof(cmd));
2880 	cmd.sta_id = sta_id;
2881 	cmd.tid = tid;
2882 	cmd.flags = htole16(0); /* clear "queue enabled" flag */
2883 	cmd.cb_size = htole32(0);
2884 	cmd.byte_cnt_addr = htole64(0);
2885 	cmd.tfdq_addr = htole64(0);
2886 
2887 	hcmd.data[0] = &cmd;
2888 	hcmd.len[0] = sizeof(cmd);
2889 
2890 	err = iwx_send_cmd(sc, &hcmd);
2891 	if (err)
2892 		return err;
2893 
2894 	pkt = hcmd.resp_pkt;
2895 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2896 		DPRINTF(("SCD_QUEUE_CFG command failed\n"));
2897 		err = EIO;
2898 		goto out;
2899 	}
2900 
2901 	sc->qenablemsk &= ~(1 << qid);
2902 	iwx_reset_tx_ring(sc, ring);
2903 out:
2904 	iwx_free_resp(sc, &hcmd);
2905 	return err;
2906 }
2907 
2908 void
2909 iwx_post_alive(struct iwx_softc *sc)
2910 {
2911 	iwx_ict_reset(sc);
2912 }
2913 
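/*
 * Ask firmware to schedule a "session protection" time event which
 * keeps the device on the current channel for the given duration
 * while an association is in progress.
 */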
2914 int
2915 iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
2916     uint32_t duration)
2917 {
2918 	struct iwx_session_prot_cmd cmd = {
2919 		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
2920 		    in->in_color)),
2921 		.action = htole32(IWX_FW_CTXT_ACTION_ADD),
2922 		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
2923 		.duration_tu = htole32(duration * IEEE80211_DUR_TU),
2924 	};
2925 	uint32_t cmd_id;
2926 	int err;
2927 
2928 	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
2929 	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
2930 	if (!err)
2931 		sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
2932 	return err;
2933 }
2934 
2935 void
2936 iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
2937 {
2938 	struct iwx_session_prot_cmd cmd = {
2939 		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
2940 		    in->in_color)),
2941 		.action = htole32(IWX_FW_CTXT_ACTION_REMOVE),
2942 		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
2943 		.duration_tu = 0,
2944 	};
2945 	uint32_t cmd_id;
2946 
2947 	/* Do nothing if the time event has already ended. */
2948 	if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
2949 		return;
2950 
2951 	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
2952 	if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
2953 		sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
2954 }
2955 
2956 /*
2957  * NVM read access and content parsing.  We do not support
2958  * external NVM or writing NVM.
2959  */
2960 
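/*
 * The sets of valid Tx and Rx antennas are taken from the firmware
 * PHY configuration and further restricted by the NVM, if the NVM
 * specifies an antenna mask of its own.
 */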
2961 uint8_t
2962 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
2963 {
2964 	uint8_t tx_ant;
2965 
2966 	tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
2967 	    >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
2968 
2969 	if (sc->sc_nvm.valid_tx_ant)
2970 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2971 
2972 	return tx_ant;
2973 }
2974 
2975 uint8_t
2976 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
2977 {
2978 	uint8_t rx_ant;
2979 
2980 	rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
2981 	    >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
2982 
2983 	if (sc->sc_nvm.valid_rx_ant)
2984 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2985 
2986 	return rx_ant;
2987 }
2988 
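/*
 * Translate the channel profile found in the NVM into net80211
 * channel structures, applying the per-channel NVM flags (valid,
 * active, 40/80 MHz capable) and device capabilities.
 */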
2989 void
2990 iwx_init_channel_map(struct iwx_softc *sc, uint16_t *channel_profile_v3,
2991     uint32_t *channel_profile_v4, int nchan_profile)
2992 {
2993 	struct ieee80211com *ic = &sc->sc_ic;
2994 	struct iwx_nvm_data *data = &sc->sc_nvm;
2995 	int ch_idx;
2996 	struct ieee80211_channel *channel;
2997 	uint32_t ch_flags;
2998 	int is_5ghz;
2999 	int flags, hw_value;
3000 	int nchan;
3001 	const uint8_t *nvm_channels;
3002 
3003 	if (sc->sc_uhb_supported) {
3004 		nchan = nitems(iwx_nvm_channels_uhb);
3005 		nvm_channels = iwx_nvm_channels_uhb;
3006 	} else {
3007 		nchan = nitems(iwx_nvm_channels_8000);
3008 		nvm_channels = iwx_nvm_channels_8000;
3009 	}
3010 
3011 	for (ch_idx = 0; ch_idx < nchan && ch_idx < nchan_profile; ch_idx++) {
3012 		if (channel_profile_v4)
3013 			ch_flags = le32_to_cpup(channel_profile_v4 + ch_idx);
3014 		else
3015 			ch_flags = le16_to_cpup(channel_profile_v3 + ch_idx);
3016 
3017 		/* net80211 cannot handle 6 GHz channel numbers yet */
3018 		if (ch_idx >= IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS)
3019 			break;
3020 
3021 		is_5ghz = ch_idx >= IWX_NUM_2GHZ_CHANNELS;
3022 		if (is_5ghz && !data->sku_cap_band_52GHz_enable)
3023 			ch_flags &= ~IWX_NVM_CHANNEL_VALID;
3024 
3025 		hw_value = nvm_channels[ch_idx];
3026 		channel = &ic->ic_channels[hw_value];
3027 
3028 		if (!(ch_flags & IWX_NVM_CHANNEL_VALID)) {
3029 			channel->ic_freq = 0;
3030 			channel->ic_flags = 0;
3031 			continue;
3032 		}
3033 
3034 		if (!is_5ghz) {
3035 			flags = IEEE80211_CHAN_2GHZ;
3036 			channel->ic_flags
3037 			    = IEEE80211_CHAN_CCK
3038 			    | IEEE80211_CHAN_OFDM
3039 			    | IEEE80211_CHAN_DYN
3040 			    | IEEE80211_CHAN_2GHZ;
3041 		} else {
3042 			flags = IEEE80211_CHAN_5GHZ;
3043 			channel->ic_flags =
3044 			    IEEE80211_CHAN_A;
3045 		}
3046 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
3047 
3048 		if (!(ch_flags & IWX_NVM_CHANNEL_ACTIVE))
3049 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
3050 
3051 		if (data->sku_cap_11n_enable) {
3052 			channel->ic_flags |= IEEE80211_CHAN_HT;
3053 			if (ch_flags & IWX_NVM_CHANNEL_40MHZ)
3054 				channel->ic_flags |= IEEE80211_CHAN_40MHZ;
3055 		}
3056 
3057 		if (is_5ghz && data->sku_cap_11ac_enable) {
3058 			channel->ic_flags |= IEEE80211_CHAN_VHT;
3059 			if (ch_flags & IWX_NVM_CHANNEL_80MHZ)
3060 				channel->ic_xflags |= IEEE80211_CHANX_80MHZ;
3061 		}
3062 	}
3063 }
3064 
3065 int
3066 iwx_mimo_enabled(struct iwx_softc *sc)
3067 {
3068 	struct ieee80211com *ic = &sc->sc_ic;
3069 
3070 	return !sc->sc_nvm.sku_cap_mimo_disable &&
3071 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
3072 }
3073 
3074 void
3075 iwx_setup_ht_rates(struct iwx_softc *sc)
3076 {
3077 	struct ieee80211com *ic = &sc->sc_ic;
3078 	uint8_t rx_ant;
3079 
3080 	/* TX is supported with the same MCS as RX. */
3081 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
3082 
3083 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
3084 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
3085 
3086 	if (!iwx_mimo_enabled(sc))
3087 		return;
3088 
3089 	rx_ant = iwx_fw_valid_rx_ant(sc);
3090 	if ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
3091 	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)
3092 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
3093 }
3094 
3095 void
3096 iwx_setup_vht_rates(struct iwx_softc *sc)
3097 {
3098 	struct ieee80211com *ic = &sc->sc_ic;
3099 	uint8_t rx_ant = iwx_fw_valid_rx_ant(sc);
3100 	int n;
3101 
3102 	ic->ic_vht_rxmcs = (IEEE80211_VHT_MCS_0_9 <<
3103 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(1));
3104 
3105 	if (iwx_mimo_enabled(sc) &&
3106 	    ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
3107 	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)) {
3108 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_0_9 <<
3109 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3110 	} else {
3111 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3112 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3113 	}
3114 
3115 	for (n = 3; n <= IEEE80211_VHT_NUM_SS; n++) {
3116 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3117 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(n));
3118 	}
3119 
3120 	ic->ic_vht_txmcs = ic->ic_vht_rxmcs;
3121 }
3122 
3123 void
3124 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
3125     uint16_t ssn, uint16_t buf_size)
3126 {
3127 	reorder_buf->head_sn = ssn;
3128 	reorder_buf->num_stored = 0;
3129 	reorder_buf->buf_size = buf_size;
3130 	reorder_buf->last_amsdu = 0;
3131 	reorder_buf->last_sub_index = 0;
3132 	reorder_buf->removed = 0;
3133 	reorder_buf->valid = 0;
3134 	reorder_buf->consec_oldsn_drops = 0;
3135 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3136 	reorder_buf->consec_oldsn_prev_drop = 0;
3137 }
3138 
3139 void
3140 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
3141 {
3142 	int i;
3143 	struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3144 	struct iwx_reorder_buf_entry *entry;
3145 
3146 	for (i = 0; i < reorder_buf->buf_size; i++) {
3147 		entry = &rxba->entries[i];
3148 		ml_purge(&entry->frames);
3149 		timerclear(&entry->reorder_time);
3150 	}
3151 
3152 	reorder_buf->removed = 1;
3153 	timeout_del(&reorder_buf->reorder_timer);
3154 	timerclear(&rxba->last_rx);
3155 	timeout_del(&rxba->session_timer);
3156 	rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
3157 }
3158 
3159 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
3160 
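/*
 * Session timer for an RX block ack agreement: if frames were
 * received recently, re-arm the timer; otherwise tear the
 * agreement down due to timeout.
 */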
3161 void
3162 iwx_rx_ba_session_expired(void *arg)
3163 {
3164 	struct iwx_rxba_data *rxba = arg;
3165 	struct iwx_softc *sc = rxba->sc;
3166 	struct ieee80211com *ic = &sc->sc_ic;
3167 	struct ieee80211_node *ni = ic->ic_bss;
3168 	struct timeval now, timeout, expiry;
3169 	int s;
3170 
3171 	s = splnet();
3172 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0 &&
3173 	    ic->ic_state == IEEE80211_S_RUN &&
3174 	    rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3175 		getmicrouptime(&now);
3176 		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3177 		timeradd(&rxba->last_rx, &timeout, &expiry);
3178 		if (timercmp(&now, &expiry, <)) {
3179 			timeout_add_usec(&rxba->session_timer, rxba->timeout);
3180 		} else {
3181 			ic->ic_stats.is_ht_rx_ba_timeout++;
3182 			ieee80211_delba_request(ic, ni,
3183 			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
3184 		}
3185 	}
3186 	splx(s);
3187 }
3188 
3189 void
3190 iwx_rx_bar_frame_release(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3191     struct mbuf_list *ml)
3192 {
3193 	struct ieee80211com *ic = &sc->sc_ic;
3194 	struct ieee80211_node *ni = ic->ic_bss;
3195 	struct iwx_bar_frame_release *release = (void *)pkt->data;
3196 	struct iwx_reorder_buffer *buf;
3197 	struct iwx_rxba_data *rxba;
3198 	unsigned int baid, nssn, sta_id, tid;
3199 
3200 	if (iwx_rx_packet_payload_len(pkt) < sizeof(*release))
3201 		return;
3202 
3203 	baid = (le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_BAID_MASK) >>
3204 	    IWX_BAR_FRAME_RELEASE_BAID_SHIFT;
3205 	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3206 	    baid >= nitems(sc->sc_rxba_data))
3207 		return;
3208 
3209 	rxba = &sc->sc_rxba_data[baid];
3210 	if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
3211 		return;
3212 
3213 	tid = le32toh(release->sta_tid) & IWX_BAR_FRAME_RELEASE_TID_MASK;
3214 	sta_id = (le32toh(release->sta_tid) &
3215 	    IWX_BAR_FRAME_RELEASE_STA_MASK) >> IWX_BAR_FRAME_RELEASE_STA_SHIFT;
3216 	if (tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
3217 		return;
3218 
3219 	nssn = le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_NSSN_MASK;
3220 	buf = &rxba->reorder_buf;
3221 	iwx_release_frames(sc, ni, rxba, buf, nssn, ml);
3222 }
3223 
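/*
 * Reorder buffer timer: release any buffered frames whose reorder
 * timeout has expired, advancing the window past them; otherwise
 * re-arm the timer for the oldest stored frame.
 */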
3224 void
3225 iwx_reorder_timer_expired(void *arg)
3226 {
3227 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3228 	struct iwx_reorder_buffer *buf = arg;
3229 	struct iwx_rxba_data *rxba = iwx_rxba_data_from_reorder_buf(buf);
3230 	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
3231 	struct iwx_softc *sc = rxba->sc;
3232 	struct ieee80211com *ic = &sc->sc_ic;
3233 	struct ieee80211_node *ni = ic->ic_bss;
3234 	int i, s;
3235 	uint16_t sn = 0, index = 0;
3236 	int expired = 0;
3237 	int cont = 0;
3238 	struct timeval now, timeout, expiry;
3239 
3240 	if (!buf->num_stored || buf->removed)
3241 		return;
3242 
3243 	s = splnet();
3244 	getmicrouptime(&now);
3245 	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3246 
3247 	for (i = 0; i < buf->buf_size ; i++) {
3248 		index = (buf->head_sn + i) % buf->buf_size;
3249 
3250 		if (ml_empty(&entries[index].frames)) {
3251 			/*
3252 			 * If there is a hole and the next frame didn't expire
3253 			 * we want to break and not advance SN.
3254 			 */
3255 			cont = 0;
3256 			continue;
3257 		}
3258 		timeradd(&entries[index].reorder_time, &timeout, &expiry);
3259 		if (!cont && timercmp(&now, &expiry, <))
3260 			break;
3261 
3262 		expired = 1;
3263 		/* continue until next hole after this expired frame */
3264 		cont = 1;
3265 		sn = (buf->head_sn + (i + 1)) & 0xfff;
3266 	}
3267 
3268 	if (expired) {
3269 		/* SN is set to the last expired frame + 1 */
3270 		iwx_release_frames(sc, ni, rxba, buf, sn, &ml);
3271 		if_input(&sc->sc_ic.ic_if, &ml);
3272 		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3273 	} else {
3274 		/*
3275 		 * If no frame expired and there are stored frames, index is now
3276 		 * pointing to the first unexpired frame - modify reorder timeout
3277 		 * accordingly.
3278 		 */
3279 		timeout_add_usec(&buf->reorder_timer,
3280 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3281 	}
3282 
3283 	splx(s);
3284 }
3285 
3286 #define IWX_MAX_RX_BA_SESSIONS 16
3287 
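/*
 * Start or stop an RX block ack session for the given TID.  The
 * session is registered with firmware via an ADD_STA command; on
 * success firmware assigns a block ack ID (BAID) which indexes this
 * driver's reorder buffer state in sc->sc_rxba_data[].
 */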
3288 void
3289 iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3290     uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3291 {
3292 	struct ieee80211com *ic = &sc->sc_ic;
3293 	struct iwx_add_sta_cmd cmd;
3294 	struct iwx_node *in = (void *)ni;
3295 	int err, s;
3296 	uint32_t status;
3297 	struct iwx_rxba_data *rxba = NULL;
3298 	uint8_t baid = 0;
3299 
3300 	s = splnet();
3301 
3302 	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
3303 		ieee80211_addba_req_refuse(ic, ni, tid);
3304 		splx(s);
3305 		return;
3306 	}
3307 
3308 	memset(&cmd, 0, sizeof(cmd));
3309 
3310 	cmd.sta_id = IWX_STATION_ID;
3311 	cmd.mac_id_n_color
3312 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3313 	cmd.add_modify = IWX_STA_MODE_MODIFY;
3314 
3315 	if (start) {
3316 		cmd.add_immediate_ba_tid = (uint8_t)tid;
3317 		cmd.add_immediate_ba_ssn = htole16(ssn);
3318 		cmd.rx_ba_window = htole16(winsize);
3319 	} else {
3320 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
3321 	}
3322 	cmd.modify_mask = start ? IWX_STA_MODIFY_ADD_BA_TID :
3323 	    IWX_STA_MODIFY_REMOVE_BA_TID;
3324 
3325 	status = IWX_ADD_STA_SUCCESS;
3326 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
3327 	    &status);
3328 
3329 	if (err || (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS) {
3330 		if (start)
3331 			ieee80211_addba_req_refuse(ic, ni, tid);
3332 		splx(s);
3333 		return;
3334 	}
3335 
3336 	/* Deaggregation is done in hardware. */
3337 	if (start) {
3338 		if (!(status & IWX_ADD_STA_BAID_VALID_MASK)) {
3339 			ieee80211_addba_req_refuse(ic, ni, tid);
3340 			splx(s);
3341 			return;
3342 		}
3343 		baid = (status & IWX_ADD_STA_BAID_MASK) >>
3344 		    IWX_ADD_STA_BAID_SHIFT;
3345 		if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3346 		    baid >= nitems(sc->sc_rxba_data)) {
3347 			ieee80211_addba_req_refuse(ic, ni, tid);
3348 			splx(s);
3349 			return;
3350 		}
3351 		rxba = &sc->sc_rxba_data[baid];
3352 		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3353 			ieee80211_addba_req_refuse(ic, ni, tid);
3354 			splx(s);
3355 			return;
3356 		}
3357 		rxba->sta_id = IWX_STATION_ID;
3358 		rxba->tid = tid;
3359 		rxba->baid = baid;
3360 		rxba->timeout = timeout_val;
3361 		getmicrouptime(&rxba->last_rx);
3362 		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
3363 		    winsize);
3364 		if (timeout_val != 0) {
3365 			struct ieee80211_rx_ba *ba;
3366 			timeout_add_usec(&rxba->session_timer,
3367 			    timeout_val);
3368 			/* XXX disable net80211's BA timeout handler */
3369 			ba = &ni->ni_rx_ba[tid];
3370 			ba->ba_timeout_val = 0;
3371 		}
3372 	} else {
3373 		int i;
3374 		for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3375 			rxba = &sc->sc_rxba_data[i];
3376 			if (rxba->baid ==
3377 			    IWX_RX_REORDER_DATA_INVALID_BAID)
3378 				continue;
3379 			if (rxba->tid != tid)
3380 				continue;
3381 			iwx_clear_reorder_buffer(sc, rxba);
3382 			break;
3383 		}
3384 	}
3385 
3386 	if (start) {
3387 		sc->sc_rx_ba_sessions++;
3388 		ieee80211_addba_req_accept(ic, ni, tid);
3389 	} else if (sc->sc_rx_ba_sessions > 0)
3390 		sc->sc_rx_ba_sessions--;
3391 
3392 	splx(s);
3393 }
3394 
3395 void
3396 iwx_mac_ctxt_task(void *arg)
3397 {
3398 	struct iwx_softc *sc = arg;
3399 	struct ieee80211com *ic = &sc->sc_ic;
3400 	struct iwx_node *in = (void *)ic->ic_bss;
3401 	int err, s = splnet();
3402 
3403 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3404 	    ic->ic_state != IEEE80211_S_RUN) {
3405 		refcnt_rele_wake(&sc->task_refs);
3406 		splx(s);
3407 		return;
3408 	}
3409 
3410 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
3411 	if (err)
3412 		printf("%s: failed to update MAC\n", DEVNAME(sc));
3413 
3414 	iwx_unprotect_session(sc, in);
3415 
3416 	refcnt_rele_wake(&sc->task_refs);
3417 	splx(s);
3418 }
3419 
3420 void
3421 iwx_phy_ctxt_task(void *arg)
3422 {
3423 	struct iwx_softc *sc = arg;
3424 	struct ieee80211com *ic = &sc->sc_ic;
3425 	struct iwx_node *in = (void *)ic->ic_bss;
3426 	struct ieee80211_node *ni = &in->in_ni;
3427 	uint8_t chains, sco, vht_chan_width;
3428 	int err, s = splnet();
3429 
3430 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3431 	    ic->ic_state != IEEE80211_S_RUN ||
3432 	    in->in_phyctxt == NULL) {
3433 		refcnt_rele_wake(&sc->task_refs);
3434 		splx(s);
3435 		return;
3436 	}
3437 
3438 	chains = iwx_mimo_enabled(sc) ? 2 : 1;
3439 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3440 	    IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
3441 	    ieee80211_node_supports_ht_chan40(ni))
3442 		sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3443 	else
3444 		sco = IEEE80211_HTOP0_SCO_SCN;
3445 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
3446 	    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
3447 	    ieee80211_node_supports_vht_chan80(ni))
3448 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
3449 	else
3450 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
3451 	if (in->in_phyctxt->sco != sco ||
3452 	    in->in_phyctxt->vht_chan_width != vht_chan_width) {
3453 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
3454 		    in->in_phyctxt->channel, chains, chains, 0, sco,
3455 		    vht_chan_width);
3456 		if (err)
3457 			printf("%s: failed to update PHY\n", DEVNAME(sc));
3458 	}
3459 
3460 	refcnt_rele_wake(&sc->task_refs);
3461 	splx(s);
3462 }
3463 
3464 void
3465 iwx_updatechan(struct ieee80211com *ic)
3466 {
3467 	struct iwx_softc *sc = ic->ic_softc;
3468 
3469 	if (ic->ic_state == IEEE80211_S_RUN &&
3470 	    !task_pending(&sc->newstate_task))
3471 		iwx_add_task(sc, systq, &sc->phy_ctxt_task);
3472 }
3473 
3474 void
3475 iwx_updateprot(struct ieee80211com *ic)
3476 {
3477 	struct iwx_softc *sc = ic->ic_softc;
3478 
3479 	if (ic->ic_state == IEEE80211_S_RUN &&
3480 	    !task_pending(&sc->newstate_task))
3481 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3482 }
3483 
3484 void
3485 iwx_updateslot(struct ieee80211com *ic)
3486 {
3487 	struct iwx_softc *sc = ic->ic_softc;
3488 
3489 	if (ic->ic_state == IEEE80211_S_RUN &&
3490 	    !task_pending(&sc->newstate_task))
3491 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3492 }
3493 
3494 void
3495 iwx_updateedca(struct ieee80211com *ic)
3496 {
3497 	struct iwx_softc *sc = ic->ic_softc;
3498 
3499 	if (ic->ic_state == IEEE80211_S_RUN &&
3500 	    !task_pending(&sc->newstate_task))
3501 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3502 }
3503 
3504 void
3505 iwx_updatedtim(struct ieee80211com *ic)
3506 {
3507 	struct iwx_softc *sc = ic->ic_softc;
3508 
3509 	if (ic->ic_state == IEEE80211_S_RUN &&
3510 	    !task_pending(&sc->newstate_task))
3511 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3512 }
3513 
3514 void
3515 iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
3516     uint8_t tid)
3517 {
3518 	struct ieee80211com *ic = &sc->sc_ic;
3519 	struct ieee80211_tx_ba *ba;
3520 	int err, qid;
3521 	struct iwx_tx_ring *ring;
3522 
3523 	/* Ensure we can map this TID to an aggregation queue. */
3524 	if (tid >= IWX_MAX_TID_COUNT)
3525 		return;
3526 
3527 	ba = &ni->ni_tx_ba[tid];
3528 	if (ba->ba_state != IEEE80211_BA_REQUESTED)
3529 		return;
3530 
3531 	qid = sc->aggqid[tid];
3532 	if (qid == 0) {
3533 		/* Firmware should pick the next unused Tx queue. */
3534 		qid = fls(sc->qenablemsk);
3535 	}
3536 
3537 	/*
3538 	 * Simply enable the queue.
3539 	 * Firmware handles Tx BA session setup and teardown.
3540 	 */
3541 	if ((sc->qenablemsk & (1 << qid)) == 0) {
3542 		if (!iwx_nic_lock(sc)) {
3543 			ieee80211_addba_resp_refuse(ic, ni, tid,
3544 			    IEEE80211_STATUS_UNSPECIFIED);
3545 			return;
3546 		}
3547 		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
3548 		    IWX_TX_RING_COUNT);
3549 		iwx_nic_unlock(sc);
3550 		if (err) {
3551 			printf("%s: could not enable Tx queue %d "
3552 			    "(error %d)\n", DEVNAME(sc), qid, err);
3553 			ieee80211_addba_resp_refuse(ic, ni, tid,
3554 			    IEEE80211_STATUS_UNSPECIFIED);
3555 			return;
3556 		}
3557 
3558 		ba->ba_winstart = 0;
3559 	} else
3560 		ba->ba_winstart = ni->ni_qos_txseqs[tid];
3561 
3562 	ba->ba_winend = (ba->ba_winstart + ba->ba_winsize - 1) & 0xfff;
3563 
3564 	ring = &sc->txq[qid];
3565 	ba->ba_timeout_val = 0;
3566 	ieee80211_addba_resp_accept(ic, ni, tid);
3567 	sc->aggqid[tid] = qid;
3568 }
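
/*
 * Illustrative sketch (not part of the driver): 802.11 sequence
 * numbers are 12 bits wide, so the block ack window end computed
 * above wraps modulo 4096. The helper below is hypothetical and only
 * demonstrates the arithmetic.
 */
#if 0
static uint16_t
ba_window_end_example(uint16_t winstart, uint16_t winsize)
{
	/* e.g. winstart = 4090, winsize = 64 -> (4153 & 0xfff) = 57 */
	return (winstart + winsize - 1) & 0xfff;
}
#endif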
3569 
3570 void
3571 iwx_ba_task(void *arg)
3572 {
3573 	struct iwx_softc *sc = arg;
3574 	struct ieee80211com *ic = &sc->sc_ic;
3575 	struct ieee80211_node *ni = ic->ic_bss;
3576 	int s = splnet();
3577 	int tid;
3578 
3579 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3580 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3581 			break;
3582 		if (sc->ba_rx.start_tidmask & (1 << tid)) {
3583 			struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3584 			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3585 			    ba->ba_winsize, ba->ba_timeout_val, 1);
3586 			sc->ba_rx.start_tidmask &= ~(1 << tid);
3587 		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3588 			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3589 			sc->ba_rx.stop_tidmask &= ~(1 << tid);
3590 		}
3591 	}
3592 
3593 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3594 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3595 			break;
3596 		if (sc->ba_tx.start_tidmask & (1 << tid)) {
3597 			iwx_sta_tx_agg_start(sc, ni, tid);
3598 			sc->ba_tx.start_tidmask &= ~(1 << tid);
3599 		}
3600 	}
3601 
3602 	refcnt_rele_wake(&sc->task_refs);
3603 	splx(s);
3604 }
3605 
3606 /*
3607  * This function is called by upper layer when an ADDBA request is received
3608  * from another STA and before the ADDBA response is sent.
3609  */
3610 int
3611 iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3612     uint8_t tid)
3613 {
3614 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3615 
3616 	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
3617 	    tid >= IWX_MAX_TID_COUNT)
3618 		return ENOSPC;
3619 
3620 	if (sc->ba_rx.start_tidmask & (1 << tid))
3621 		return EBUSY;
3622 
3623 	sc->ba_rx.start_tidmask |= (1 << tid);
3624 	iwx_add_task(sc, systq, &sc->ba_task);
3625 
3626 	return EBUSY;
3627 }
3628 
3629 /*
3630  * This function is called by upper layer on teardown of an HT-immediate
3631  * Block Ack agreement (eg. upon receipt of a DELBA frame).
3632  */
3633 void
3634 iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3635     uint8_t tid)
3636 {
3637 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3638 
3639 	if (tid >= IWX_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3640 		return;
3641 
3642 	sc->ba_rx.stop_tidmask |= (1 << tid);
3643 	iwx_add_task(sc, systq, &sc->ba_task);
3644 }
3645 
3646 int
3647 iwx_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3648     uint8_t tid)
3649 {
3650 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3651 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3652 
3653 	/*
3654 	 * Require a firmware version which uses an internal AUX queue.
3655 	 * The value of IWX_FIRST_AGG_TX_QUEUE would be incorrect otherwise.
3656 	 */
3657 	if (sc->first_data_qid != IWX_DQA_CMD_QUEUE + 1)
3658 		return ENOTSUP;
3659 
3660 	/* Ensure we can map this TID to an aggregation queue. */
3661 	if (tid >= IWX_MAX_TID_COUNT)
3662 		return EINVAL;
3663 
3664 	/* We only support a fixed Tx aggregation window size, for now. */
3665 	if (ba->ba_winsize != IWX_FRAME_LIMIT)
3666 		return ENOTSUP;
3667 
3668 	/* Is firmware already using an agg queue with this TID? */
3669 	if (sc->aggqid[tid] != 0)
3670 		return ENOSPC;
3671 
3672 	/* Are we already processing an ADDBA request? */
3673 	if (sc->ba_tx.start_tidmask & (1 << tid))
3674 		return EBUSY;
3675 
3676 	sc->ba_tx.start_tidmask |= (1 << tid);
3677 	iwx_add_task(sc, systq, &sc->ba_task);
3678 
3679 	return EBUSY;
3680 }
3681 
3682 void
3683 iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
3684 {
3685 	uint32_t mac_addr0, mac_addr1;
3686 
3687 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3688 
3689 	if (!iwx_nic_lock(sc))
3690 		return;
3691 
3692 	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc)));
3693 	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc)));
3694 
3695 	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3696 
3697 	/* If OEM fused a valid address, use it instead of the one in OTP. */
3698 	if (iwx_is_valid_mac_addr(data->hw_addr)) {
3699 		iwx_nic_unlock(sc);
3700 		return;
3701 	}
3702 
3703 	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc)));
3704 	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc)));
3705 
3706 	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3707 
3708 	iwx_nic_unlock(sc);
3709 }
3710 
3711 int
3712 iwx_is_valid_mac_addr(const uint8_t *addr)
3713 {
3714 	static const uint8_t reserved_mac[] = {
3715 		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3716 	};
3717 
3718 	return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3719 	    memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3720 	    memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3721 	    !ETHER_IS_MULTICAST(addr));
3722 }
3723 
3724 void
3725 iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest)
3726 {
3727 	const uint8_t *hw_addr;
3728 
3729 	hw_addr = (const uint8_t *)&mac_addr0;
3730 	dest[0] = hw_addr[3];
3731 	dest[1] = hw_addr[2];
3732 	dest[2] = hw_addr[1];
3733 	dest[3] = hw_addr[0];
3734 
3735 	hw_addr = (const uint8_t *)&mac_addr1;
3736 	dest[4] = hw_addr[1];
3737 	dest[5] = hw_addr[0];
3738 }
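
/*
 * Worked example for the flip above (the address is made up for
 * illustration): with little-endian words mac_addr0 = 0x00a0c912 and
 * mac_addr1 = 0x00003456, the bytes in memory are 12 c9 a0 00 and
 * 56 34 00 00, and the reversal yields the MAC address
 * 00:a0:c9:12:34:56.
 */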
3739 
3740 int
3741 iwx_nvm_get(struct iwx_softc *sc)
3742 {
3743 	struct iwx_nvm_get_info cmd = {};
3744 	struct iwx_nvm_data *nvm = &sc->sc_nvm;
3745 	struct iwx_host_cmd hcmd = {
3746 		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
3747 		.data = { &cmd, },
3748 		.len = { sizeof(cmd) },
3749 		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3750 		    IWX_NVM_GET_INFO)
3751 	};
3752 	int err;
3753 	uint32_t mac_flags;
3754 	/*
3755 	 * All the values in iwx_nvm_get_info_rsp v4 are the same as in v3,
3756 	 * except for the channel profile part of the regulatory section.
3757 	 * So we can access the new struct throughout, with the exception
3758 	 * of the channel profile.
3759 	 */
3760 	struct iwx_nvm_get_info_rsp *rsp;
3761 	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
3762 	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
3763 	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
3764 
3765 	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
3766 	err = iwx_send_cmd(sc, &hcmd);
3767 	if (err)
3768 		return err;
3769 
3770 	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
3771 		err = EIO;
3772 		goto out;
3773 	}
3774 
3775 	memset(nvm, 0, sizeof(*nvm));
3776 
3777 	iwx_set_mac_addr_from_csr(sc, nvm);
3778 	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
3779 		printf("%s: no valid mac address was found\n", DEVNAME(sc));
3780 		err = EINVAL;
3781 		goto out;
3782 	}
3783 
3784 	rsp = (void *)hcmd.resp_pkt->data;
3785 
3786 	/* Initialize general data */
3787 	nvm->nvm_version = le16toh(rsp->general.nvm_version);
3788 	nvm->n_hw_addrs = rsp->general.n_hw_addrs;
3789 
3790 	/* Initialize MAC sku data */
3791 	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
3792 	nvm->sku_cap_11ac_enable =
3793 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
3794 	nvm->sku_cap_11n_enable =
3795 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
3796 	nvm->sku_cap_11ax_enable =
3797 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
3798 	nvm->sku_cap_band_24GHz_enable =
3799 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
3800 	nvm->sku_cap_band_52GHz_enable =
3801 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
3802 	nvm->sku_cap_mimo_disable =
3803 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
3804 
3805 	/* Initialize PHY sku data */
3806 	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
3807 	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);
3808 
3809 	if (le32toh(rsp->regulatory.lar_enabled) &&
3810 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
3811 		nvm->lar_enabled = 1;
3812 	}
3813 
3814 	if (v4) {
3815 		iwx_init_channel_map(sc, NULL,
3816 		    rsp->regulatory.channel_profile, IWX_NUM_CHANNELS);
3817 	} else {
3818 		rsp_v3 = (void *)rsp;
3819 		iwx_init_channel_map(sc, rsp_v3->regulatory.channel_profile,
3820 		    NULL, IWX_NUM_CHANNELS_V1);
3821 	}
3822 out:
3823 	iwx_free_resp(sc, &hcmd);
3824 	return err;
3825 }
3826 
3827 int
3828 iwx_load_firmware(struct iwx_softc *sc)
3829 {
3830 	struct iwx_fw_sects *fws;
3831 	int err;
3832 
3833 	splassert(IPL_NET);
3834 
3835 	sc->sc_uc.uc_intr = 0;
3836 	sc->sc_uc.uc_ok = 0;
3837 
3838 	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
3839 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
3840 		err = iwx_ctxt_info_gen3_init(sc, fws);
3841 	else
3842 		err = iwx_ctxt_info_init(sc, fws);
3843 	if (err) {
3844 		printf("%s: could not init context info\n", DEVNAME(sc));
3845 		return err;
3846 	}
3847 
3848 	/* wait for the firmware to load */
3849 	err = tsleep_nsec(&sc->sc_uc, 0, "iwxuc", SEC_TO_NSEC(1));
3850 	if (err || !sc->sc_uc.uc_ok) {
3851 		printf("%s: could not load firmware, %d\n", DEVNAME(sc), err);
3852 		iwx_ctxt_info_free_paging(sc);
3853 	}
3854 
3855 	iwx_dma_contig_free(&sc->iml_dma);
3856 	iwx_ctxt_info_free_fw_img(sc);
3857 
3858 	if (!sc->sc_uc.uc_ok)
3859 		return EINVAL;
3860 
3861 	return err;
3862 }
3863 
3864 int
3865 iwx_start_fw(struct iwx_softc *sc)
3866 {
3867 	int err;
3868 
3869 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
3870 
3871 	iwx_disable_interrupts(sc);
3872 
3873 	/* make sure rfkill handshake bits are cleared */
3874 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
3875 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
3876 	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3877 
3878 	/* clear (again), then enable firmware load interrupt */
3879 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
3880 
3881 	err = iwx_nic_init(sc);
3882 	if (err) {
3883 		printf("%s: unable to init nic\n", DEVNAME(sc));
3884 		return err;
3885 	}
3886 
3887 	iwx_enable_fwload_interrupt(sc);
3888 
3889 	return iwx_load_firmware(sc);
3890 }
3891 
3892 int
3893 iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data,
3894     size_t len)
3895 {
3896 	const struct iwx_ucode_tlv *tlv;
3897 	uint32_t sha1 = 0;
3898 	uint16_t mac_type = 0, rf_id = 0;
3899 	uint8_t *pnvm_data = NULL, *tmp;
3900 	int hw_match = 0;
3901 	uint32_t size = 0;
3902 	int err;
3903 
3904 	while (len >= sizeof(*tlv)) {
3905 		uint32_t tlv_len, tlv_type;
3906 
3907 		len -= sizeof(*tlv);
3908 		tlv = (const void *)data;
3909 
3910 		tlv_len = le32toh(tlv->length);
3911 		tlv_type = le32toh(tlv->type);
3912 
3913 		if (len < tlv_len) {
3914 			printf("%s: invalid TLV len: %zd/%u\n",
3915 			    DEVNAME(sc), len, tlv_len);
3916 			err = EINVAL;
3917 			goto out;
3918 		}
3919 
3920 		data += sizeof(*tlv);
3921 
3922 		switch (tlv_type) {
3923 		case IWX_UCODE_TLV_PNVM_VERSION:
3924 			if (tlv_len < sizeof(uint32_t))
3925 				break;
3926 
3927 			sha1 = le32_to_cpup((const uint32_t *)data);
3928 			break;
3929 		case IWX_UCODE_TLV_HW_TYPE:
3930 			if (tlv_len < 2 * sizeof(uint16_t))
3931 				break;
3932 
3933 			if (hw_match)
3934 				break;
3935 
3936 			mac_type = le16_to_cpup((const uint16_t *)data);
3937 			rf_id = le16_to_cpup((const uint16_t *)(data +
3938 			    sizeof(uint16_t)));
3939 
3940 			if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) &&
3941 			    rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id))
3942 				hw_match = 1;
3943 			break;
3944 		case IWX_UCODE_TLV_SEC_RT: {
3945 			const struct iwx_pnvm_section *section;
3946 			uint32_t data_len;
3947 
3948 			section = (const void *)data;
3949 			data_len = tlv_len - sizeof(*section);
3950 
3951 			/* TODO: remove, this is a deprecated separator */
3952 			if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee)
3953 				break;
3954 
3955 			tmp = malloc(size + data_len, M_DEVBUF,
3956 			    M_WAITOK | M_CANFAIL | M_ZERO);
3957 			if (tmp == NULL) {
3958 				err = ENOMEM;
3959 				goto out;
3960 			}
3961 			memcpy(tmp, pnvm_data, size);
3962 			memcpy(tmp + size, section->data, data_len);
3963 			free(pnvm_data, M_DEVBUF, size);
3964 			pnvm_data = tmp;
3965 			size += data_len;
3966 			break;
3967 		}
3968 		case IWX_UCODE_TLV_PNVM_SKU:
3969 			/* New PNVM section started, stop parsing. */
3970 			goto done;
3971 		default:
3972 			break;
3973 		}
3974 
3975 		if (roundup(tlv_len, 4) > len)
3976 			break;
3977 		len -= roundup(tlv_len, 4);
3978 		data += roundup(tlv_len, 4);
3979 	}
3980 done:
3981 	if (!hw_match || size == 0) {
3982 		err = ENOENT;
3983 		goto out;
3984 	}
3985 
3986 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 0);
3987 	if (err) {
3988 		printf("%s: could not allocate DMA memory for PNVM\n",
3989 		    DEVNAME(sc));
3990 		err = ENOMEM;
3991 		goto out;
3992 	}
3993 	memcpy(sc->pnvm_dma.vaddr, pnvm_data, size);
3994 	iwx_ctxt_info_gen3_set_pnvm(sc);
3995 	sc->sc_pnvm_ver = sha1;
3996 out:
3997 	free(pnvm_data, M_DEVBUF, size);
3998 	return err;
3999 }
4000 
4001 int
4002 iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len)
4003 {
4004 	const struct iwx_ucode_tlv *tlv;
4005 
4006 	while (len >= sizeof(*tlv)) {
4007 		uint32_t tlv_len, tlv_type;
4008 
4009 		len -= sizeof(*tlv);
4010 		tlv = (const void *)data;
4011 
4012 		tlv_len = le32toh(tlv->length);
4013 		tlv_type = le32toh(tlv->type);
4014 
4015 		if (len < tlv_len || roundup(tlv_len, 4) > len)
4016 			return EINVAL;
4017 
4018 		if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) {
4019 			const struct iwx_sku_id *sku_id =
4020 				(const void *)(data + sizeof(*tlv));
4021 
4022 			data += sizeof(*tlv) + roundup(tlv_len, 4);
4023 			len -= roundup(tlv_len, 4);
4024 
4025 			if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) &&
4026 			    sc->sc_sku_id[1] == le32toh(sku_id->data[1]) &&
4027 			    sc->sc_sku_id[2] == le32toh(sku_id->data[2]) &&
4028 			    iwx_pnvm_handle_section(sc, data, len) == 0)
4029 				return 0;
4030 		} else {
4031 			data += sizeof(*tlv) + roundup(tlv_len, 4);
4032 			len -= roundup(tlv_len, 4);
4033 		}
4034 	}
4035 
4036 	return ENOENT;
4037 }
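
/*
 * Illustrative layout of the PNVM TLV stream parsed above: each entry
 * is an iwx_ucode_tlv header followed by a payload which is padded to
 * a 4-byte boundary, hence the roundup(tlv_len, 4) stride.
 *
 *   +------+--------+------------------+-----+------+--------+-- ...
 *   | type | length | payload          | pad | type | length |
 *   +------+--------+------------------+-----+------+--------+-- ...
 *
 * An IWX_UCODE_TLV_PNVM_SKU entry selects a section; the TLVs which
 * follow it are handed to iwx_pnvm_handle_section() until the next
 * PNVM_SKU entry terminates the section.
 */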
4038 
4039 /* Make AX210 firmware loading context point at PNVM image in DMA memory. */
4040 void
4041 iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc)
4042 {
4043 	struct iwx_prph_scratch *prph_scratch;
4044 	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
4045 
4046 	prph_scratch = sc->prph_scratch_dma.vaddr;
4047 	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
4048 
4049 	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr);
4050 	prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size);
4051 
4052 	bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, 0, sc->pnvm_dma.size,
4053 	    BUS_DMASYNC_PREWRITE);
4054 }
4055 
4056 /*
4057  * Load platform-NVM (non-volatile-memory) data from the filesystem.
4058  * This data apparently contains regulatory information and affects device
4059  * channel configuration.
4060  * The SKU of AX210 devices tells us which PNVM file section is needed.
4061  * Pre-AX210 devices store NVM data onboard.
4062  */
4063 int
4064 iwx_load_pnvm(struct iwx_softc *sc)
4065 {
4066 	const int wait_flags = IWX_PNVM_COMPLETE;
4067 	int s, err = 0;
4068 	u_char *pnvm_data = NULL;
4069 	size_t pnvm_size = 0;
4070 
4071 	if (sc->sc_sku_id[0] == 0 &&
4072 	    sc->sc_sku_id[1] == 0 &&
4073 	    sc->sc_sku_id[2] == 0)
4074 		return 0;
4075 
4076 	if (sc->sc_pnvm_name) {
4077 		if (sc->pnvm_dma.vaddr == NULL) {
4078 			err = loadfirmware(sc->sc_pnvm_name,
4079 			    &pnvm_data, &pnvm_size);
4080 			if (err) {
4081 				printf("%s: could not read %s (error %d)\n",
4082 				    DEVNAME(sc), sc->sc_pnvm_name, err);
4083 				return err;
4084 			}
4085 
4086 			err = iwx_pnvm_parse(sc, pnvm_data, pnvm_size);
4087 			if (err && err != ENOENT) {
4088 				free(pnvm_data, M_DEVBUF, pnvm_size);
4089 				return err;
4090 			}
4091 		} else
4092 			iwx_ctxt_info_gen3_set_pnvm(sc);
4093 	}
4094 
4095 	s = splnet();
4096 
4097 	if (!iwx_nic_lock(sc)) {
4098 		splx(s);
4099 		free(pnvm_data, M_DEVBUF, pnvm_size);
4100 		return EBUSY;
4101 	}
4102 
4103 	/*
4104 	 * If we don't have a platform NVM file, simply ask the firmware
4105 	 * to proceed without it.
4106 	 */
4107 
4108 	iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6,
4109 	    IWX_UREG_DOORBELL_TO_ISR6_PNVM);
4110 
4111 	/* Wait for the pnvm complete notification from firmware. */
4112 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4113 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
4114 		    SEC_TO_NSEC(2));
4115 		if (err)
4116 			break;
4117 	}
4118 
4119 	splx(s);
4120 	iwx_nic_unlock(sc);
4121 	free(pnvm_data, M_DEVBUF, pnvm_size);
4122 	return err;
4123 }
4124 
4125 int
4126 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
4127 {
4128 	struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
4129 		.valid = htole32(valid_tx_ant),
4130 	};
4131 
4132 	return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
4133 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4134 }
4135 
4136 int
4137 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
4138 {
4139 	struct iwx_phy_cfg_cmd phy_cfg_cmd;
4140 
4141 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
4142 	phy_cfg_cmd.calib_control.event_trigger =
4143 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
4144 	phy_cfg_cmd.calib_control.flow_trigger =
4145 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
4146 
4147 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
4148 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4149 }
4150 
4151 int
4152 iwx_send_dqa_cmd(struct iwx_softc *sc)
4153 {
4154 	struct iwx_dqa_enable_cmd dqa_cmd = {
4155 		.cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
4156 	};
4157 	uint32_t cmd_id;
4158 
4159 	cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
4160 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4161 }
4162 
4163 int
4164 iwx_load_ucode_wait_alive(struct iwx_softc *sc)
4165 {
4166 	int err;
4167 
4168 	err = iwx_read_firmware(sc);
4169 	if (err)
4170 		return err;
4171 
4172 	err = iwx_start_fw(sc);
4173 	if (err)
4174 		return err;
4175 
4176 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4177 		err = iwx_load_pnvm(sc);
4178 		if (err)
4179 			return err;
4180 	}
4181 
4182 	iwx_post_alive(sc);
4183 
4184 	return 0;
4185 }
4186 
4187 int
4188 iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
4189 {
4190 	const int wait_flags = IWX_INIT_COMPLETE;
4191 	struct iwx_nvm_access_complete_cmd nvm_complete = {};
4192 	struct iwx_init_extended_cfg_cmd init_cfg = {
4193 		.init_flags = htole32(IWX_INIT_NVM),
4194 	};
4195 	int err, s;
4196 
4197 	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
4198 		printf("%s: radio is disabled by hardware switch\n",
4199 		    DEVNAME(sc));
4200 		return EPERM;
4201 	}
4202 
4203 	s = splnet();
4204 	sc->sc_init_complete = 0;
4205 	err = iwx_load_ucode_wait_alive(sc);
4206 	if (err) {
4207 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
4208 		splx(s);
4209 		return err;
4210 	}
4211 
4212 	/*
4213 	 * Send the init config command to indicate that we are sending NVM
4214 	 * access commands.
4215 	 */
4216 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
4217 	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
4218 	if (err) {
4219 		splx(s);
4220 		return err;
4221 	}
4222 
4223 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
4224 	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
4225 	if (err) {
4226 		splx(s);
4227 		return err;
4228 	}
4229 
4230 	/* Wait for the init complete notification from the firmware. */
4231 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4232 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
4233 		    SEC_TO_NSEC(2));
4234 		if (err) {
4235 			splx(s);
4236 			return err;
4237 		}
4238 	}
4239 	splx(s);
4240 	if (readnvm) {
4241 		err = iwx_nvm_get(sc);
4242 		if (err) {
4243 			printf("%s: failed to read nvm\n", DEVNAME(sc));
4244 			return err;
4245 		}
4246 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
4247 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
4248 			    sc->sc_nvm.hw_addr);
4249 
4250 	}
4251 	return 0;
4252 }
4253 
4254 int
4255 iwx_config_ltr(struct iwx_softc *sc)
4256 {
4257 	struct iwx_ltr_config_cmd cmd = {
4258 		.flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
4259 	};
4260 
4261 	if (!sc->sc_ltr_enabled)
4262 		return 0;
4263 
4264 	return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4265 }
4266 
4267 void
4268 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx)
4269 {
4270 	struct iwx_rx_data *data = &ring->data[idx];
4271 
4272 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4273 		struct iwx_rx_transfer_desc *desc = ring->desc;
4274 		desc[idx].rbid = htole16(idx & 0xffff);
4275 		desc[idx].addr = htole64(data->map->dm_segs[0].ds_addr);
4276 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4277 		    idx * sizeof(*desc), sizeof(*desc),
4278 		    BUS_DMASYNC_PREWRITE);
4279 	} else {
4280 		((uint64_t *)ring->desc)[idx] =
4281 		    htole64(data->map->dm_segs[0].ds_addr | (idx & 0x0fff));
4282 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4283 		    idx * sizeof(uint64_t), sizeof(uint64_t),
4284 		    BUS_DMASYNC_PREWRITE);
4285 	}
4286 }
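
/*
 * Sketch (not part of the driver) of the pre-AX210 free RX descriptor
 * format written above: the DMA address and the 12-bit receive buffer
 * ID share a single little-endian 64-bit word. This assumes, as the
 * code above does, that buffer addresses leave the low 12 bits free.
 */
#if 0
static uint64_t
rx_desc_v1_example(uint64_t dma_addr, int idx)
{
	/* e.g. dma_addr = 0x12345000, idx = 7 -> LE64(0x12345007) */
	return htole64(dma_addr | (idx & 0x0fff));
}
#endif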
4287 
4288 int
4289 iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
4290 {
4291 	struct iwx_rx_ring *ring = &sc->rxq;
4292 	struct iwx_rx_data *data = &ring->data[idx];
4293 	struct mbuf *m;
4294 	int err;
4295 	int fatal = 0;
4296 
4297 	m = m_gethdr(M_DONTWAIT, MT_DATA);
4298 	if (m == NULL)
4299 		return ENOBUFS;
4300 
4301 	if (size <= MCLBYTES) {
4302 		MCLGET(m, M_DONTWAIT);
4303 	} else {
4304 		MCLGETL(m, M_DONTWAIT, IWX_RBUF_SIZE);
4305 	}
4306 	if ((m->m_flags & M_EXT) == 0) {
4307 		m_freem(m);
4308 		return ENOBUFS;
4309 	}
4310 
4311 	if (data->m != NULL) {
4312 		bus_dmamap_unload(sc->sc_dmat, data->map);
4313 		fatal = 1;
4314 	}
4315 
4316 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4317 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4318 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4319 	if (err) {
4320 		/* XXX */
4321 		if (fatal)
4322 			panic("%s: could not load RX mbuf", DEVNAME(sc));
4323 		m_freem(m);
4324 		return err;
4325 	}
4326 	data->m = m;
4327 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4328 
4329 	/* Update RX descriptor. */
4330 	iwx_update_rx_desc(sc, ring, idx);
4331 
4332 	return 0;
4333 }
4334 
4335 int
4336 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
4337     struct iwx_rx_mpdu_desc *desc)
4338 {
4339 	int energy_a, energy_b;
4340 
4341 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4342 		energy_a = desc->v3.energy_a;
4343 		energy_b = desc->v3.energy_b;
4344 	} else {
4345 		energy_a = desc->v1.energy_a;
4346 		energy_b = desc->v1.energy_b;
4347 	}
4348 	energy_a = energy_a ? -energy_a : -256;
4349 	energy_b = energy_b ? -energy_b : -256;
4350 	return MAX(energy_a, energy_b);
4351 }
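
/*
 * Worked example for the conversion above: the firmware reports
 * per-chain energy as a positive magnitude which the driver negates
 * into dBm, so energy_a = 45 maps to -45 dBm, while a report of 0
 * means "no measurement" and maps to the -256 sentinel. With
 * energy_a = 45 and energy_b = 0 the function returns
 * MAX(-45, -256) = -45.
 */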
4352 
4353 void
4354 iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4355     struct iwx_rx_data *data)
4356 {
4357 	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
4358 
4359 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4360 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
4361 
4362 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4363 }
4364 
4365 /*
4366  * Retrieve the average noise (in dBm) among receivers.
4367  */
4368 int
4369 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
4370 {
4371 	int i, total, nbant, noise;
4372 
4373 	total = nbant = noise = 0;
4374 	for (i = 0; i < 3; i++) {
4375 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
4376 		if (noise) {
4377 			total += noise;
4378 			nbant++;
4379 		}
4380 	}
4381 
4382 	/* There should be at least one antenna but check anyway. */
4383 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4384 }
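
/*
 * Worked example (illustrative values): with beacon_silence_rssi
 * readings of 10, 14 and 0, only the two non-zero antennas count:
 * total = 24, nbant = 2, and the function returns 24 / 2 - 107 =
 * -95 dBm.
 */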
4385 
4386 int
4387 iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4388     struct ieee80211_rxinfo *rxi)
4389 {
4390 	struct ieee80211com *ic = &sc->sc_ic;
4391 	struct ieee80211_key *k;
4392 	struct ieee80211_frame *wh;
4393 	uint64_t pn, *prsc;
4394 	uint8_t *ivp;
4395 	uint8_t tid;
4396 	int hdrlen, hasqos;
4397 
4398 	wh = mtod(m, struct ieee80211_frame *);
4399 	hdrlen = ieee80211_get_hdrlen(wh);
4400 	ivp = (uint8_t *)wh + hdrlen;
4401 
4402 	/* find key for decryption */
4403 	k = ieee80211_get_rxkey(ic, m, ni);
4404 	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
4405 		return 1;
4406 
4407 	/* Check that the ExtIV bit is set. */
4408 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4409 		return 1;
4410 
4411 	hasqos = ieee80211_has_qos(wh);
4412 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4413 	prsc = &k->k_rsc[tid];
4414 
4415 	/* Extract the 48-bit PN from the CCMP header. */
4416 	pn = (uint64_t)ivp[0]       |
4417 	     (uint64_t)ivp[1] <<  8 |
4418 	     (uint64_t)ivp[4] << 16 |
4419 	     (uint64_t)ivp[5] << 24 |
4420 	     (uint64_t)ivp[6] << 32 |
4421 	     (uint64_t)ivp[7] << 40;
4422 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4423 		if (pn < *prsc) {
4424 			ic->ic_stats.is_ccmp_replays++;
4425 			return 1;
4426 		}
4427 	} else if (pn <= *prsc) {
4428 		ic->ic_stats.is_ccmp_replays++;
4429 		return 1;
4430 	}
4431 	/* Last seen packet number is updated in ieee80211_inputm(). */
4432 
4433 	/*
4434 	 * Some firmware versions strip the MIC, and some don't. It is not
4435 	 * clear which of the capability flags could tell us what to expect.
4436 	 * For now, keep things simple and just leave the MIC in place if
4437 	 * it is present.
4438 	 *
4439 	 * The IV will be stripped by ieee80211_inputm().
4440 	 */
4441 	return 0;
4442 }
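
/*
 * CCMP header layout behind the PN extraction above (IEEE 802.11;
 * 8 bytes following the 802.11 header):
 *
 *   ivp[0] PN0    ivp[1] PN1    ivp[2] reserved    ivp[3] keyid/ExtIV
 *   ivp[4] PN2    ivp[5] PN3    ivp[6] PN4         ivp[7] PN5
 *
 * which is why the 48-bit packet number is assembled from bytes 0, 1,
 * 4, 5, 6 and 7 while bytes 2 and 3 are skipped.
 */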
4443 
4444 int
4445 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4446     struct ieee80211_rxinfo *rxi)
4447 {
4448 	struct ieee80211com *ic = &sc->sc_ic;
4449 	struct ifnet *ifp = IC2IFP(ic);
4450 	struct ieee80211_frame *wh;
4451 	struct ieee80211_node *ni;
4452 	int ret = 0;
4453 	uint8_t type, subtype;
4454 
4455 	wh = mtod(m, struct ieee80211_frame *);
4456 
4457 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4458 	if (type == IEEE80211_FC0_TYPE_CTL)
4459 		return 0;
4460 
4461 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4462 	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
4463 		return 0;
4464 
4465 	ni = ieee80211_find_rxnode(ic, wh);
4466 	/* Handle hardware decryption. */
4467 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
4468 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
4469 	    (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
4470 	    ((!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4471 	    ni->ni_rsncipher == IEEE80211_CIPHER_CCMP) ||
4472 	    (IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4473 	    ni->ni_rsngroupcipher == IEEE80211_CIPHER_CCMP))) {
4474 		if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4475 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4476 			ic->ic_stats.is_ccmp_dec_errs++;
4477 			ret = 1;
4478 			goto out;
4479 		}
4480 		/* Check whether decryption was successful or not. */
4481 		if ((rx_pkt_status &
4482 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4483 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
4484 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4485 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
4486 			ic->ic_stats.is_ccmp_dec_errs++;
4487 			ret = 1;
4488 			goto out;
4489 		}
4490 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4491 	}
4492 out:
4493 	if (ret)
4494 		ifp->if_ierrors++;
4495 	ieee80211_release_node(ic, ni);
4496 	return ret;
4497 }
4498 
4499 void
4500 iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
4501     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4502     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4503     struct mbuf_list *ml)
4504 {
4505 	struct ieee80211com *ic = &sc->sc_ic;
4506 	struct ifnet *ifp = IC2IFP(ic);
4507 	struct ieee80211_frame *wh;
4508 	struct ieee80211_node *ni;
4509 
4510 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
4511 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4512 
4513 	wh = mtod(m, struct ieee80211_frame *);
4514 	ni = ieee80211_find_rxnode(ic, wh);
4515 	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4516 	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
4517 		ifp->if_ierrors++;
4518 		m_freem(m);
4519 		ieee80211_release_node(ic, ni);
4520 		return;
4521 	}
4522 
4523 #if NBPFILTER > 0
4524 	if (sc->sc_drvbpf != NULL) {
4525 		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
4526 		uint16_t chan_flags;
4527 
4528 		tap->wr_flags = 0;
4529 		if (is_shortpre)
4530 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4531 		tap->wr_chan_freq =
4532 		    htole16(ic->ic_channels[chanidx].ic_freq);
4533 		chan_flags = ic->ic_channels[chanidx].ic_flags;
4534 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
4535 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
4536 			chan_flags &= ~IEEE80211_CHAN_HT;
4537 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
4538 		}
4539 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
4540 			chan_flags &= ~IEEE80211_CHAN_VHT;
4541 		tap->wr_chan_flags = htole16(chan_flags);
4542 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4543 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4544 		tap->wr_tsft = device_timestamp;
4545 		if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
4546 			uint8_t mcs = (rate_n_flags &
4547 			    (IWX_RATE_HT_MCS_RATE_CODE_MSK |
4548 			    IWX_RATE_HT_MCS_NSS_MSK));
4549 			tap->wr_rate = (0x80 | mcs);
4550 		} else {
4551 			uint8_t rate = (rate_n_flags &
4552 			    IWX_RATE_LEGACY_RATE_MSK);
4553 			switch (rate) {
4554 			/* CCK rates. */
4555 			case  10: tap->wr_rate =   2; break;
4556 			case  20: tap->wr_rate =   4; break;
4557 			case  55: tap->wr_rate =  11; break;
4558 			case 110: tap->wr_rate =  22; break;
4559 			/* OFDM rates. */
4560 			case 0xd: tap->wr_rate =  12; break;
4561 			case 0xf: tap->wr_rate =  18; break;
4562 			case 0x5: tap->wr_rate =  24; break;
4563 			case 0x7: tap->wr_rate =  36; break;
4564 			case 0x9: tap->wr_rate =  48; break;
4565 			case 0xb: tap->wr_rate =  72; break;
4566 			case 0x1: tap->wr_rate =  96; break;
4567 			case 0x3: tap->wr_rate = 108; break;
4568 			/* Unknown rate: should not happen. */
4569 			default:  tap->wr_rate =   0;
4570 			}
4571 		}
4572 
4573 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4574 		    m, BPF_DIRECTION_IN);
4575 	}
4576 #endif
4577 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4578 	ieee80211_release_node(ic, ni);
4579 }
4580 
4581 /*
4582  * Drop duplicate 802.11 retransmissions
4583  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4584  * and handle pseudo-duplicate frames which result from deaggregation
4585  * of A-MSDU frames in hardware.
4586  */
4587 int
4588 iwx_detect_duplicate(struct iwx_softc *sc, struct mbuf *m,
4589     struct iwx_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4590 {
4591 	struct ieee80211com *ic = &sc->sc_ic;
4592 	struct iwx_node *in = (void *)ic->ic_bss;
4593 	struct iwx_rxq_dup_data *dup_data = &in->dup_data;
4594 	uint8_t tid = IWX_MAX_TID_COUNT, subframe_idx;
4595 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4596 	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4597 	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4598 	int hasqos = ieee80211_has_qos(wh);
4599 	uint16_t seq;
4600 
4601 	if (type == IEEE80211_FC0_TYPE_CTL ||
4602 	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4603 	    IEEE80211_IS_MULTICAST(wh->i_addr1))
4604 		return 0;
4605 
4606 	if (hasqos) {
4607 		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4608 		if (tid > IWX_MAX_TID_COUNT)
4609 			tid = IWX_MAX_TID_COUNT;
4610 	}
4611 
4612 	/* If this wasn't part of an A-MSDU, the subframe index will be 0. */
4613 	subframe_idx = desc->amsdu_info &
4614 		IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4615 
4616 	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4617 	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4618 	    dup_data->last_seq[tid] == seq &&
4619 	    dup_data->last_sub_frame[tid] >= subframe_idx)
4620 		return 1;
4621 
4622 	/*
4623 	 * Allow the same frame sequence number for all A-MSDU subframes
4624 	 * following the first subframe.
4625 	 * Otherwise these subframes would be discarded as replays.
4626 	 */
4627 	if (dup_data->last_seq[tid] == seq &&
4628 	    subframe_idx > dup_data->last_sub_frame[tid] &&
4629 	    (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU)) {
4630 		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4631 	}
4632 
4633 	dup_data->last_seq[tid] = seq;
4634 	dup_data->last_sub_frame[tid] = subframe_idx;
4635 
4636 	return 0;
4637 }
4638 
4639 /*
4640  * Returns true if sn2 - buffer_size < sn1 < sn2.
4641  * To be used only in order to compare reorder buffer head with NSSN.
4642  * We fully trust NSSN unless it is behind us due to reorder timeout.
4643  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
4644  */
4645 int
4646 iwx_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
4647 {
4648 	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
4649 }
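
/*
 * Worked example: with buffer_size = 64 and sn2 = 100, sn1 = 80 lies
 * inside the window and the function returns true, while sn1 = 100
 * (equal to sn2) and sn1 = 2000 (far ahead of sn2) return false.
 * Because SEQ_LT() compares modulo 4096, the window also works across
 * the numeric wrap: with sn2 = 5, sn1 = 4090 is inside the window.
 */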
4650 
4651 void
4652 iwx_release_frames(struct iwx_softc *sc, struct ieee80211_node *ni,
4653     struct iwx_rxba_data *rxba, struct iwx_reorder_buffer *reorder_buf,
4654     uint16_t nssn, struct mbuf_list *ml)
4655 {
4656 	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
4657 	uint16_t ssn = reorder_buf->head_sn;
4658 
4659 	/* ignore nssn smaller than head sn - this can happen due to timeout */
4660 	if (iwx_is_sn_less(nssn, ssn, reorder_buf->buf_size))
4661 		goto set_timer;
4662 
4663 	while (iwx_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
4664 		int index = ssn % reorder_buf->buf_size;
4665 		struct mbuf *m;
4666 		int chanidx, is_shortpre;
4667 		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
4668 		struct ieee80211_rxinfo *rxi;
4669 
4670 		/* This data is the same for all A-MSDU subframes. */
4671 		chanidx = entries[index].chanidx;
4672 		rx_pkt_status = entries[index].rx_pkt_status;
4673 		is_shortpre = entries[index].is_shortpre;
4674 		rate_n_flags = entries[index].rate_n_flags;
4675 		device_timestamp = entries[index].device_timestamp;
4676 		rxi = &entries[index].rxi;
4677 
4678 		/*
4679 		 * Empty the list; it holds more than one frame for an A-MSDU.
4680 		 * An empty list is valid as well, since the NSSN indicates
4681 		 * frames were received.
4682 		 */
4683 		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
4684 			iwx_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
4685 			    rate_n_flags, device_timestamp, rxi, ml);
4686 			reorder_buf->num_stored--;
4687 
4688 			/*
4689 			 * Allow the same frame sequence number and CCMP PN for
4690 			 * all A-MSDU subframes following the first subframe.
4691 			 * Otherwise they would be discarded as replays.
4692 			 */
4693 			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4694 			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4695 		}
4696 
4697 		ssn = (ssn + 1) & 0xfff;
4698 	}
4699 	reorder_buf->head_sn = nssn;
4700 
4701 set_timer:
4702 	if (reorder_buf->num_stored && !reorder_buf->removed) {
4703 		timeout_add_usec(&reorder_buf->reorder_timer,
4704 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
4705 	} else
4706 		timeout_del(&reorder_buf->reorder_timer);
4707 }
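
/*
 * Note on the indexing used above: reorder buffer slots are addressed
 * as ssn % buf_size, so with buf_size = 64 the sequence numbers 3, 67
 * and 131 all map to slot 3. A slot holds a list of frames because all
 * subframes of a de-aggregated A-MSDU share one sequence number.
 */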
4708 
4709 int
4710 iwx_oldsn_workaround(struct iwx_softc *sc, struct ieee80211_node *ni, int tid,
4711     struct iwx_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
4712 {
4713 	struct ieee80211com *ic = &sc->sc_ic;
4714 
4715 	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
4716 		/* we have a new (A-)MPDU ... */
4717 
4718 		/*
4719 		 * reset counter to 0 if we didn't have any oldsn in
4720 		 * the last A-MPDU (as detected by GP2 being identical)
4721 		 */
4722 		if (!buffer->consec_oldsn_prev_drop)
4723 			buffer->consec_oldsn_drops = 0;
4724 
4725 		/* either way, update our tracking state */
4726 		buffer->consec_oldsn_ampdu_gp2 = gp2;
4727 	} else if (buffer->consec_oldsn_prev_drop) {
4728 		/*
4729 		 * tracking state didn't change, and we had an old SN
4730 		 * indication before - do nothing in this case, we
4731 		 * already noted this one down and are waiting for the
4732 		 * next A-MPDU (by GP2)
4733 		 */
4734 		return 0;
4735 	}
4736 
4737 	/* return unless this MPDU has old SN */
4738 	if (!(reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN))
4739 		return 0;
4740 
4741 	/* update state */
4742 	buffer->consec_oldsn_prev_drop = 1;
4743 	buffer->consec_oldsn_drops++;
4744 
4745 	/* if limit is reached, send del BA and reset state */
4746 	if (buffer->consec_oldsn_drops == IWX_AMPDU_CONSEC_DROPS_DELBA) {
4747 		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
4748 		    0, tid);
4749 		buffer->consec_oldsn_prev_drop = 0;
4750 		buffer->consec_oldsn_drops = 0;
4751 		return 1;
4752 	}
4753 
4754 	return 0;
4755 }
4756 
4757 /*
4758  * Handle re-ordering of frames which were de-aggregated in hardware.
4759  * Returns 1 if the MPDU was consumed (buffered or dropped).
4760  * Returns 0 if the MPDU should be passed to upper layer.
4761  */
4762 int
4763 iwx_rx_reorder(struct iwx_softc *sc, struct mbuf *m, int chanidx,
4764     struct iwx_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
4765     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4766     struct mbuf_list *ml)
4767 {
4768 	struct ieee80211com *ic = &sc->sc_ic;
4769 	struct ieee80211_frame *wh;
4770 	struct ieee80211_node *ni;
4771 	struct iwx_rxba_data *rxba;
4772 	struct iwx_reorder_buffer *buffer;
4773 	uint32_t reorder_data = le32toh(desc->reorder_data);
4774 	int is_amsdu = (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU);
4775 	int last_subframe =
4776 		(desc->amsdu_info & IWX_RX_MPDU_AMSDU_LAST_SUBFRAME);
4777 	uint8_t tid;
4778 	uint8_t subframe_idx = (desc->amsdu_info &
4779 	    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4780 	struct iwx_reorder_buf_entry *entries;
4781 	int index;
4782 	uint16_t nssn, sn;
4783 	uint8_t baid, type, subtype;
4784 	int hasqos;
4785 
4786 	wh = mtod(m, struct ieee80211_frame *);
4787 	hasqos = ieee80211_has_qos(wh);
4788 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4789 
4790 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4791 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4792 
4793 	/*
4794 	 * We are only interested in Block Ack requests and unicast QoS data.
4795 	 */
4796 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
4797 		return 0;
4798 	if (hasqos) {
4799 		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
4800 			return 0;
4801 	} else {
4802 		if (type != IEEE80211_FC0_TYPE_CTL ||
4803 		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
4804 			return 0;
4805 	}
4806 
4807 	baid = (reorder_data & IWX_RX_MPDU_REORDER_BAID_MASK) >>
4808 		IWX_RX_MPDU_REORDER_BAID_SHIFT;
4809 	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
4810 	    baid >= nitems(sc->sc_rxba_data))
4811 		return 0;
4812 
4813 	rxba = &sc->sc_rxba_data[baid];
4814 	if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
4815 	    tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
4816 		return 0;
4817 
4818 	if (rxba->timeout != 0)
4819 		getmicrouptime(&rxba->last_rx);
4820 
4821 	/* Bypass A-MPDU re-ordering in net80211. */
4822 	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
4823 
4824 	nssn = reorder_data & IWX_RX_MPDU_REORDER_NSSN_MASK;
4825 	sn = (reorder_data & IWX_RX_MPDU_REORDER_SN_MASK) >>
4826 		IWX_RX_MPDU_REORDER_SN_SHIFT;
4827 
4828 	buffer = &rxba->reorder_buf;
4829 	entries = &rxba->entries[0];
4830 
4831 	if (!buffer->valid) {
4832 		if (reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN)
4833 			return 0;
4834 		buffer->valid = 1;
4835 	}
4836 
4837 	ni = ieee80211_find_rxnode(ic, wh);
4838 	if (type == IEEE80211_FC0_TYPE_CTL &&
4839 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
4840 		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
4841 		goto drop;
4842 	}
4843 
4844 	/*
4845 	 * If there was a significant jump in the NSSN, adjust.
4846 	 * If the SN is smaller than the NSSN it might need to first go into
4847 	 * the reorder buffer, in which case we just release up to it and the
4848 	 * rest of the function will take care of storing it and releasing up to
4849 	 * the nssn.
4850 	 */
4851 	if (!iwx_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
4852 	    buffer->buf_size) ||
4853 	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
4854 		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
4855 		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
4856 		iwx_release_frames(sc, ni, rxba, buffer, min_sn, ml);
4857 	}
4858 
4859 	if (iwx_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
4860 	    device_timestamp)) {
4861 		/* BA session will be torn down. */
4862 		ic->ic_stats.is_ht_rx_ba_window_jump++;
4863 		goto drop;
4864 
4865 	}
4866 
4867 	/* drop any outdated packets */
4868 	if (SEQ_LT(sn, buffer->head_sn)) {
4869 		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
4870 		goto drop;
4871 	}
4872 
4873 	/* release immediately if allowed by nssn and no stored frames */
4874 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
4875 		if (iwx_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
4876 		   (!is_amsdu || last_subframe))
4877 			buffer->head_sn = nssn;
4878 		ieee80211_release_node(ic, ni);
4879 		return 0;
4880 	}
4881 
4882 	/*
4883 	 * Release immediately if there are no stored frames and the SN is
4884 	 * equal to the head. This can happen due to the reorder timer, when
4885 	 * NSSN is behind head_sn: after we have released everything and the
4886 	 * next frame in sequence arrives, the NSSN says we cannot release
4887 	 * it immediately, while technically there is no hole and we can
4888 	 * move forward.
4889 	 */
4890 	if (!buffer->num_stored && sn == buffer->head_sn) {
4891 		if (!is_amsdu || last_subframe)
4892 			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
4893 		ieee80211_release_node(ic, ni);
4894 		return 0;
4895 	}
4896 
4897 	index = sn % buffer->buf_size;
4898 
4899 	/*
4900 	 * Check if we have already stored this frame. An A-MSDU is either
4901 	 * received in full or not at all, so the logic is simple: if there
4902 	 * are frames at this position in the buffer and the last frame from
4903 	 * an A-MSDU had a different SN, it is a retransmission. If the SN is
4904 	 * the same and the subframe index is incrementing, it is the same
4905 	 * A-MSDU; otherwise it is a retransmission.
4906 	 */
4907 	if (!ml_empty(&entries[index].frames)) {
4908 		if (!is_amsdu) {
4909 			ic->ic_stats.is_ht_rx_ba_no_buf++;
4910 			goto drop;
4911 		} else if (sn != buffer->last_amsdu ||
4912 		    buffer->last_sub_index >= subframe_idx) {
4913 			ic->ic_stats.is_ht_rx_ba_no_buf++;
4914 			goto drop;
4915 		}
4916 	} else {
4917 		/* This data is the same for all A-MSDU subframes. */
4918 		entries[index].chanidx = chanidx;
4919 		entries[index].is_shortpre = is_shortpre;
4920 		entries[index].rate_n_flags = rate_n_flags;
4921 		entries[index].device_timestamp = device_timestamp;
4922 		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
4923 	}
4924 
4925 	/* put in reorder buffer */
4926 	ml_enqueue(&entries[index].frames, m);
4927 	buffer->num_stored++;
4928 	getmicrouptime(&entries[index].reorder_time);
4929 
4930 	if (is_amsdu) {
4931 		buffer->last_amsdu = sn;
4932 		buffer->last_sub_index = subframe_idx;
4933 	}
4934 
4935 	/*
4936 	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
4937 	 * The reason is that NSSN advances on the first sub-frame, and may
4938 	 * cause the reorder buffer to advance before all the sub-frames arrive.
4939 	 * Example: the reorder buffer contains SN 0 & 2, and we receive an
4940 	 * A-MSDU with SN 1. The NSSN for the first subframe will be 3, with
4941 	 * the result of the driver releasing SN 0, 1, 2. When subframe 1
4942 	 * arrives, the reorder buffer is already ahead and it is dropped.
4943 	 * If the last sub-frame is not on this queue - we will get frame
4944 	 * release notification with up to date NSSN.
4945 	 */
4946 	if (!is_amsdu || last_subframe)
4947 		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
4948 
4949 	ieee80211_release_node(ic, ni);
4950 	return 1;
4951 
4952 drop:
4953 	m_freem(m);
4954 	ieee80211_release_node(ic, ni);
4955 	return 1;
4956 }
4957 
4958 void
4959 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
4960     size_t maxlen, struct mbuf_list *ml)
4961 {
4962 	struct ieee80211com *ic = &sc->sc_ic;
4963 	struct ieee80211_rxinfo rxi;
4964 	struct iwx_rx_mpdu_desc *desc;
4965 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4966 	int rssi;
4967 	uint8_t chanidx;
4968 	uint16_t phy_info;
4969 	size_t desc_size;
4970 
4971 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
4972 		desc_size = sizeof(*desc);
4973 	else
4974 		desc_size = IWX_RX_DESC_SIZE_V1;
4975 
4976 	if (maxlen < desc_size) {
4977 		m_freem(m);
4978 		return; /* drop */
4979 	}
4980 
4981 	desc = (struct iwx_rx_mpdu_desc *)pktdata;
4982 
4983 	if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
4984 	    !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
4985 		m_freem(m);
4986 		return; /* drop */
4987 	}
4988 
4989 	len = le16toh(desc->mpdu_len);
4990 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4991 		/* Allow control frames in monitor mode. */
4992 		if (len < sizeof(struct ieee80211_frame_cts)) {
4993 			ic->ic_stats.is_rx_tooshort++;
4994 			IC2IFP(ic)->if_ierrors++;
4995 			m_freem(m);
4996 			return;
4997 		}
4998 	} else if (len < sizeof(struct ieee80211_frame)) {
4999 		ic->ic_stats.is_rx_tooshort++;
5000 		IC2IFP(ic)->if_ierrors++;
5001 		m_freem(m);
5002 		return;
5003 	}
5004 	if (len > maxlen - desc_size) {
5005 		IC2IFP(ic)->if_ierrors++;
5006 		m_freem(m);
5007 		return;
5008 	}
5009 
5010 	m->m_data = pktdata + desc_size;
5011 	m->m_pkthdr.len = m->m_len = len;
5012 
5013 	/* Account for padding following the frame header. */
5014 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
5015 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5016 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5017 		if (type == IEEE80211_FC0_TYPE_CTL) {
5018 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
5019 			case IEEE80211_FC0_SUBTYPE_CTS:
5020 				hdrlen = sizeof(struct ieee80211_frame_cts);
5021 				break;
5022 			case IEEE80211_FC0_SUBTYPE_ACK:
5023 				hdrlen = sizeof(struct ieee80211_frame_ack);
5024 				break;
5025 			default:
5026 				hdrlen = sizeof(struct ieee80211_frame_min);
5027 				break;
5028 			}
5029 		} else
5030 			hdrlen = ieee80211_get_hdrlen(wh);
5031 
5032 		if ((le16toh(desc->status) &
5033 		    IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
5034 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
5035 			/* Padding is inserted after the IV. */
5036 			hdrlen += IEEE80211_CCMP_HDRLEN;
5037 		}
5038 
5039 		memmove(m->m_data + 2, m->m_data, hdrlen);
5040 		m_adj(m, 2);
5041 	}
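
	/*
	 * Example of the padding removal above: the hardware places 2 pad
	 * bytes between the frame header and the payload. memmove() shifts
	 * the header forward by 2 bytes so that it overwrites the padding,
	 * and m_adj() then trims the 2 stale bytes from the front, leaving
	 * header and payload contiguous.
	 */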
5042 
5043 	memset(&rxi, 0, sizeof(rxi));
5044 
5045 	/*
5046 	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
5047 	 * in place for each subframe. But it leaves the 'A-MSDU present'
5048 	 * bit set in the frame header. We need to clear this bit ourselves.
5049 	 * (XXX This workaround is not required on AX200/AX201 devices that
5050 	 * have been tested by me, but it's unclear when this problem was
5051 	 * fixed in the hardware. It definitely affects the 9k generation.
5052 	 * Leaving this in place for now since some 9k/AX200 hybrids seem
5053 	 * to exist that we may eventually add support for.)
5054 	 *
5055 	 * And we must allow the same CCMP PN for subframes following the
5056 	 * first subframe. Otherwise they would be discarded as replays.
5057 	 */
5058 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
5059 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5060 		uint8_t subframe_idx = (desc->amsdu_info &
5061 		    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5062 		if (subframe_idx > 0)
5063 			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5064 		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
5065 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
5066 			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
5067 			    struct ieee80211_qosframe_addr4 *);
5068 			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5069 		} else if (ieee80211_has_qos(wh) &&
5070 		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
5071 			struct ieee80211_qosframe *qwh = mtod(m,
5072 			    struct ieee80211_qosframe *);
5073 			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5074 		}
5075 	}
5076 
5077 	/*
5078 	 * Verify decryption before duplicate detection. The latter uses
5079 	 * the TID supplied in QoS frame headers and this TID is implicitly
5080 	 * verified as part of the CCMP nonce.
5081 	 */
5082 	if (iwx_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5083 		m_freem(m);
5084 		return;
5085 	}
5086 
5087 	if (iwx_detect_duplicate(sc, m, desc, &rxi)) {
5088 		m_freem(m);
5089 		return;
5090 	}
5091 
5092 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5093 		rate_n_flags = le32toh(desc->v3.rate_n_flags);
5094 		chanidx = desc->v3.channel;
5095 		device_timestamp = le32toh(desc->v3.gp2_on_air_rise);
5096 	} else {
5097 		rate_n_flags = le32toh(desc->v1.rate_n_flags);
5098 		chanidx = desc->v1.channel;
5099 		device_timestamp = le32toh(desc->v1.gp2_on_air_rise);
5100 	}
5101 
5102 	phy_info = le16toh(desc->phy_info);
5103 
5104 	rssi = iwx_rxmq_get_signal_strength(sc, desc);
5105 	rssi = (0 - IWX_MIN_DBM) + rssi;	/* normalize */
5106 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
5107 
5108 	rxi.rxi_rssi = rssi;
5109 	rxi.rxi_tstamp = device_timestamp;
5110 	rxi.rxi_chan = chanidx;
5111 
5112 	if (iwx_rx_reorder(sc, m, chanidx, desc,
5113 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
5114 	    rate_n_flags, device_timestamp, &rxi, ml))
5115 		return;
5116 
5117 	iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
5118 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
5119 	    rate_n_flags, device_timestamp, &rxi, ml);
5120 }
5121 
5122 void
5123 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
5124 {
5125 	struct iwx_tfh_tfd *desc = &ring->desc[idx];
5126 	uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
5127 	int i;
5128 
5129 	/* First TB is never cleared - it is bidirectional DMA data. */
5130 	for (i = 1; i < num_tbs; i++) {
5131 		struct iwx_tfh_tb *tb = &desc->tbs[i];
5132 		memset(tb, 0, sizeof(*tb));
5133 	}
5134 	desc->num_tbs = htole16(1);
5135 
5136 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5137 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5138 	    sizeof(*desc), BUS_DMASYNC_PREWRITE);
5139 }
5140 
5141 void
5142 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
5143 {
5144 	struct ieee80211com *ic = &sc->sc_ic;
5145 
5146 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5147 	    BUS_DMASYNC_POSTWRITE);
5148 	bus_dmamap_unload(sc->sc_dmat, txd->map);
5149 	m_freem(txd->m);
5150 	txd->m = NULL;
5151 
5152 	KASSERT(txd->in);
5153 	ieee80211_release_node(ic, &txd->in->in_ni);
5154 	txd->in = NULL;
5155 }
5156 
5157 void
5158 iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx)
5159 {
5160  	struct iwx_tx_data *txd;
5161 
5162 	while (ring->tail_hw != idx) {
5163 		txd = &ring->data[ring->tail];
5164 		if (txd->m != NULL) {
5165 			iwx_clear_tx_desc(sc, ring, ring->tail);
5166 			iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0);
5167 			iwx_txd_done(sc, txd);
5168 			ring->queued--;
5169 		}
5170 		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
5171 		ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
5172 	}
5173 }
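
/*
 * The loop above advances two tail counters in lockstep: tail_hw wraps
 * modulo sc->max_tfd_queue_size (the hardware's TFD index space) while
 * tail wraps modulo IWX_TX_RING_COUNT (the software ring size). On
 * devices where these sizes differ, several hardware indices alias the
 * same software slot over time.
 */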
5174 
5175 void
5176 iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
5177     struct iwx_rx_data *data)
5178 {
5179 	struct ieee80211com *ic = &sc->sc_ic;
5180 	struct ifnet *ifp = IC2IFP(ic);
5181 	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
5182 	int qid = cmd_hdr->qid, status, txfail;
5183 	struct iwx_tx_ring *ring = &sc->txq[qid];
5184 	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
5185 	uint32_t ssn;
5186 	uint32_t len = iwx_rx_packet_len(pkt);
5187 
5188 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
5189 	    BUS_DMASYNC_POSTREAD);
5190 
5191 	/* Sanity checks. */
5192 	if (sizeof(*tx_resp) > len)
5193 		return;
5194 	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
5195 		return;
5196 	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
5197 	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
5198 		return;
5199 
5200 	sc->sc_tx_timer[qid] = 0;
5201 
5202 	if (tx_resp->frame_count > 1) /* A-MPDU */
5203 		return;
5204 
5205 	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
5206 	txfail = (status != IWX_TX_STATUS_SUCCESS &&
5207 	    status != IWX_TX_STATUS_DIRECT_DONE);
5208 
5209 	if (txfail)
5210 		ifp->if_oerrors++;
5211 
5212 	/*
5213 	 * On hardware supported by iwx(4) the SSN counter corresponds
5214 	 * to a Tx ring index rather than a sequence number.
5215 	 * Frames up to this index (non-inclusive) can now be freed.
5216 	 */
5217 	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
5218 	ssn = le32toh(ssn);
5219 	if (ssn < sc->max_tfd_queue_size) {
5220 		iwx_txq_advance(sc, ring, ssn);
5221 		iwx_clear_oactive(sc, ring);
5222 	}
5223 }
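/*
 * Worked example for the SSN handling above (illustrative only): if the
 * ring's hardware tail index is 5 and the firmware reports ssn == 8,
 * iwx_txq_advance() frees the frames at indices 5, 6 and 7; the frame at
 * index 8, if any, is still owned by the firmware. The comparison against
 * sc->max_tfd_queue_size rejects out-of-range SSN values.
 */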
5224 
5225 void
5226 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
5227 {
5228 	struct ieee80211com *ic = &sc->sc_ic;
5229 	struct ifnet *ifp = IC2IFP(ic);
5230 
5231 	if (ring->queued < IWX_TX_RING_LOMARK) {
5232 		sc->qfullmsk &= ~(1 << ring->qid);
5233 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5234 			ifq_clr_oactive(&ifp->if_snd);
5235 			/*
5236 			 * We are in interrupt context, but net80211 runs
5237 			 * its state machine from interrupt context as well,
5238 			 * so restarting output from here is acceptable.
5239 			 */
5240 			(*ifp->if_start)(ifp);
5241 		}
5242 	}
5243 }
5244 
5245 void
5246 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
5247 {
5248 	struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
5249 	struct ieee80211com *ic = &sc->sc_ic;
5250 	struct ieee80211_node *ni;
5251 	struct ieee80211_tx_ba *ba;
5252 	struct iwx_node *in;
5253 	struct iwx_tx_ring *ring;
5254 	uint16_t i, tfd_cnt, ra_tid_cnt, idx;
5255 	int qid;
5256 
5257 	if (ic->ic_state != IEEE80211_S_RUN)
5258 		return;
5259 
5260 	if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
5261 		return;
5262 
5263 	if (ba_res->sta_id != IWX_STATION_ID)
5264 		return;
5265 
5266 	ni = ic->ic_bss;
5267 	in = (void *)ni;
5268 
5269 	tfd_cnt = le16toh(ba_res->tfd_cnt);
5270 	ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
5271 	if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
5272 	    sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
5273 	    sizeof(ba_res->tfd[0]) * tfd_cnt))
5274 		return;
5275 
5276 	for (i = 0; i < tfd_cnt; i++) {
5277 		struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
5278 		uint8_t tid;
5279 
5280 		tid = ba_tfd->tid;
5281 		if (tid >= nitems(sc->aggqid))
5282 			continue;
5283 
5284 		qid = sc->aggqid[tid];
5285 		if (qid != le16toh(ba_tfd->q_num))
5286 			continue;
5287 
5288 		ring = &sc->txq[qid];
5289 
5290 		ba = &ni->ni_tx_ba[tid];
5291 		if (ba->ba_state != IEEE80211_BA_AGREED)
5292 			continue;
5293 
5294 		idx = le16toh(ba_tfd->tfd_index);
5295 		sc->sc_tx_timer[qid] = 0;
5296 		iwx_txq_advance(sc, ring, idx);
5297 		iwx_clear_oactive(sc, ring);
5298 	}
5299 }
5300 
5301 void
5302 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
5303     struct iwx_rx_data *data)
5304 {
5305 	struct ieee80211com *ic = &sc->sc_ic;
5306 	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
5307 	uint32_t missed;
5308 
5309 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
5310 	    (ic->ic_state != IEEE80211_S_RUN))
5311 		return;
5312 
5313 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
5314 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
5315 
5316 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
5317 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
5318 		if (ic->ic_if.if_flags & IFF_DEBUG)
5319 			printf("%s: receiving no beacons from %s; checking if "
5320 			    "this AP is still responding to probe requests\n",
5321 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
5322 		/*
5323 		 * Rather than go directly to scan state, try to send a
5324 		 * directed probe request first. If that fails then the
5325 		 * state machine will drop us into scanning after timing
5326 		 * out waiting for a probe response.
5327 		 */
5328 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
5329 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
5330 	}
5331 
5332 }
5333 
5334 int
5335 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
5336 {
5337 	struct iwx_binding_cmd cmd;
5338 	struct iwx_phy_ctxt *phyctxt = in->in_phyctxt;
5339 	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
5340 	int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
5341 	uint32_t status;
5342 
5343 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
5344 		panic("binding already added");
5345 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
5346 		panic("binding already removed");
5347 
5348 	if (phyctxt == NULL) /* XXX race with iwx_stop() */
5349 		return EINVAL;
5350 
5351 	memset(&cmd, 0, sizeof(cmd));
5352 
5353 	cmd.id_and_color
5354 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5355 	cmd.action = htole32(action);
5356 	cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5357 
5358 	cmd.macs[0] = htole32(mac_id);
5359 	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
5360 		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
5361 
5362 	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
5363 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5364 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5365 	else
5366 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5367 
5368 	status = 0;
5369 	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
5370 	    &cmd, &status);
5371 	if (err == 0 && status != 0)
5372 		err = EIO;
5373 
5374 	return err;
5375 }
5376 
5377 uint8_t
5378 iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
5379 {
5380 	int center_idx = ic->ic_bss->ni_vht_chan_center_freq_idx0;
5381 	int primary_idx = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan);
5382 	/*
5383 	 * The firmware is expected to check the control channel position
5384 	 * only when operating in HT/VHT with a channel width other than
5385 	 * 20MHz. Return this value as the default:
5386 	 */
5387 	uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5388 
5389 	switch (primary_idx - center_idx) {
5390 	case -6:
5391 		pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
5392 		break;
5393 	case -2:
5394 		pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5395 		break;
5396 	case 2:
5397 		pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5398 		break;
5399 	case 6:
5400 		pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
5401 		break;
5402 	default:
5403 		break;
5404 	}
5405 
5406 	return pos;
5407 }
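/*
 * Example (illustrative only): on an 80MHz VHT channel with center
 * frequency index 42 (covering channels 36-48), a primary channel of 36
 * gives 36 - 42 = -6, i.e. IWX_PHY_VHT_CTRL_POS_2_BELOW, while a primary
 * channel of 44 gives 44 - 42 = 2, i.e. IWX_PHY_VHT_CTRL_POS_1_ABOVE.
 */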
5408 
5409 int
5410 iwx_phy_ctxt_cmd_uhb_v3(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5411     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
5412     uint8_t vht_chan_width)
5413 {
5414 	struct ieee80211com *ic = &sc->sc_ic;
5415 	struct iwx_phy_context_cmd_uhb cmd;
5416 	uint8_t active_cnt, idle_cnt;
5417 	struct ieee80211_channel *chan = ctxt->channel;
5418 
5419 	memset(&cmd, 0, sizeof(cmd));
5420 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
5421 	    ctxt->color));
5422 	cmd.action = htole32(action);
5423 
5424 	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
5425 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5426 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5427 	else
5428 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5429 
5430 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5431 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
5432 	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
5433 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5434 		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
5435 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
5436 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5437 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
5438 			/* secondary chan above -> control chan below */
5439 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5440 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5441 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
5442 			/* secondary chan below -> control chan above */
5443 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5444 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5445 		} else {
5446 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5447 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5448 		}
5449 	} else {
5450 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5451 		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5452 	}
5453 
5454 	idle_cnt = chains_static;
5455 	active_cnt = chains_dynamic;
5456 	cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
5457 	    IWX_PHY_RX_CHAIN_VALID_POS);
5458 	cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
5459 	cmd.rxchain_info |= htole32(active_cnt <<
5460 	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
5461 
5462 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5463 }
5464 
5465 int
5466 iwx_phy_ctxt_cmd_v3(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5467     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
5468     uint8_t vht_chan_width)
5469 {
5470 	struct ieee80211com *ic = &sc->sc_ic;
5471 	struct iwx_phy_context_cmd cmd;
5472 	uint8_t active_cnt, idle_cnt;
5473 	struct ieee80211_channel *chan = ctxt->channel;
5474 
5475 	memset(&cmd, 0, sizeof(cmd));
5476 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
5477 	    ctxt->color));
5478 	cmd.action = htole32(action);
5479 
5480 	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
5481 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5482 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5483 	else
5484 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5485 
5486 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5487 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
5488 	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
5489 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5490 		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
5491 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
5492 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5493 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
5494 			/* secondary chan above -> control chan below */
5495 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5496 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5497 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
5498 			/* secondary chan below -> control chan above */
5499 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5500 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5501 		} else {
5502 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5503 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5504 		}
5505 	} else {
5506 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5507 		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5508 	}
5509 
5510 	idle_cnt = chains_static;
5511 	active_cnt = chains_dynamic;
5512 	cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
5513 	    IWX_PHY_RX_CHAIN_VALID_POS);
5514 	cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
5515 	cmd.rxchain_info |= htole32(active_cnt <<
5516 	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
5517 
5518 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5519 }
5520 
5521 int
5522 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5523     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
5524     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
5525 {
5526 	int cmdver;
5527 
5528 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
5529 	if (cmdver != 3) {
5530 		printf("%s: firmware does not support phy-context-cmd v3\n",
5531 		    DEVNAME(sc));
5532 		return ENOTSUP;
5533 	}
5534 
5535 	/*
5536 	 * Intel increased the size of the fw_channel_info struct and neglected
5537 	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
5538 	 * member in the middle.
5539 	 * To keep things simple we use a separate function to handle the larger
5540 	 * variant of the phy context command.
5541 	 */
5542 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
5543 		return iwx_phy_ctxt_cmd_uhb_v3(sc, ctxt, chains_static,
5544 		    chains_dynamic, action, sco, vht_chan_width);
5545 	}
5546 
5547 	return iwx_phy_ctxt_cmd_v3(sc, ctxt, chains_static, chains_dynamic,
5548 	    action, sco, vht_chan_width);
5549 }
5550 
5551 int
5552 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5553 {
5554 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5555 	struct iwx_tfh_tfd *desc;
5556 	struct iwx_tx_data *txdata;
5557 	struct iwx_device_cmd *cmd;
5558 	struct mbuf *m;
5559 	bus_addr_t paddr;
5560 	uint64_t addr;
5561 	int err = 0, i, paylen, off, s;
5562 	int idx, code, async, group_id;
5563 	size_t hdrlen, datasz;
5564 	uint8_t *data;
5565 	int generation = sc->sc_generation;
5566 
5567 	code = hcmd->id;
5568 	async = hcmd->flags & IWX_CMD_ASYNC;
5569 	idx = ring->cur;
5570 
5571 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
5572 		paylen += hcmd->len[i];
5573 	}
5574 
5575 	/* If this command waits for a response, allocate response buffer. */
5576 	hcmd->resp_pkt = NULL;
5577 	if (hcmd->flags & IWX_CMD_WANT_RESP) {
5578 		uint8_t *resp_buf;
5579 		KASSERT(!async);
5580 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet));
5581 		KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX);
5582 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
5583 			return ENOSPC;
5584 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
5585 		    M_NOWAIT | M_ZERO);
5586 		if (resp_buf == NULL)
5587 			return ENOMEM;
5588 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
5589 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
5590 	} else {
5591 		sc->sc_cmd_resp_pkt[idx] = NULL;
5592 	}
5593 
5594 	s = splnet();
5595 
5596 	desc = &ring->desc[idx];
5597 	txdata = &ring->data[idx];
5598 
5599 	/*
5600 	 * XXX Intel inside (tm)
5601 	 * Firmware API versions >= 50 reject old-style commands in
5602 	 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
5603 	 * that such commands were in the LONG_GROUP instead in order
5604 	 * for firmware to accept them.
5605 	 */
5606 	if (iwx_cmd_groupid(code) == 0) {
5607 		code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
5608 		txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
5609 	} else
5610 		txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
5611 
5612 	group_id = iwx_cmd_groupid(code);
5613 
5614 	hdrlen = sizeof(cmd->hdr_wide);
5615 	datasz = sizeof(cmd->data_wide);
5616 
5617 	if (paylen > datasz) {
5618 		/* Command is too large to fit in pre-allocated space. */
5619 		size_t totlen = hdrlen + paylen;
5620 		if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
5621 			printf("%s: firmware command too long (%zd bytes)\n",
5622 			    DEVNAME(sc), totlen);
5623 			err = EINVAL;
5624 			goto out;
5625 		}
5626 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
5627 		if (m == NULL) {
5628 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
5629 			    DEVNAME(sc), totlen);
5630 			err = ENOMEM;
5631 			goto out;
5632 		}
5633 		cmd = mtod(m, struct iwx_device_cmd *);
5634 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
5635 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5636 		if (err) {
5637 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
5638 			    DEVNAME(sc), totlen);
5639 			m_freem(m);
5640 			goto out;
5641 		}
5642 		txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
5643 		paddr = txdata->map->dm_segs[0].ds_addr;
5644 	} else {
5645 		cmd = &ring->cmd[idx];
5646 		paddr = txdata->cmd_paddr;
5647 	}
5648 
5649 	memset(cmd, 0, sizeof(*cmd));
5650 	cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
5651 	cmd->hdr_wide.group_id = group_id;
5652 	cmd->hdr_wide.qid = ring->qid;
5653 	cmd->hdr_wide.idx = idx;
5654 	cmd->hdr_wide.length = htole16(paylen);
5655 	cmd->hdr_wide.version = iwx_cmd_version(code);
5656 	data = cmd->data_wide;
5657 
5658 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
5659 		if (hcmd->len[i] == 0)
5660 			continue;
5661 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5662 		off += hcmd->len[i];
5663 	}
5664 	KASSERT(off == paylen);
5665 
5666 	desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
5667 	addr = htole64(paddr);
5668 	memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
5669 	if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
5670 		desc->tbs[1].tb_len = htole16(hdrlen + paylen -
5671 		    IWX_FIRST_TB_SIZE);
5672 		addr = htole64(paddr + IWX_FIRST_TB_SIZE);
5673 		memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
5674 		desc->num_tbs = htole16(2);
5675 	} else
5676 		desc->num_tbs = htole16(1);
5677 
5678 	if (paylen > datasz) {
5679 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
5680 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5681 	} else {
5682 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5683 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5684 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5685 	}
5686 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5687 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5688 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5689 	/* Kick command ring. */
5690 	DPRINTF(("%s: sending command 0x%x\n", __func__, code));
5691 	ring->queued++;
5692 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5693 	ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5694 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5695 
5696 	if (!async) {
5697 		err = tsleep_nsec(desc, PCATCH, "iwxcmd", SEC_TO_NSEC(1));
5698 		if (err == 0) {
5699 			/* if hardware is no longer up, return error */
5700 			if (generation != sc->sc_generation) {
5701 				err = ENXIO;
5702 				goto out;
5703 			}
5704 
5705 			/* Response buffer will be freed in iwx_free_resp(). */
5706 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5707 			sc->sc_cmd_resp_pkt[idx] = NULL;
5708 		} else if (generation == sc->sc_generation) {
5709 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
5710 			    sc->sc_cmd_resp_len[idx]);
5711 			sc->sc_cmd_resp_pkt[idx] = NULL;
5712 		}
5713 	}
5714  out:
5715 	splx(s);
5716 
5717 	return err;
5718 }
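/*
 * Sketch of the command descriptor layout built above (illustrative
 * only): the first IWX_FIRST_TB_SIZE bytes of the command, i.e. the
 * header and the start of the payload, always go into TB0; any remainder
 * is mapped by TB1 at paddr + IWX_FIRST_TB_SIZE. Commands too large for
 * the pre-allocated slot are copied into a DMA-mapped mbuf cluster first.
 */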
5719 
5720 int
5721 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5722     uint16_t len, const void *data)
5723 {
5724 	struct iwx_host_cmd cmd = {
5725 		.id = id,
5726 		.len = { len, },
5727 		.data = { data, },
5728 		.flags = flags,
5729 	};
5730 
5731 	return iwx_send_cmd(sc, &cmd);
5732 }
5733 
5734 int
5735 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
5736     uint32_t *status)
5737 {
5738 	struct iwx_rx_packet *pkt;
5739 	struct iwx_cmd_response *resp;
5740 	int err, resp_len;
5741 
5742 	KASSERT((cmd->flags & IWX_CMD_WANT_RESP) == 0);
5743 	cmd->flags |= IWX_CMD_WANT_RESP;
5744 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5745 
5746 	err = iwx_send_cmd(sc, cmd);
5747 	if (err)
5748 		return err;
5749 
5750 	pkt = cmd->resp_pkt;
5751 	if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
5752 		return EIO;
5753 
5754 	resp_len = iwx_rx_packet_payload_len(pkt);
5755 	if (resp_len != sizeof(*resp)) {
5756 		iwx_free_resp(sc, cmd);
5757 		return EIO;
5758 	}
5759 
5760 	resp = (void *)pkt->data;
5761 	*status = le32toh(resp->status);
5762 	iwx_free_resp(sc, cmd);
5763 	return err;
5764 }
5765 
5766 int
5767 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5768     const void *data, uint32_t *status)
5769 {
5770 	struct iwx_host_cmd cmd = {
5771 		.id = id,
5772 		.len = { len, },
5773 		.data = { data, },
5774 	};
5775 
5776 	return iwx_send_cmd_status(sc, &cmd, status);
5777 }
5778 
5779 void
5780 iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5781 {
5782 	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP);
5783 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
5784 	hcmd->resp_pkt = NULL;
5785 }
5786 
5787 void
5788 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
5789 {
5790 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5791 	struct iwx_tx_data *data;
5792 
5793 	if (qid != IWX_DQA_CMD_QUEUE) {
5794 		return;	/* Not a command ack. */
5795 	}
5796 
5797 	data = &ring->data[idx];
5798 
5799 	if (data->m != NULL) {
5800 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
5801 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5802 		bus_dmamap_unload(sc->sc_dmat, data->map);
5803 		m_freem(data->m);
5804 		data->m = NULL;
5805 	}
5806 	wakeup(&ring->desc[idx]);
5807 
5808 	DPRINTF(("%s: command 0x%x done\n", __func__, code));
5809 	if (ring->queued == 0) {
5810 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
5811 			DEVNAME(sc), code));
5812 	} else if (ring->queued > 0)
5813 		ring->queued--;
5814 }
5815 
5816 /*
5817  * Determine the Tx command flags and Tx rate+flags to use.
5818  * Return the selected Tx rate.
5819  */
5820 const struct iwx_rate *
5821 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
5822     struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags)
5823 {
5824 	struct ieee80211com *ic = &sc->sc_ic;
5825 	struct ieee80211_node *ni = &in->in_ni;
5826 	struct ieee80211_rateset *rs = &ni->ni_rates;
5827 	const struct iwx_rate *rinfo;
5828 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5829 	int min_ridx = iwx_rval2ridx(ieee80211_min_basic_rate(ic));
5830 	int ridx, rate_flags;
5831 
5832 	*flags = 0;
5833 
5834 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
5835 	    type != IEEE80211_FC0_TYPE_DATA) {
5836 		/* for non-data, use the lowest supported rate */
5837 		ridx = min_ridx;
5838 		*flags |= IWX_TX_FLAGS_CMD_RATE;
5839 	} else if (ic->ic_fixed_mcs != -1) {
5840 		ridx = sc->sc_fixed_ridx;
5841 		*flags |= IWX_TX_FLAGS_CMD_RATE;
5842 	} else if (ic->ic_fixed_rate != -1) {
5843 		ridx = sc->sc_fixed_ridx;
5844 		*flags |= IWX_TX_FLAGS_CMD_RATE;
5845 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
5846 		ridx = iwx_mcs2ridx[ni->ni_txmcs];
5847 	} else {
5848 		uint8_t rval;
5849 		rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
5850 		ridx = iwx_rval2ridx(rval);
5851 		if (ridx < min_ridx)
5852 			ridx = min_ridx;
5853 	}
5854 
5855 	if ((ic->ic_flags & IEEE80211_F_RSNON) &&
5856 	    ni->ni_rsn_supp_state == RSNA_SUPP_PTKNEGOTIATING)
5857 		*flags |= IWX_TX_FLAGS_HIGH_PRI;
5858 
5859 	rinfo = &iwx_rates[ridx];
5860 	if (iwx_is_mimo_ht_plcp(rinfo->ht_plcp))
5861 		rate_flags = IWX_RATE_MCS_ANT_AB_MSK;
5862 	else
5863 		rate_flags = IWX_RATE_MCS_ANT_A_MSK;
5864 	if (IWX_RIDX_IS_CCK(ridx))
5865 		rate_flags |= IWX_RATE_MCS_CCK_MSK;
5866 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
5867 	    type == IEEE80211_FC0_TYPE_DATA &&
5868 	    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
5869 		uint8_t sco = IEEE80211_HTOP0_SCO_SCN;
5870 		uint8_t vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
5871 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5872 		    IEEE80211_CHAN_80MHZ_ALLOWED(ni->ni_chan) &&
5873 		    ieee80211_node_supports_vht_chan80(ni))
5874 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
5875 		else if (IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
5876 		    ieee80211_node_supports_ht_chan40(ni))
5877 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
5878 		if (ni->ni_flags & IEEE80211_NODE_VHT)
5879 			rate_flags |= IWX_RATE_MCS_VHT_MSK;
5880 		else
5881 			rate_flags |= IWX_RATE_MCS_HT_MSK;
5882 		if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80 &&
5883 		    in->in_phyctxt != NULL &&
5884 		    in->in_phyctxt->vht_chan_width == vht_chan_width) {
5885 			rate_flags |= IWX_RATE_MCS_CHAN_WIDTH_80;
5886 			if (ieee80211_node_supports_vht_sgi80(ni))
5887 				rate_flags |= IWX_RATE_MCS_SGI_MSK;
5888 		} else if ((sco == IEEE80211_HTOP0_SCO_SCA ||
5889 		    sco == IEEE80211_HTOP0_SCO_SCB) &&
5890 		    in->in_phyctxt != NULL && in->in_phyctxt->sco == sco) {
5891 			rate_flags |= IWX_RATE_MCS_CHAN_WIDTH_40;
5892 			if (ieee80211_node_supports_ht_sgi40(ni))
5893 				rate_flags |= IWX_RATE_MCS_SGI_MSK;
5894 		} else if (ieee80211_node_supports_ht_sgi20(ni))
5895 			rate_flags |= IWX_RATE_MCS_SGI_MSK;
5896 		*rate_n_flags = rate_flags | rinfo->ht_plcp;
5897 	} else
5898 		*rate_n_flags = rate_flags | rinfo->plcp;
5899 
5900 	return rinfo;
5901 }
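/*
 * Summary of the rate_n_flags value composed above (illustrative only):
 *
 *	rate_n_flags = antenna mask (A, or A+B for MIMO rates)
 *	    | IWX_RATE_MCS_CCK_MSK (CCK rates only)
 *	    | IWX_RATE_MCS_HT_MSK or IWX_RATE_MCS_VHT_MSK (11n/11ac data)
 *	    | channel width flag, if the PHY context matches
 *	    | IWX_RATE_MCS_SGI_MSK, if the peer supports short GI
 *	    | the PLCP value of the selected rate
 */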
5902 
5903 void
5904 iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
5905     int idx, uint16_t byte_cnt, uint16_t num_tbs)
5906 {
5907 	uint8_t filled_tfd_size, num_fetch_chunks;
5908 	uint16_t len = byte_cnt;
5909 	uint16_t bc_ent;
5910 
5911 	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
5912 			  num_tbs * sizeof(struct iwx_tfh_tb);
5913 	/*
5914 	 * filled_tfd_size contains the number of filled bytes in the TFD.
5915 	 * Dividing it by 64 gives the number of chunks to fetch into
5916 	 * SRAM: 0 for one chunk, 1 for two, and so on.
5917 	 * If, for example, the TFD contains only 3 TBs then 32 bytes
5918 	 * of the TFD are used, and only one chunk of 64 bytes should
5919 	 * be fetched.
5920 	 */
5921 	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;
5922 
5923 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5924 		struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr;
5925 		/* Starting from AX210, the HW expects bytes */
5926 		bc_ent = htole16(len | (num_fetch_chunks << 14));
5927 		scd_bc_tbl[idx].tfd_offset = bc_ent;
5928 	} else {
5929 		struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
5930 		/* Before AX210, the HW expects the length in dwords */
5931 		len = howmany(len, 4);
5932 		bc_ent = htole16(len | (num_fetch_chunks << 12));
5933 		scd_bc_tbl->tfd_offset[idx] = bc_ent;
5934 	}
5935 
5936 	bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, 0,
5937 	    txq->bc_tbl.map->dm_mapsize, BUS_DMASYNC_PREWRITE);
5938 }
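/*
 * Worked example for the chunk computation above (illustrative only):
 * with num_tbs == 3, filled_tfd_size is offsetof(struct iwx_tfh_tfd, tbs)
 * plus three TB entries, well under 64 bytes, so
 * howmany(filled_tfd_size, 64) - 1 == 0 and the hardware fetches a
 * single 64-byte chunk. A TFD with ten TBs would exceed 64 bytes and
 * yield 1, i.e. two chunks.
 */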
5939 
5940 int
5941 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
5942 {
5943 	struct ieee80211com *ic = &sc->sc_ic;
5944 	struct iwx_node *in = (void *)ni;
5945 	struct iwx_tx_ring *ring;
5946 	struct iwx_tx_data *data;
5947 	struct iwx_tfh_tfd *desc;
5948 	struct iwx_device_cmd *cmd;
5949 	struct ieee80211_frame *wh;
5950 	struct ieee80211_key *k = NULL;
5951 	const struct iwx_rate *rinfo;
5952 	uint64_t paddr;
5953 	u_int hdrlen;
5954 	bus_dma_segment_t *seg;
5955 	uint32_t rate_n_flags;
5956 	uint16_t num_tbs, flags, offload_assist = 0;
5957 	uint8_t type, subtype;
5958 	int i, totlen, err, pad, qid;
5959 	size_t txcmd_size;
5960 
5961 	wh = mtod(m, struct ieee80211_frame *);
5962 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5963 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5964 	if (type == IEEE80211_FC0_TYPE_CTL)
5965 		hdrlen = sizeof(struct ieee80211_frame_min);
5966 	else
5967 		hdrlen = ieee80211_get_hdrlen(wh);
5968 
5969 	qid = sc->first_data_qid;
5970 
5971 	/* Put QoS frames on the data queue which maps to their TID. */
5972 	if (ieee80211_has_qos(wh)) {
5973 		struct ieee80211_tx_ba *ba;
5974 		uint16_t qos = ieee80211_get_qos(wh);
5975 		uint8_t tid = qos & IEEE80211_QOS_TID;
5976 
5977 		ba = &ni->ni_tx_ba[tid];
5978 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5979 		    type == IEEE80211_FC0_TYPE_DATA &&
5980 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
5981 		    sc->aggqid[tid] != 0 &&
5982 		    ba->ba_state == IEEE80211_BA_AGREED) {
5983 			qid = sc->aggqid[tid];
5984 		}
5985 	}
5986 
5987 	ring = &sc->txq[qid];
5988 	desc = &ring->desc[ring->cur];
5989 	memset(desc, 0, sizeof(*desc));
5990 	data = &ring->data[ring->cur];
5991 
5992 	cmd = &ring->cmd[ring->cur];
5993 	cmd->hdr.code = IWX_TX_CMD;
5994 	cmd->hdr.flags = 0;
5995 	cmd->hdr.qid = ring->qid;
5996 	cmd->hdr.idx = ring->cur;
5997 
5998 	rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags);
5999 
6000 #if NBPFILTER > 0
6001 	if (sc->sc_drvbpf != NULL) {
6002 		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
6003 		uint16_t chan_flags;
6004 
6005 		tap->wt_flags = 0;
6006 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
6007 		chan_flags = ni->ni_chan->ic_flags;
6008 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
6009 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
6010 			chan_flags &= ~IEEE80211_CHAN_HT;
6011 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
6012 		}
6013 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
6014 			chan_flags &= ~IEEE80211_CHAN_VHT;
6015 		tap->wt_chan_flags = htole16(chan_flags);
6016 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6017 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6018 		    type == IEEE80211_FC0_TYPE_DATA &&
6019 		    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
6020 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
6021 		} else
6022 			tap->wt_rate = rinfo->rate;
6023 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
6024 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
6025 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
6026 
6027 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
6028 		    m, BPF_DIRECTION_OUT);
6029 	}
6030 #endif
6031 
6032 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
6033 		k = ieee80211_get_txkey(ic, wh, ni);
6034 		if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
6035 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
6036 				return ENOBUFS;
6037 			/* 802.11 header may have moved. */
6038 			wh = mtod(m, struct ieee80211_frame *);
6039 			flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
6040 		} else {
6041 			k->k_tsc++;
6042 			/* Hardware increments PN internally and adds IV. */
6043 		}
6044 	} else
6045 		flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
6046 
6047 	totlen = m->m_pkthdr.len;
6048 
6049 	if (hdrlen & 3) {
6050 		/* First segment length must be a multiple of 4. */
6051 		pad = 4 - (hdrlen & 3);
6052 		offload_assist |= IWX_TX_CMD_OFFLD_PAD;
6053 	} else
6054 		pad = 0;
6055 
6056 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
6057 		struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data;
6058 		memset(tx, 0, sizeof(*tx));
6059 		tx->len = htole16(totlen);
6060 		tx->offload_assist = htole32(offload_assist);
6061 		tx->flags = htole16(flags);
6062 		tx->rate_n_flags = htole32(rate_n_flags);
6063 		memcpy(tx->hdr, wh, hdrlen);
6064 		txcmd_size = sizeof(*tx);
6065 	} else {
6066 		struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data;
6067 		memset(tx, 0, sizeof(*tx));
6068 		tx->len = htole16(totlen);
6069 		tx->offload_assist = htole16(offload_assist);
6070 		tx->flags = htole32(flags);
6071 		tx->rate_n_flags = htole32(rate_n_flags);
6072 		memcpy(tx->hdr, wh, hdrlen);
6073 		txcmd_size = sizeof(*tx);
6074 	}
6075 
6076 	/* Trim 802.11 header. */
6077 	m_adj(m, hdrlen);
6078 
6079 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6080 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6081 	if (err && err != EFBIG) {
6082 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
6083 		m_freem(m);
6084 		return err;
6085 	}
6086 	if (err) {
6087 		/* Too many DMA segments, linearize mbuf. */
6088 		if (m_defrag(m, M_DONTWAIT)) {
6089 			m_freem(m);
6090 			return ENOBUFS;
6091 		}
6092 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6093 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6094 		if (err) {
6095 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
6096 			    err);
6097 			m_freem(m);
6098 			return err;
6099 		}
6100 	}
6101 	data->m = m;
6102 	data->in = in;
6103 
6104 	/* Fill TX descriptor. */
6105 	num_tbs = 2 + data->map->dm_nsegs;
6106 	desc->num_tbs = htole16(num_tbs);
6107 
6108 	desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
6109 	paddr = htole64(data->cmd_paddr);
6110 	memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
6111 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le16toh(desc->tbs[0].tb_len)) >> 32)
6112 		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
6113 	desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
6114 	    txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE);
6115 	paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
6116 	memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
6117 
6118 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le16toh(desc->tbs[1].tb_len)) >> 32)
6119 		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
6120 
6121 	/* Other DMA segments are for data payload. */
6122 	seg = data->map->dm_segs;
6123 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
6124 		desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
6125 		paddr = htole64(seg->ds_addr);
6126 		memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
6127 		if (seg->ds_addr >> 32 != (seg->ds_addr + seg->ds_len) >> 32)
6128 			DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
6129 	}
6130 
6131 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
6132 	    BUS_DMASYNC_PREWRITE);
6133 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6134 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6135 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
6136 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6137 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6138 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6139 
6140 	iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs);
6141 
6142 	/* Kick TX ring. */
6143 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
6144 	ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
6145 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
6146 
6147 	/* Mark TX ring as full if we reach a certain threshold. */
6148 	if (++ring->queued > IWX_TX_RING_HIMARK) {
6149 		sc->qfullmsk |= 1 << ring->qid;
6150 	}
6151 
6152 	if (ic->ic_if.if_flags & IFF_UP)
6153 		sc->sc_tx_timer[ring->qid] = 15;
6154 
6155 	return 0;
6156 }
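/*
 * Resulting TFD layout for a data frame (illustrative sketch):
 *
 *	TB0:	first IWX_FIRST_TB_SIZE bytes of the Tx command
 *	TB1:	remainder of the Tx command, 802.11 header and padding
 *	TB2...:	one TB per DMA segment of the header-trimmed mbuf chain
 *
 * Hence num_tbs == 2 + data->map->dm_nsegs above.
 */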
6157 
6158 int
6159 iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
6160 {
6161 	struct iwx_rx_packet *pkt;
6162 	struct iwx_tx_path_flush_cmd_rsp *resp;
6163 	struct iwx_tx_path_flush_cmd flush_cmd = {
6164 		.sta_id = htole32(sta_id),
6165 		.tid_mask = htole16(tids),
6166 	};
6167 	struct iwx_host_cmd hcmd = {
6168 		.id = IWX_TXPATH_FLUSH,
6169 		.len = { sizeof(flush_cmd), },
6170 		.data = { &flush_cmd, },
6171 		.flags = IWX_CMD_WANT_RESP,
6172 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
6173 	};
6174 	int err, resp_len, i, num_flushed_queues;
6175 
6176 	err = iwx_send_cmd(sc, &hcmd);
6177 	if (err)
6178 		return err;
6179 
6180 	pkt = hcmd.resp_pkt;
6181 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
6182 		err = EIO;
6183 		goto out;
6184 	}
6185 
6186 	resp_len = iwx_rx_packet_payload_len(pkt);
6187 	/* Some firmware versions don't provide a response. */
6188 	if (resp_len == 0)
6189 		goto out;
6190 	else if (resp_len != sizeof(*resp)) {
6191 		err = EIO;
6192 		goto out;
6193 	}
6194 
6195 	resp = (void *)pkt->data;
6196 
6197 	if (le16toh(resp->sta_id) != sta_id) {
6198 		err = EIO;
6199 		goto out;
6200 	}
6201 
6202 	num_flushed_queues = le16toh(resp->num_flushed_queues);
6203 	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
6204 		err = EIO;
6205 		goto out;
6206 	}
6207 
6208 	for (i = 0; i < num_flushed_queues; i++) {
6209 		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
6210 		uint16_t tid = le16toh(queue_info->tid);
6211 		uint16_t read_after = le16toh(queue_info->read_after_flush);
6212 		uint16_t qid = le16toh(queue_info->queue_num);
6213 		struct iwx_tx_ring *txq;
6214 
6215 		if (qid >= nitems(sc->txq))
6216 			continue;
6217 
6218 		txq = &sc->txq[qid];
6219 		if (tid != txq->tid)
6220 			continue;
6221 
6222 		iwx_txq_advance(sc, txq, read_after);
6223 	}
6224 out:
6225 	iwx_free_resp(sc, &hcmd);
6226 	return err;
6227 }
6228 
6229 #define IWX_FLUSH_WAIT_MS	2000
6230 
6231 int
6232 iwx_wait_tx_queues_empty(struct iwx_softc *sc)
6233 {
6234 	int i, err;
6235 
6236 	for (i = 0; i < nitems(sc->txq); i++) {
6237 		struct iwx_tx_ring *ring = &sc->txq[i];
6238 
6239 		if (i == IWX_DQA_CMD_QUEUE)
6240 			continue;
6241 
6242 		while (ring->queued > 0) {
6243 			err = tsleep_nsec(ring, 0, "iwxflush",
6244 			    MSEC_TO_NSEC(IWX_FLUSH_WAIT_MS));
6245 			if (err)
6246 				return err;
6247 		}
6248 	}
6249 
6250 	return 0;
6251 }
6252 
6253 int
6254 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *in, int drain)
6255 {
6256 	struct iwx_add_sta_cmd cmd;
6257 	int err;
6258 	uint32_t status;
6259 
6260 	memset(&cmd, 0, sizeof(cmd));
6261 	cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6262 	    in->in_color));
6263 	cmd.sta_id = IWX_STATION_ID;
6264 	cmd.add_modify = IWX_STA_MODE_MODIFY;
6265 	cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
6266 	cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
6267 
6268 	status = IWX_ADD_STA_SUCCESS;
6269 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
6270 	    sizeof(cmd), &cmd, &status);
6271 	if (err) {
6272 		printf("%s: could not update sta (error %d)\n",
6273 		    DEVNAME(sc), err);
6274 		return err;
6275 	}
6276 
6277 	switch (status & IWX_ADD_STA_STATUS_MASK) {
6278 	case IWX_ADD_STA_SUCCESS:
6279 		break;
6280 	default:
6281 		err = EIO;
6282 		printf("%s: Couldn't %s draining for station\n",
6283 		    DEVNAME(sc), drain ? "enable" : "disable");
6284 		break;
6285 	}
6286 
6287 	return err;
6288 }
6289 
6290 int
6291 iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
6292 {
6293 	int err;
6294 
6295 	splassert(IPL_NET);
6296 
6297 	sc->sc_flags |= IWX_FLAG_TXFLUSH;
6298 
6299 	err = iwx_drain_sta(sc, in, 1);
6300 	if (err)
6301 		goto done;
6302 
6303 	err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
6304 	if (err) {
6305 		printf("%s: could not flush Tx path (error %d)\n",
6306 		    DEVNAME(sc), err);
6307 		goto done;
6308 	}
6309 
6310 	err = iwx_wait_tx_queues_empty(sc);
6311 	if (err) {
6312 		printf("%s: Could not empty Tx queues (error %d)\n",
6313 		    DEVNAME(sc), err);
6314 		goto done;
6315 	}
6316 
6317 	err = iwx_drain_sta(sc, in, 0);
6318 done:
6319 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
6320 	return err;
6321 }
6322 
6323 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC    25
6324 
6325 int
6326 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
6327     struct iwx_beacon_filter_cmd *cmd)
6328 {
6329 	return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
6330 	    0, sizeof(struct iwx_beacon_filter_cmd), cmd);
6331 }
6332 
6333 int
6334 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
6335 {
6336 	struct iwx_beacon_filter_cmd cmd = {
6337 		IWX_BF_CMD_CONFIG_DEFAULTS,
6338 		.bf_enable_beacon_filter = htole32(1),
6339 		.ba_enable_beacon_abort = htole32(enable),
6340 	};
6341 
6342 	if (!sc->sc_bf.bf_enabled)
6343 		return 0;
6344 
6345 	sc->sc_bf.ba_enabled = enable;
6346 	return iwx_beacon_filter_send_cmd(sc, &cmd);
6347 }
6348 
6349 void
6350 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
6351     struct iwx_mac_power_cmd *cmd)
6352 {
6353 	struct ieee80211com *ic = &sc->sc_ic;
6354 	struct ieee80211_node *ni = &in->in_ni;
6355 	int dtim_period, dtim_msec, keep_alive;
6356 
6357 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6358 	    in->in_color));
6359 	if (ni->ni_dtimperiod)
6360 		dtim_period = ni->ni_dtimperiod;
6361 	else
6362 		dtim_period = 1;
6363 
6364 	/*
6365 	 * Regardless of power management state the driver must set the
6366 	 * keep alive period. FW will use it for sending keep alive NDPs
6367 	 * immediately after association. Ensure that the keep alive
6368 	 * period is at least 3 * DTIM.
6369 	 */
6370 	dtim_msec = dtim_period * ni->ni_intval;
6371 	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
6372 	keep_alive = roundup(keep_alive, 1000) / 1000;
6373 	cmd->keep_alive_seconds = htole16(keep_alive);
6374 
6375 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6376 		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6377 }
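/*
 * Worked example (illustrative only): with a DTIM period of 1 and a
 * beacon interval of 100 TU (ni_intval, treated as milliseconds here),
 * dtim_msec is 100 and keep_alive becomes MAX(300, 25000) == 25000,
 * which rounds to 25 seconds. The IWX_POWER_KEEP_ALIVE_PERIOD_SEC floor
 * therefore dominates for typical DTIM settings.
 */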
6378 
6379 int
6380 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
6381 {
6382 	int err;
6383 	int ba_enable;
6384 	struct iwx_mac_power_cmd cmd;
6385 
6386 	memset(&cmd, 0, sizeof(cmd));
6387 
6388 	iwx_power_build_cmd(sc, in, &cmd);
6389 
6390 	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
6391 	    sizeof(cmd), &cmd);
6392 	if (err != 0)
6393 		return err;
6394 
6395 	ba_enable = !!(cmd.flags &
6396 	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
6397 	return iwx_update_beacon_abort(sc, in, ba_enable);
6398 }
6399 
6400 int
6401 iwx_power_update_device(struct iwx_softc *sc)
6402 {
6403 	struct iwx_device_power_cmd cmd = { };
6404 	struct ieee80211com *ic = &sc->sc_ic;
6405 
6406 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6407 		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6408 
6409 	return iwx_send_cmd_pdu(sc,
6410 	    IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6411 }
6412 
6413 int
6414 iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
6415 {
6416 	struct iwx_beacon_filter_cmd cmd = {
6417 		IWX_BF_CMD_CONFIG_DEFAULTS,
6418 		.bf_enable_beacon_filter = htole32(1),
6419 		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
6420 	};
6421 	int err;
6422 
6423 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
6424 	if (err == 0)
6425 		sc->sc_bf.bf_enabled = 1;
6426 
6427 	return err;
6428 }
6429 
6430 int
6431 iwx_disable_beacon_filter(struct iwx_softc *sc)
6432 {
6433 	struct iwx_beacon_filter_cmd cmd;
6434 	int err;
6435 
6436 	memset(&cmd, 0, sizeof(cmd));
6437 
6438 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
6439 	if (err == 0)
6440 		sc->sc_bf.bf_enabled = 0;
6441 
6442 	return err;
6443 }
6444 
6445 int
6446 iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
6447 {
6448 	struct iwx_add_sta_cmd add_sta_cmd;
6449 	int err;
6450 	uint32_t status, aggsize;
6451 	const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
6452 		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
6453 	struct ieee80211com *ic = &sc->sc_ic;
6454 
6455 	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
6456 		panic("STA already added");
6457 
6458 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
6459 
6460 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6461 		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6462 		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
6463 	} else {
6464 		add_sta_cmd.sta_id = IWX_STATION_ID;
6465 		add_sta_cmd.station_type = IWX_STA_LINK;
6466 	}
6467 	add_sta_cmd.mac_id_n_color
6468 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
6469 	if (!update) {
6470 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
6471 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
6472 			    etheranyaddr);
6473 		else
6474 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
6475 			    in->in_macaddr);
6476 	}
6477 	add_sta_cmd.add_modify = update ? 1 : 0;
6478 	add_sta_cmd.station_flags_msk
6479 	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
6480 
6481 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
6482 		add_sta_cmd.station_flags_msk
6483 		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
6484 		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);
6485 
6486 		if (iwx_mimo_enabled(sc)) {
6487 			if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
6488 				uint16_t rx_mcs = (in->in_ni.ni_vht_rxmcs &
6489 				    IEEE80211_VHT_MCS_FOR_SS_MASK(2)) >>
6490 				    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2);
6491 				if (rx_mcs != IEEE80211_VHT_MCS_SS_NOT_SUPP) {
6492 					add_sta_cmd.station_flags |=
6493 					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
6494 				}
6495 			} else {
6496 				if (in->in_ni.ni_rxmcs[1] != 0) {
6497 					add_sta_cmd.station_flags |=
6498 					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
6499 				}
6500 				if (in->in_ni.ni_rxmcs[2] != 0) {
6501 					add_sta_cmd.station_flags |=
6502 					    htole32(IWX_STA_FLG_MIMO_EN_MIMO3);
6503 				}
6504 			}
6505 		}
6506 
6507 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
6508 		    ieee80211_node_supports_ht_chan40(&in->in_ni)) {
6509 			add_sta_cmd.station_flags |= htole32(
6510 			    IWX_STA_FLG_FAT_EN_40MHZ);
6511 		}
6512 
6513 		if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
6514 			if (IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
6515 			    ieee80211_node_supports_vht_chan80(&in->in_ni)) {
6516 				add_sta_cmd.station_flags |= htole32(
6517 				    IWX_STA_FLG_FAT_EN_80MHZ);
6518 			}
6519 			aggsize = (in->in_ni.ni_vhtcaps &
6520 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_MASK) >>
6521 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT;
6522 		} else {
6523 			aggsize = (in->in_ni.ni_ampdu_param &
6524 			    IEEE80211_AMPDU_PARAM_LE);
6525 		}
6526 		if (aggsize > max_aggsize)
6527 			aggsize = max_aggsize;
6528 		add_sta_cmd.station_flags |= htole32((aggsize <<
6529 		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) &
6530 		    IWX_STA_FLG_MAX_AGG_SIZE_MSK);
6531 
6532 		switch (in->in_ni.ni_ampdu_param & IEEE80211_AMPDU_PARAM_SS) {
6533 		case IEEE80211_AMPDU_PARAM_SS_2:
6534 			add_sta_cmd.station_flags
6535 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
6536 			break;
6537 		case IEEE80211_AMPDU_PARAM_SS_4:
6538 			add_sta_cmd.station_flags
6539 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
6540 			break;
6541 		case IEEE80211_AMPDU_PARAM_SS_8:
6542 			add_sta_cmd.station_flags
6543 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
6544 			break;
6545 		case IEEE80211_AMPDU_PARAM_SS_16:
6546 			add_sta_cmd.station_flags
6547 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
6548 			break;
6549 		default:
6550 			break;
6551 		}
6552 	}
6553 
6554 	status = IWX_ADD_STA_SUCCESS;
6555 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
6556 	    &add_sta_cmd, &status);
6557 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
6558 		err = EIO;
6559 
6560 	return err;
6561 }
6562 
6563 int
6564 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
6565 {
6566 	struct ieee80211com *ic = &sc->sc_ic;
6567 	struct iwx_rm_sta_cmd rm_sta_cmd;
6568 	int err;
6569 
6570 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
6571 		panic("sta already removed");
6572 
6573 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
6574 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6575 		rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6576 	else
6577 		rm_sta_cmd.sta_id = IWX_STATION_ID;
6578 
6579 	err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
6580 	    &rm_sta_cmd);
6581 
6582 	return err;
6583 }
6584 
6585 int
6586 iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
6587 {
6588 	struct ieee80211com *ic = &sc->sc_ic;
6589 	struct ieee80211_node *ni = &in->in_ni;
6590 	int err, i;
6591 
6592 	err = iwx_flush_sta(sc, in);
6593 	if (err) {
6594 		printf("%s: could not flush Tx path (error %d)\n",
6595 		    DEVNAME(sc), err);
6596 		return err;
6597 	}
6598 	err = iwx_rm_sta_cmd(sc, in);
6599 	if (err) {
6600 		printf("%s: could not remove STA (error %d)\n",
6601 		    DEVNAME(sc), err);
6602 		return err;
6603 	}
6604 
6605 	in->in_flags = 0;
6606 
6607 	sc->sc_rx_ba_sessions = 0;
6608 	sc->ba_rx.start_tidmask = 0;
6609 	sc->ba_rx.stop_tidmask = 0;
6610 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
6611 	sc->ba_tx.start_tidmask = 0;
6612 	sc->ba_tx.stop_tidmask = 0;
6613 	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
6614 		sc->qenablemsk &= ~(1 << i);
6615 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
6616 		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
6617 		if (ba->ba_state != IEEE80211_BA_AGREED)
6618 			continue;
6619 		ieee80211_delba_request(ic, ni, 0, 1, i);
6620 	}
6621 
6622 	return 0;
6623 }
6624 
6625 uint8_t
6626 iwx_umac_scan_fill_channels(struct iwx_softc *sc,
6627     struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
6628     int n_ssids, int bgscan)
6629 {
6630 	struct ieee80211com *ic = &sc->sc_ic;
6631 	struct ieee80211_channel *c;
6632 	uint8_t nchan;
6633 
6634 	for (nchan = 0, c = &ic->ic_channels[1];
6635 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
6636 	    nchan < chan_nitems &&
6637 	    nchan < sc->sc_capa_n_scan_channels;
6638 	    c++) {
6639 		uint8_t channel_num;
6640 
6641 		if (c->ic_flags == 0)
6642 			continue;
6643 
6644 		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
6645 		if (isset(sc->sc_ucode_api,
6646 		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
6647 			chan->v2.channel_num = channel_num;
6648 			if (IEEE80211_IS_CHAN_2GHZ(c))
6649 				chan->v2.band = IWX_PHY_BAND_24;
6650 			else
6651 				chan->v2.band = IWX_PHY_BAND_5;
6652 			chan->v2.iter_count = 1;
6653 			chan->v2.iter_interval = 0;
6654 		} else {
6655 			chan->v1.channel_num = channel_num;
6656 			chan->v1.iter_count = 1;
6657 			chan->v1.iter_interval = htole16(0);
6658 		}
6659 		if (n_ssids != 0 && !bgscan)
6660 			chan->flags = htole32(1 << 0); /* select SSID 0 */
6661 		chan++;
6662 		nchan++;
6663 	}
6664 
6665 	return nchan;
6666 }
6667 
6668 int
6669 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
6670 {
6671 	struct ieee80211com *ic = &sc->sc_ic;
6672 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
6673 	struct ieee80211_rateset *rs;
6674 	size_t remain = sizeof(preq->buf);
6675 	uint8_t *frm, *pos;
6676 
6677 	memset(preq, 0, sizeof(*preq));
6678 
6679 	if (remain < sizeof(*wh) + 2)
6680 		return ENOBUFS;
6681 
6682 	/*
6683 	 * Build a probe request frame.  Most of the following code is a
6684 	 * copy & paste of what is done in net80211.
6685 	 */
6686 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6687 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6688 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6689 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
6690 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
6691 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
6692 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
6693 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
6694 
6695 	frm = (uint8_t *)(wh + 1);
6696 	*frm++ = IEEE80211_ELEMID_SSID;
6697 	*frm++ = 0;
6698 	/* hardware inserts SSID */
6699 
6700 	/* Tell the firmware where the MAC header is. */
6701 	preq->mac_header.offset = 0;
6702 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
6703 	remain -= frm - (uint8_t *)wh;
6704 
6705 	/* Fill in 2GHz IEs and tell firmware where they are. */
6706 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6707 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6708 		if (remain < 4 + rs->rs_nrates)
6709 			return ENOBUFS;
6710 	} else if (remain < 2 + rs->rs_nrates)
6711 		return ENOBUFS;
6712 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
6713 	pos = frm;
6714 	frm = ieee80211_add_rates(frm, rs);
6715 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6716 		frm = ieee80211_add_xrates(frm, rs);
6717 	remain -= frm - pos;
6718 
6719 	if (isset(sc->sc_enabled_capa,
6720 	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
6721 		if (remain < 3)
6722 			return ENOBUFS;
6723 		*frm++ = IEEE80211_ELEMID_DSPARMS;
6724 		*frm++ = 1;
6725 		*frm++ = 0;
6726 		remain -= 3;
6727 	}
6728 	preq->band_data[0].len = htole16(frm - pos);
6729 
6730 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
6731 		/* Fill in 5GHz IEs. */
6732 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6733 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6734 			if (remain < 4 + rs->rs_nrates)
6735 				return ENOBUFS;
6736 		} else if (remain < 2 + rs->rs_nrates)
6737 			return ENOBUFS;
6738 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
6739 		pos = frm;
6740 		frm = ieee80211_add_rates(frm, rs);
6741 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6742 			frm = ieee80211_add_xrates(frm, rs);
6743 		preq->band_data[1].len = htole16(frm - pos);
6744 		remain -= frm - pos;
6745 		if (ic->ic_flags & IEEE80211_F_VHTON) {
6746 			if (remain < 14)
6747 				return ENOBUFS;
6748 			frm = ieee80211_add_vhtcaps(frm, ic);
6749 			remain -= frm - pos;
6750 		}
6751 	}
6752 
6753 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
6754 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6755 	pos = frm;
6756 	if (ic->ic_flags & IEEE80211_F_HTON) {
6757 		if (remain < 28)
6758 			return ENOBUFS;
6759 		frm = ieee80211_add_htcaps(frm, ic);
6760 		/* XXX add WME info? */
6761 		remain -= frm - pos;
6762 	}
6763 
6764 	preq->common_data.len = htole16(frm - pos);
6765 
6766 	return 0;
6767 }
6768 
6769 int
6770 iwx_config_umac_scan_reduced(struct iwx_softc *sc)
6771 {
6772 	struct iwx_scan_config scan_cfg;
6773 	struct iwx_host_cmd hcmd = {
6774 		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
6775 		.len[0] = sizeof(scan_cfg),
6776 		.data[0] = &scan_cfg,
6777 		.flags = 0,
6778 	};
6779 	int cmdver;
6780 
6781 	if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
6782 		printf("%s: firmware does not support reduced scan config\n",
6783 		    DEVNAME(sc));
6784 		return ENOTSUP;
6785 	}
6786 
6787 	memset(&scan_cfg, 0, sizeof(scan_cfg));
6788 
6789 	/*
6790 	 * SCAN_CFG version >= 5 implies that the broadcast
6791 	 * STA ID field is deprecated.
6792 	 */
6793 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
6794 	if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
6795 		scan_cfg.bcast_sta_id = 0xff;
6796 
6797 	scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
6798 	scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
6799 
6800 	return iwx_send_cmd(sc, &hcmd);
6801 }
6802 
6803 uint16_t
6804 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
6805 {
6806 	struct ieee80211com *ic = &sc->sc_ic;
6807 	uint16_t flags = 0;
6808 
6809 	if (ic->ic_des_esslen == 0)
6810 		flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
6811 
6812 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
6813 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
6814 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
6815 
6816 	return flags;
6817 }
6818 
6819 #define IWX_SCAN_DWELL_ACTIVE		10
6820 #define IWX_SCAN_DWELL_PASSIVE		110
6821 
6822 /* adaptive dwell max budget time [TU] for full scan */
6823 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
6824 /* adaptive dwell max budget time [TU] for directed scan */
6825 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
6826 /* adaptive dwell default number of APs for the high band */
6827 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
6828 /* adaptive dwell default number of APs for the low band */
6829 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
6830 /* adaptive dwell default number of APs on social channels (1, 6, 11) */
6831 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
6832 /* adaptive dwell number of APs override for p2p friendly GO channels */
6833 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
6834 /* adaptive dwell number of APs override for social channels */
6835 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
6836 
6837 void
6838 iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
6839     struct iwx_scan_general_params_v10 *general_params, int bgscan)
6840 {
6841 	uint32_t suspend_time, max_out_time;
6842 	uint8_t active_dwell, passive_dwell;
6843 
6844 	active_dwell = IWX_SCAN_DWELL_ACTIVE;
6845 	passive_dwell = IWX_SCAN_DWELL_PASSIVE;
6846 
6847 	general_params->adwell_default_social_chn =
6848 		IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
6849 	general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
6850 	general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
6851 
6852 	if (bgscan)
6853 		general_params->adwell_max_budget =
6854 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
6855 	else
6856 		general_params->adwell_max_budget =
6857 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
6858 
6859 	general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6860 	if (bgscan) {
6861 		max_out_time = htole32(120);
6862 		suspend_time = htole32(120);
6863 	} else {
6864 		max_out_time = htole32(0);
6865 		suspend_time = htole32(0);
6866 	}
6867 	general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
6868 		htole32(max_out_time);
6869 	general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
6870 		htole32(suspend_time);
6871 	general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
6872 		htole32(max_out_time);
6873 	general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
6874 		htole32(suspend_time);
6875 
6876 	general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
6877 	general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
6878 	general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
6879 	general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
6880 }
6881 
6882 void
6883 iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
6884     struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
6885 {
6886 	iwx_scan_umac_dwell_v10(sc, gp, bgscan);
6887 
6888 	gp->flags = htole16(gen_flags);
6889 
6890 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
6891 		gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
6892 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
6893 		gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
6894 
6895 	gp->scan_start_mac_id = 0;
6896 }
6897 
6898 void
6899 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
6900     struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
6901     int n_ssid, int bgscan)
6902 {
6903 	cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
6904 
6905 	cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
6906 	    nitems(cp->channel_config), n_ssid, bgscan);
6907 
6908 	cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
6909 	cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
6910 }
6911 
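/*
 * Build and send a version 14 UMAC scan request.
 * Background scans are issued asynchronously, and a directed scan
 * entry is added if an ESSID has been configured.
 */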
6912 int
6913 iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
6914 {
6915 	struct ieee80211com *ic = &sc->sc_ic;
6916 	struct iwx_host_cmd hcmd = {
6917 		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
6918 		.len = { 0, },
6919 		.data = { NULL, },
6920 		.flags = 0,
6921 	};
6922 	struct iwx_scan_req_umac_v14 *cmd;
6923 	struct iwx_scan_req_params_v14 *scan_p;
6924 	int err, async = bgscan, n_ssid = 0;
6925 	uint16_t gen_flags;
6926 	uint32_t bitmap_ssid = 0;
6927 
6928 	cmd = malloc(sizeof(*cmd), M_DEVBUF,
6929 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
6930 	if (cmd == NULL)
6931 		return ENOMEM;
6932 
6933 	scan_p = &cmd->scan_params;
6934 
6935 	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6936 	cmd->uid = htole32(0);
6937 
6938 	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
6939 	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
6940 	    gen_flags, bgscan);
6941 
6942 	scan_p->periodic_params.schedule[0].interval = htole16(0);
6943 	scan_p->periodic_params.schedule[0].iter_count = 1;
6944 
6945 	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
6946 	if (err) {
6947 		free(cmd, M_DEVBUF, sizeof(*cmd));
6948 		return err;
6949 	}
6950 
6951 	if (ic->ic_des_esslen != 0) {
6952 		scan_p->probe_params.direct_scan[0].id = IEEE80211_ELEMID_SSID;
6953 		scan_p->probe_params.direct_scan[0].len = ic->ic_des_esslen;
6954 		memcpy(scan_p->probe_params.direct_scan[0].ssid,
6955 		    ic->ic_des_essid, ic->ic_des_esslen);
6956 		bitmap_ssid |= (1 << 0);
6957 		n_ssid = 1;
6958 	}
6959 
6960 	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
6961 	    n_ssid, bgscan);
6962 
6963 	hcmd.len[0] = sizeof(*cmd);
6964 	hcmd.data[0] = (void *)cmd;
6965 	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
6966 
6967 	err = iwx_send_cmd(sc, &hcmd);
6968 	free(cmd, M_DEVBUF, sizeof(*cmd));
6969 	return err;
6970 }
6971 
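/*
 * Handle a regulatory domain notification from firmware.
 * Currently this only logs the detected country code.
 */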
6972 void
6973 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
6974 {
6975 	struct ieee80211com *ic = &sc->sc_ic;
6976 	struct ifnet *ifp = IC2IFP(ic);
6977 	char alpha2[3];
6978 
6979 	snprintf(alpha2, sizeof(alpha2), "%c%c",
6980 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
6981 
6982 	if (ifp->if_flags & IFF_DEBUG) {
6983 		printf("%s: firmware has detected regulatory domain '%s' "
6984 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
6985 	}
6986 
6987 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
6988 }
6989 
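/*
 * Return the rate set entry which matches the given hardware rate
 * index, or zero if the rate set does not contain this rate.
 */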
6990 uint8_t
6991 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
6992 {
6993 	int i;
6994 	uint8_t rval;
6995 
6996 	for (i = 0; i < rs->rs_nrates; i++) {
6997 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6998 		if (rval == iwx_rates[ridx].rate)
6999 			return rs->rs_rates[i];
7000 	}
7001 
7002 	return 0;
7003 }
7004 
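/* Map an 802.11 rate value to a hardware rate index. */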
7005 int
7006 iwx_rval2ridx(int rval)
7007 {
7008 	int ridx;
7009 
7010 	for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
7011 		if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
7012 			continue;
7013 		if (rval == iwx_rates[ridx].rate)
7014 			break;
7015 	}
7016 
7017 	return ridx;
7018 }
7019 
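/*
 * Compute the CCK and OFDM rate bitmaps the firmware should use for
 * control response frames, based on the BSS basic rate set.
 */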
7020 void
7021 iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
7022     int *ofdm_rates)
7023 {
7024 	struct ieee80211_node *ni = &in->in_ni;
7025 	struct ieee80211_rateset *rs = &ni->ni_rates;
7026 	int lowest_present_ofdm = -1;
7027 	int lowest_present_cck = -1;
7028 	uint8_t cck = 0;
7029 	uint8_t ofdm = 0;
7030 	int i;
7031 
7032 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
7033 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
7034 		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
7035 			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
7036 				continue;
7037 			cck |= (1 << i);
7038 			if (lowest_present_cck == -1 || lowest_present_cck > i)
7039 				lowest_present_cck = i;
7040 		}
7041 	}
7042 	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
7043 		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
7044 			continue;
7045 		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
7046 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
7047 			lowest_present_ofdm = i;
7048 	}
7049 
7050 	/*
7051 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
7052 	 * variables. This isn't sufficient though, as there might not
7053 	 * be all the right rates in the bitmap. E.g. if the only basic
7054 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
7055 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
7056 	 *
7057 	 *    [...] a STA responding to a received frame shall transmit
7058 	 *    its Control Response frame [...] at the highest rate in the
7059 	 *    BSSBasicRateSet parameter that is less than or equal to the
7060 	 *    rate of the immediately previous frame in the frame exchange
7061 	 *    sequence ([...]) and that is of the same modulation class
7062 	 *    ([...]) as the received frame. If no rate contained in the
7063 	 *    BSSBasicRateSet parameter meets these conditions, then the
7064 	 *    control frame sent in response to a received frame shall be
7065 	 *    transmitted at the highest mandatory rate of the PHY that is
7066 	 *    less than or equal to the rate of the received frame, and
7067 	 *    that is of the same modulation class as the received frame.
7068 	 *
7069 	 * As a consequence, we need to add all mandatory rates that are
7070 	 * lower than all of the basic rates to these bitmaps.
7071 	 */
7072 
7073 	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
7074 		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
7075 	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
7076 		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
7077 	/* 6M already there or needed so always add */
7078 	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;
7079 
7080 	/*
7081 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
7082 	 * Note, however:
7083 	 *  - if no CCK rates are basic, it must be ERP since there must
7084 	 *    be some basic rates at all, so they're OFDM => ERP PHY
7085 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
7086 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
7087 	 *  - if 5.5M is basic, 1M and 2M are mandatory
7088 	 *  - if 2M is basic, 1M is mandatory
7089 	 *  - if 1M is basic, that's the only valid ACK rate.
7090 	 * As a consequence, it's not as complicated as it sounds, just add
7091 	 * any lower rates to the ACK rate bitmap.
7092 	 */
7093 	if (IWX_RATE_11M_INDEX < lowest_present_cck)
7094 		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
7095 	if (IWX_RATE_5M_INDEX < lowest_present_cck)
7096 		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
7097 	if (IWX_RATE_2M_INDEX < lowest_present_cck)
7098 		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
7099 	/* 1M already there or needed so always add */
7100 	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;
7101 
7102 	*cck_rates = cck;
7103 	*ofdm_rates = ofdm;
7104 }
7105 
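/*
 * Fill in the fields of a MAC context command which are common to
 * context addition and modification: addresses, ACK rates, EDCA
 * parameters, and protection flags.
 */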
7106 void
7107 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
7108     struct iwx_mac_ctx_cmd *cmd, uint32_t action)
7109 {
7110 #define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
7111 	struct ieee80211com *ic = &sc->sc_ic;
7112 	struct ieee80211_node *ni = ic->ic_bss;
7113 	int cck_ack_rates, ofdm_ack_rates;
7114 	int i;
7115 
7116 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
7117 	    in->in_color));
7118 	cmd->action = htole32(action);
7119 
7120 	if (action == IWX_FW_CTXT_ACTION_REMOVE)
7121 		return;
7122 
7123 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7124 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
7125 	else if (ic->ic_opmode == IEEE80211_M_STA)
7126 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
7127 	else
7128 		panic("unsupported operating mode %d", ic->ic_opmode);
7129 	cmd->tsf_id = htole32(IWX_TSF_ID_A);
7130 
7131 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
7132 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7133 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
7134 		return;
7135 	}
7136 
7137 	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
7138 	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
7139 	cmd->cck_rates = htole32(cck_ack_rates);
7140 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
7141 
7142 	cmd->cck_short_preamble
7143 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
7144 	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
7145 	cmd->short_slot
7146 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
7147 	      ? IWX_MAC_FLG_SHORT_SLOT : 0);
7148 
7149 	for (i = 0; i < EDCA_NUM_AC; i++) {
7150 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
7151 		int txf = iwx_ac_to_tx_fifo[i];
7152 
7153 		cmd->ac[txf].cw_min = htole16(IWX_EXP2(ac->ac_ecwmin));
7154 		cmd->ac[txf].cw_max = htole16(IWX_EXP2(ac->ac_ecwmax));
7155 		cmd->ac[txf].aifsn = ac->ac_aifsn;
7156 		cmd->ac[txf].fifos_mask = (1 << txf);
7157 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
7158 	}
7159 	if (ni->ni_flags & IEEE80211_NODE_QOS)
7160 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
7161 
7162 	if (ni->ni_flags & IEEE80211_NODE_HT) {
7163 		enum ieee80211_htprot htprot =
7164 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
7165 		switch (htprot) {
7166 		case IEEE80211_HTPROT_NONE:
7167 			break;
7168 		case IEEE80211_HTPROT_NONMEMBER:
7169 		case IEEE80211_HTPROT_NONHT_MIXED:
7170 			cmd->protection_flags |=
7171 			    htole32(IWX_MAC_PROT_FLG_HT_PROT |
7172 			    IWX_MAC_PROT_FLG_FAT_PROT);
7173 			break;
7174 		case IEEE80211_HTPROT_20MHZ:
7175 			if (in->in_phyctxt &&
7176 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
7177 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
7178 				cmd->protection_flags |=
7179 				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
7180 				    IWX_MAC_PROT_FLG_FAT_PROT);
7181 			}
7182 			break;
7183 		default:
7184 			break;
7185 		}
7186 
7187 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
7188 	}
7189 	if (ic->ic_flags & IEEE80211_F_USEPROT)
7190 		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
7191 
7192 	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
7193 #undef IWX_EXP2
7194 }
7195 
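/*
 * Fill in the station-specific part of a MAC context command:
 * association state, beacon interval, and DTIM timing.
 */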
7196 void
7197 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
7198     struct iwx_mac_data_sta *sta, int assoc)
7199 {
7200 	struct ieee80211_node *ni = &in->in_ni;
7201 	uint32_t dtim_off;
7202 	uint64_t tsf;
7203 
7204 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
7205 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
7206 	tsf = letoh64(tsf);
7207 
7208 	sta->is_assoc = htole32(assoc);
7209 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
7210 	sta->dtim_tsf = htole64(tsf + dtim_off);
7211 	sta->bi = htole32(ni->ni_intval);
7212 	sta->bi_reciprocal = htole32(iwx_reciprocal(ni->ni_intval));
7213 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
7214 	sta->dtim_reciprocal = htole32(iwx_reciprocal(sta->dtim_interval));
7215 	sta->listen_interval = htole32(10);
7216 	sta->assoc_id = htole32(ni->ni_associd);
7217 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
7218 }
7219 
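/*
 * Send a MAC context command to the firmware. Until we are associated
 * and know the DTIM period, ask the firmware to pass beacons up.
 */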
7220 int
7221 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
7222     int assoc)
7223 {
7224 	struct ieee80211com *ic = &sc->sc_ic;
7225 	struct ieee80211_node *ni = &in->in_ni;
7226 	struct iwx_mac_ctx_cmd cmd;
7227 	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
7228 
7229 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
7230 		panic("MAC already added");
7231 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
7232 		panic("MAC already removed");
7233 
7234 	memset(&cmd, 0, sizeof(cmd));
7235 
7236 	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
7237 
7238 	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
7239 		return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
7240 		    sizeof(cmd), &cmd);
7241 	}
7242 
7243 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7244 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
7245 		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
7246 		    IWX_MAC_FILTER_ACCEPT_GRP |
7247 		    IWX_MAC_FILTER_IN_BEACON |
7248 		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
7249 		    IWX_MAC_FILTER_IN_CRC32);
7250 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
7251 		/*
7252 		 * Allow beacons to pass through as long as we are not
7253 		 * associated or we do not have dtim period information.
7254 		 */
7255 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
7256 	else
7257 		iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
7258 
7259 	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
7260 }
7261 
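/* Ask the firmware to reset its statistics counters. */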
7262 int
7263 iwx_clear_statistics(struct iwx_softc *sc)
7264 {
7265 	struct iwx_statistics_cmd scmd = {
7266 		.flags = htole32(IWX_STATISTICS_FLG_CLEAR)
7267 	};
7268 	struct iwx_host_cmd cmd = {
7269 		.id = IWX_STATISTICS_CMD,
7270 		.len[0] = sizeof(scmd),
7271 		.data[0] = &scmd,
7272 		.flags = IWX_CMD_WANT_RESP,
7273 		.resp_pkt_len = sizeof(struct iwx_notif_statistics),
7274 	};
7275 	int err;
7276 
7277 	err = iwx_send_cmd(sc, &cmd);
7278 	if (err)
7279 		return err;
7280 
7281 	iwx_free_resp(sc, &cmd);
7282 	return 0;
7283 }
7284 
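/*
 * Schedule a driver task, taking a reference which the task releases
 * when it runs. No new tasks are admitted while the driver is
 * shutting down.
 */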
7285 void
7286 iwx_add_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
7287 {
7288 	int s = splnet();
7289 
7290 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
7291 		splx(s);
7292 		return;
7293 	}
7294 
7295 	refcnt_take(&sc->task_refs);
7296 	if (!task_add(taskq, task))
7297 		refcnt_rele_wake(&sc->task_refs);
7298 	splx(s);
7299 }
7300 
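/* Cancel a driver task, dropping its reference if it was still pending. */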
7301 void
7302 iwx_del_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
7303 {
7304 	if (task_del(taskq, task))
7305 		refcnt_rele(&sc->task_refs);
7306 }
7307 
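/*
 * Begin a full scan, aborting a background scan first if one is in
 * progress.
 */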
7308 int
7309 iwx_scan(struct iwx_softc *sc)
7310 {
7311 	struct ieee80211com *ic = &sc->sc_ic;
7312 	struct ifnet *ifp = IC2IFP(ic);
7313 	int err;
7314 
7315 	if (sc->sc_flags & IWX_FLAG_BGSCAN) {
7316 		err = iwx_scan_abort(sc);
7317 		if (err) {
7318 			printf("%s: could not abort background scan\n",
7319 			    DEVNAME(sc));
7320 			return err;
7321 		}
7322 	}
7323 
7324 	err = iwx_umac_scan_v14(sc, 0);
7325 	if (err) {
7326 		printf("%s: could not initiate scan\n", DEVNAME(sc));
7327 		return err;
7328 	}
7329 
7330 	/*
7331 	 * The current mode might have been fixed during association.
7332 	 * Ensure all channels get scanned.
7333 	 */
7334 	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
7335 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
7336 
7337 	sc->sc_flags |= IWX_FLAG_SCANNING;
7338 	if (ifp->if_flags & IFF_DEBUG)
7339 		printf("%s: %s -> %s\n", ifp->if_xname,
7340 		    ieee80211_state_name[ic->ic_state],
7341 		    ieee80211_state_name[IEEE80211_S_SCAN]);
7342 	if ((sc->sc_flags & IWX_FLAG_BGSCAN) == 0) {
7343 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
7344 		ieee80211_node_cleanup(ic, ic->ic_bss);
7345 	}
7346 	ic->ic_state = IEEE80211_S_SCAN;
7347 	wakeup(&ic->ic_state); /* wake iwx_init() */
7348 
7349 	return 0;
7350 }
7351 
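/* Begin a background scan while associated. */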
7352 int
7353 iwx_bgscan(struct ieee80211com *ic)
7354 {
7355 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
7356 	int err;
7357 
7358 	if (sc->sc_flags & IWX_FLAG_SCANNING)
7359 		return 0;
7360 
7361 	err = iwx_umac_scan_v14(sc, 1);
7362 	if (err) {
7363 		printf("%s: could not initiate scan\n", DEVNAME(sc));
7364 		return err;
7365 	}
7366 
7367 	sc->sc_flags |= IWX_FLAG_BGSCAN;
7368 	return 0;
7369 }
7370 
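/*
 * Called when a background scan has selected a new AP to roam to.
 * Keep the roaming argument and finish the switch in task context.
 */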
7371 void
7372 iwx_bgscan_done(struct ieee80211com *ic,
7373     struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
7374 {
7375 	struct iwx_softc *sc = ic->ic_softc;
7376 
7377 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
7378 	sc->bgscan_unref_arg = arg;
7379 	sc->bgscan_unref_arg_size = arg_size;
7380 	iwx_add_task(sc, sc->sc_nswq, &sc->bgscan_done_task);
7381 }
7382 
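/*
 * Flush Tx queues and stop Tx aggregation, then let net80211 proceed
 * with roaming to the AP found by a background scan.
 */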
7383 void
7384 iwx_bgscan_done_task(void *arg)
7385 {
7386 	struct iwx_softc *sc = arg;
7387 	struct ieee80211com *ic = &sc->sc_ic;
7388 	struct iwx_node *in = (void *)ic->ic_bss;
7389 	struct ieee80211_node *ni = &in->in_ni;
7390 	int tid, err = 0, s = splnet();
7391 
7392 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
7393 	    (ic->ic_flags & IEEE80211_F_BGSCAN) == 0 ||
7394 	    ic->ic_state != IEEE80211_S_RUN) {
7395 		err = ENXIO;
7396 		goto done;
7397 	}
7398 
7399 	err = iwx_flush_sta(sc, in);
7400 	if (err)
7401 		goto done;
7402 
7403 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
7404 		int qid = IWX_FIRST_AGG_TX_QUEUE + tid;
7405 
7406 		if (sc->aggqid[tid] == 0)
7407 			continue;
7408 
7409 		err = iwx_disable_txq(sc, IWX_STATION_ID, qid, tid);
7410 		if (err)
7411 			goto done;
7412 #if 0 /* disabled for now; we are going to DEAUTH soon anyway */
7413 		IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,
7414 		    IEEE80211_ACTION_DELBA,
7415 		    IEEE80211_REASON_AUTH_LEAVE << 16 |
7416 		    IEEE80211_FC1_DIR_TODS << 8 | tid);
7417 #endif
7418 		ieee80211_node_tx_ba_clear(ni, tid);
7419 		sc->aggqid[tid] = 0;
7420 	}
7421 
7422 	/*
7423 	 * Tx queues have been flushed and Tx agg has been stopped.
7424 	 * Allow roaming to proceed.
7425 	 */
7426 	ni->ni_unref_arg = sc->bgscan_unref_arg;
7427 	ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
7428 	sc->bgscan_unref_arg = NULL;
7429 	sc->bgscan_unref_arg_size = 0;
7430 	ieee80211_node_tx_stopped(ic, &in->in_ni);
7431 done:
7432 	if (err) {
7433 		free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
7434 		sc->bgscan_unref_arg = NULL;
7435 		sc->bgscan_unref_arg_size = 0;
7436 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
7437 			task_add(systq, &sc->init_task);
7438 	}
7439 	refcnt_rele_wake(&sc->task_refs);
7440 	splx(s);
7441 }
7442 
7443 int
7444 iwx_umac_scan_abort(struct iwx_softc *sc)
7445 {
7446 	struct iwx_umac_scan_abort cmd = { 0 };
7447 
7448 	return iwx_send_cmd_pdu(sc,
7449 	    IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC),
7450 	    0, sizeof(cmd), &cmd);
7451 }
7452 
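/* Abort the current scan; on success, clear scan-related driver flags. */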
7453 int
7454 iwx_scan_abort(struct iwx_softc *sc)
7455 {
7456 	int err;
7457 
7458 	err = iwx_umac_scan_abort(sc);
7459 	if (err == 0)
7460 		sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
7461 	return err;
7462 }
7463 
7464 int
7465 iwx_enable_mgmt_queue(struct iwx_softc *sc)
7466 {
7467 	int err;
7468 
7469 	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7470 
7471 	/*
7472 	 * Non-QoS frames use the "MGMT" TID and queue.
7473 	 * Other TIDs and data queues are reserved for QoS data frames.
7474 	 */
7475 	err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7476 	    IWX_MGMT_TID, IWX_TX_RING_COUNT);
7477 	if (err) {
7478 		printf("%s: could not enable Tx queue %d (error %d)\n",
7479 		    DEVNAME(sc), sc->first_data_qid, err);
7480 		return err;
7481 	}
7482 
7483 	return 0;
7484 }
7485 
7486 int
7487 iwx_rs_rval2idx(uint8_t rval)
7488 {
7489 	/* Firmware expects indices which match our 11g rate set. */
7490 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
7491 	int i;
7492 
7493 	for (i = 0; i < rs->rs_nrates; i++) {
7494 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
7495 			return i;
7496 	}
7497 
7498 	return -1;
7499 }
7500 
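/*
 * Build an MCS bitmap for the given HT rate set index, restricted to
 * MCS which both the peer and our own hardware support.
 */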
7501 uint16_t
7502 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
7503 {
7504 	struct ieee80211com *ic = &sc->sc_ic;
7505 	const struct ieee80211_ht_rateset *rs;
7506 	uint16_t htrates = 0;
7507 	int mcs;
7508 
7509 	rs = &ieee80211_std_ratesets_11n[rsidx];
7510 	for (mcs = rs->min_mcs; mcs <= rs->max_mcs; mcs++) {
7511 		if (!isset(ni->ni_rxmcs, mcs) ||
7512 		    !isset(ic->ic_sup_mcs, mcs))
7513 			continue;
7514 		htrates |= (1 << (mcs - rs->min_mcs));
7515 	}
7516 
7517 	return htrates;
7518 }
7519 
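/*
 * Build a VHT MCS bitmap for the given number of spatial streams,
 * derived from the peer's advertised Rx MCS map.
 */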
7520 uint16_t
7521 iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
7522 {
7523 	uint16_t rx_mcs;
7524 	int max_mcs = -1;
7525 
7526 	rx_mcs = (ni->ni_vht_rxmcs & IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
7527 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);
7528 	switch (rx_mcs) {
7529 	case IEEE80211_VHT_MCS_SS_NOT_SUPP:
7530 		break;
7531 	case IEEE80211_VHT_MCS_0_7:
7532 		max_mcs = 7;
7533 		break;
7534 	case IEEE80211_VHT_MCS_0_8:
7535 		max_mcs = 8;
7536 		break;
7537 	case IEEE80211_VHT_MCS_0_9:
7538 		/* Disable VHT MCS 9 for 20MHz-only stations. */
7539 		if (!ieee80211_node_supports_ht_chan40(ni))
7540 			max_mcs = 8;
7541 		else
7542 			max_mcs = 9;
7543 		break;
7544 	default:
7545 		/* Should not happen; values above cover the possible range. */
7546 		panic("invalid VHT Rx MCS value %u", rx_mcs);
7547 	}
7548 
7549 	return ((1 << (max_mcs + 1)) - 1);
7550 }
7551 
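/*
 * Initialize firmware-based transmit rate control (TLC) for a node,
 * based on negotiated rates, MCS maps, and channel width.
 */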
7552 int
7553 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
7554 {
7555 	struct ieee80211_node *ni = &in->in_ni;
7556 	struct ieee80211_rateset *rs = &ni->ni_rates;
7557 	struct iwx_tlc_config_cmd cfg_cmd;
7558 	uint32_t cmd_id;
7559 	int i;
7560 	size_t cmd_size = sizeof(cfg_cmd);
7561 
7562 	memset(&cfg_cmd, 0, sizeof(cfg_cmd));
7563 
7564 	for (i = 0; i < rs->rs_nrates; i++) {
7565 		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
7566 		int idx = iwx_rs_rval2idx(rval);
7567 		if (idx == -1)
7568 			return EINVAL;
7569 		cfg_cmd.non_ht_rates |= (1 << idx);
7570 	}
7571 
7572 	if (ni->ni_flags & IEEE80211_NODE_VHT) {
7573 		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
7574 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_HT_BW_NONE_160] =
7575 		    htole16(iwx_rs_vht_rates(sc, ni, 1));
7576 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_HT_BW_NONE_160] =
7577 		    htole16(iwx_rs_vht_rates(sc, ni, 2));
7578 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
7579 		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
7580 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_HT_BW_NONE_160] =
7581 		    htole16(iwx_rs_ht_rates(sc, ni,
7582 		    IEEE80211_HT_RATESET_SISO));
7583 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_HT_BW_NONE_160] =
7584 		    htole16(iwx_rs_ht_rates(sc, ni,
7585 		    IEEE80211_HT_RATESET_MIMO2));
7586 	} else
7587 		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
7588 
7589 	cfg_cmd.sta_id = IWX_STATION_ID;
7590 	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
7591 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
7592 	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
7593 	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
7594 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
7595 	else
7596 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
7597 	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
7598 	if (ni->ni_flags & IEEE80211_NODE_VHT)
7599 		cfg_cmd.max_mpdu_len = htole16(3895);
7600 	else
7601 		cfg_cmd.max_mpdu_len = htole16(3839);
7602 	if (ni->ni_flags & IEEE80211_NODE_HT) {
7603 		if (ieee80211_node_supports_ht_sgi20(ni)) {
7604 			cfg_cmd.sgi_ch_width_supp |= (1 <<
7605 			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
7606 		}
7607 		if (ieee80211_node_supports_ht_sgi40(ni)) {
7608 			cfg_cmd.sgi_ch_width_supp |= (1 <<
7609 			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
7610 		}
7611 	}
7612 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
7613 	    ieee80211_node_supports_vht_sgi80(ni))
7614 		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
7615 
7616 	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
7617 	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
7618 }
7619 
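/*
 * Process a rate selection notification from the firmware and mirror
 * the firmware's current Tx rate choice into the net80211 node.
 */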
7620 void
7621 iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
7622 {
7623 	struct ieee80211com *ic = &sc->sc_ic;
7624 	struct ieee80211_node *ni = ic->ic_bss;
7625 	struct ieee80211_rateset *rs = &ni->ni_rates;
7626 	uint32_t rate_n_flags;
7627 	int i;
7628 
7629 	if (notif->sta_id != IWX_STATION_ID ||
7630 	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
7631 		return;
7632 
7633 	rate_n_flags = le32toh(notif->rate);
7634 	if (rate_n_flags & IWX_RATE_MCS_VHT_MSK) {
7635 		ni->ni_txmcs = (rate_n_flags & IWX_RATE_VHT_MCS_RATE_CODE_MSK);
7636 		ni->ni_vht_ss = ((rate_n_flags & IWX_RATE_VHT_MCS_NSS_MSK) >>
7637 		    IWX_RATE_VHT_MCS_NSS_POS) + 1;
7638 	} else if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
7639 		ni->ni_txmcs = (rate_n_flags &
7640 		    (IWX_RATE_HT_MCS_RATE_CODE_MSK |
7641 		    IWX_RATE_HT_MCS_NSS_MSK));
7642 	} else {
7643 		uint8_t plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
7644 		uint8_t rval = 0;
7645 		for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
7646 			if (iwx_rates[i].plcp == plcp) {
7647 				rval = iwx_rates[i].rate;
7648 				break;
7649 			}
7650 		}
7651 		if (rval) {
7652 			uint8_t rv;
7653 			for (i = 0; i < rs->rs_nrates; i++) {
7654 				rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
7655 				if (rv == rval) {
7656 					ni->ni_txrate = i;
7657 					break;
7658 				}
7659 			}
7660 		}
7661 	}
7662 }
7663 
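/*
 * Update a PHY context. Firmware with the CDB capability cannot switch
 * a PHY context to a channel in a different band, so remove and re-add
 * the context in that case.
 */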
7664 int
7665 iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
7666     struct ieee80211_channel *chan, uint8_t chains_static,
7667     uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
7668     uint8_t vht_chan_width)
7669 {
7670 	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
7671 	int err;
7672 
7673 	if (isset(sc->sc_enabled_capa,
7674 	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
7675 	    (phyctxt->channel->ic_flags & band_flags) !=
7676 	    (chan->ic_flags & band_flags)) {
7677 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
7678 		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
7679 		    vht_chan_width);
7680 		if (err) {
7681 			printf("%s: could not remove PHY context "
7682 			    "(error %d)\n", DEVNAME(sc), err);
7683 			return err;
7684 		}
7685 		phyctxt->channel = chan;
7686 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
7687 		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
7688 		    vht_chan_width);
7689 		if (err) {
7690 			printf("%s: could not add PHY context "
7691 			    "(error %d)\n", DEVNAME(sc), err);
7692 			return err;
7693 		}
7694 	} else {
7695 		phyctxt->channel = chan;
7696 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
7697 		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
7698 		    vht_chan_width);
7699 		if (err) {
7700 			printf("%s: could not update PHY context (error %d)\n",
7701 			    DEVNAME(sc), err);
7702 			return err;
7703 		}
7704 	}
7705 
7706 	phyctxt->sco = sco;
7707 	phyctxt->vht_chan_width = vht_chan_width;
7708 	return 0;
7709 }
7710 
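/*
 * Set up PHY and MAC contexts, a binding, and a station for our AP
 * before authentication begins, and protect the session with a time
 * event so the firmware stays on the channel during association.
 */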
7711 int
7712 iwx_auth(struct iwx_softc *sc)
7713 {
7714 	struct ieee80211com *ic = &sc->sc_ic;
7715 	struct iwx_node *in = (void *)ic->ic_bss;
7716 	uint32_t duration;
7717 	int generation = sc->sc_generation, err;
7718 
7719 	splassert(IPL_NET);
7720 
7721 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7722 		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
7723 		    ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
7724 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
7725 		if (err)
7726 			return err;
7727 	} else {
7728 		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
7729 		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
7730 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
7731 		if (err)
7732 			return err;
7733 	}
7734 	in->in_phyctxt = &sc->sc_phyctxt[0];
7735 	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
7736 
7737 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
7738 	if (err) {
7739 		printf("%s: could not add MAC context (error %d)\n",
7740 		    DEVNAME(sc), err);
7741 		return err;
7742 	}
7743 	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;
7744 
7745 	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
7746 	if (err) {
7747 		printf("%s: could not add binding (error %d)\n",
7748 		    DEVNAME(sc), err);
7749 		goto rm_mac_ctxt;
7750 	}
7751 	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;
7752 
7753 	err = iwx_add_sta_cmd(sc, in, 0);
7754 	if (err) {
7755 		printf("%s: could not add sta (error %d)\n",
7756 		    DEVNAME(sc), err);
7757 		goto rm_binding;
7758 	}
7759 	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;
7760 
7761 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7762 		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
7763 		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
7764 		    IWX_TX_RING_COUNT);
7765 		if (err)
7766 			goto rm_sta;
7767 		return 0;
7768 	}
7769 
7770 	err = iwx_enable_mgmt_queue(sc);
7771 	if (err)
7772 		goto rm_sta;
7773 
7774 	err = iwx_clear_statistics(sc);
7775 	if (err)
7776 		goto rm_sta;
7777 
7778 	/*
7779 	 * Prevent the FW from wandering off channel during association
7780 	 * by "protecting" the session with a time event.
7781 	 */
7782 	if (in->in_ni.ni_intval)
7783 		duration = in->in_ni.ni_intval * 2;
7784 	else
7785 		duration = IEEE80211_DUR_TU;
7786 	return iwx_schedule_session_protection(sc, in, duration);
7787 rm_sta:
7788 	if (generation == sc->sc_generation) {
7789 		iwx_rm_sta_cmd(sc, in);
7790 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
7791 	}
7792 rm_binding:
7793 	if (generation == sc->sc_generation) {
7794 		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
7795 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
7796 	}
7797 rm_mac_ctxt:
7798 	if (generation == sc->sc_generation) {
7799 		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
7800 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
7801 	}
7802 	return err;
7803 }
7804 
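/* Tear down the firmware state which iwx_auth() has set up. */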
7805 int
7806 iwx_deauth(struct iwx_softc *sc)
7807 {
7808 	struct ieee80211com *ic = &sc->sc_ic;
7809 	struct iwx_node *in = (void *)ic->ic_bss;
7810 	int err;
7811 
7812 	splassert(IPL_NET);
7813 
7814 	iwx_unprotect_session(sc, in);
7815 
7816 	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
7817 		err = iwx_rm_sta(sc, in);
7818 		if (err)
7819 			return err;
7820 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
7821 	}
7822 
7823 	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
7824 		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
7825 		if (err) {
7826 			printf("%s: could not remove binding (error %d)\n",
7827 			    DEVNAME(sc), err);
7828 			return err;
7829 		}
7830 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
7831 	}
7832 
7833 	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
7834 		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
7835 		if (err) {
7836 			printf("%s: could not remove MAC context (error %d)\n",
7837 			    DEVNAME(sc), err);
7838 			return err;
7839 		}
7840 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
7841 	}
7842 
7843 	/* Move unused PHY context to a default channel. */
7844 	err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
7845 	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
7846 	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
7847 	if (err)
7848 		return err;
7849 
7850 	return 0;
7851 }
7852 
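/*
 * Enter RUN state: apply channel width and MIMO settings, update the
 * station and MAC contexts with association data, and enable power
 * management and firmware rate control.
 */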
7853 int
7854 iwx_run(struct iwx_softc *sc)
7855 {
7856 	struct ieee80211com *ic = &sc->sc_ic;
7857 	struct iwx_node *in = (void *)ic->ic_bss;
7858 	struct ieee80211_node *ni = &in->in_ni;
7859 	int err;
7860 
7861 	splassert(IPL_NET);
7862 
7863 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7864 		/* Add a MAC context and a sniffing STA. */
7865 		err = iwx_auth(sc);
7866 		if (err)
7867 			return err;
7868 	}
7869 
7870 	/* Configure Rx chains for MIMO and set the channel width. */
7871 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7872 		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
7873 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
7874 		    in->in_phyctxt->channel, chains, chains,
7875 		    0, IEEE80211_HTOP0_SCO_SCN,
7876 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
7877 		if (err) {
7878 			printf("%s: failed to update PHY\n", DEVNAME(sc));
7879 			return err;
7880 		}
7881 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
7882 		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
7883 		uint8_t sco, vht_chan_width;
7884 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
7885 		    ieee80211_node_supports_ht_chan40(ni))
7886 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
7887 		else
7888 			sco = IEEE80211_HTOP0_SCO_SCN;
7889 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
7890 		    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
7891 		    ieee80211_node_supports_vht_chan80(ni))
7892 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
7893 		else
7894 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
7895 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
7896 		    in->in_phyctxt->channel, chains, chains,
7897 		    0, sco, vht_chan_width);
7898 		if (err) {
7899 			printf("%s: failed to update PHY\n", DEVNAME(sc));
7900 			return err;
7901 		}
7902 	}
7903 
7904 	/* Update STA again to apply HT and VHT settings. */
7905 	err = iwx_add_sta_cmd(sc, in, 1);
7906 	if (err) {
7907 		printf("%s: could not update STA (error %d)\n",
7908 		    DEVNAME(sc), err);
7909 		return err;
7910 	}
7911 
7912 	/* We have now been assigned an associd by the AP. */
7913 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
7914 	if (err) {
7915 		printf("%s: failed to update MAC\n", DEVNAME(sc));
7916 		return err;
7917 	}
7918 
7919 	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
7920 	if (err) {
7921 		printf("%s: could not set sf full on (error %d)\n",
7922 		    DEVNAME(sc), err);
7923 		return err;
7924 	}
7925 
7926 	err = iwx_allow_mcast(sc);
7927 	if (err) {
7928 		printf("%s: could not allow mcast (error %d)\n",
7929 		    DEVNAME(sc), err);
7930 		return err;
7931 	}
7932 
7933 	err = iwx_power_update_device(sc);
7934 	if (err) {
7935 		printf("%s: could not send power command (error %d)\n",
7936 		    DEVNAME(sc), err);
7937 		return err;
7938 	}
7939 #ifdef notyet
7940 	/*
7941 	 * Disabled for now. Default beacon filter settings
7942 	 * prevent net80211 from getting ERP and HT protection
7943 	 * updates from beacons.
7944 	 */
7945 	err = iwx_enable_beacon_filter(sc, in);
7946 	if (err) {
7947 		printf("%s: could not enable beacon filter\n",
7948 		    DEVNAME(sc));
7949 		return err;
7950 	}
7951 #endif
7952 	err = iwx_power_mac_update_mode(sc, in);
7953 	if (err) {
7954 		printf("%s: could not update MAC power (error %d)\n",
7955 		    DEVNAME(sc), err);
7956 		return err;
7957 	}
7958 
7959 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7960 		return 0;
7961 
7962 	/* Start at lowest available bit-rate. Firmware will raise it. */
7963 	in->in_ni.ni_txrate = 0;
7964 	in->in_ni.ni_txmcs = 0;
7965 
7966 	err = iwx_rs_init(sc, in);
7967 	if (err) {
7968 		printf("%s: could not init rate scaling (error %d)\n",
7969 		    DEVNAME(sc), err);
7970 		return err;
7971 	}
7972 
7973 	return 0;
7974 }
7975 
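/*
 * Leave RUN state: flush the Tx path, stop Rx block ack sessions, and
 * mark the station as disassociated.
 */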
7976 int
7977 iwx_run_stop(struct iwx_softc *sc)
7978 {
7979 	struct ieee80211com *ic = &sc->sc_ic;
7980 	struct iwx_node *in = (void *)ic->ic_bss;
7981 	struct ieee80211_node *ni = &in->in_ni;
7982 	int err, i;
7983 
7984 	splassert(IPL_NET);
7985 
7986 	err = iwx_flush_sta(sc, in);
7987 	if (err) {
7988 		printf("%s: could not flush Tx path (error %d)\n",
7989 		    DEVNAME(sc), err);
7990 		return err;
7991 	}
7992 
7993 	/*
7994 	 * Stop Rx BA sessions now. We cannot rely on the BA task
7995 	 * for this when moving out of RUN state since it runs in a
7996 	 * separate thread.
7997 	 * Note that in->in_ni (struct ieee80211_node) already represents
7998 	 * our new access point in case we are roaming between APs.
7999 	 * This means we cannot rely on struct ieee80211_node to tell
8000 	 * us which BA sessions exist.
8001 	 */
8002 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
8003 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
8004 		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
8005 			continue;
8006 		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
8007 	}
8008 
8009 	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
8010 	if (err)
8011 		return err;
8012 
8013 	err = iwx_disable_beacon_filter(sc);
8014 	if (err) {
8015 		printf("%s: could not disable beacon filter (error %d)\n",
8016 		    DEVNAME(sc), err);
8017 		return err;
8018 	}
8019 
8020 	/* Mark station as disassociated. */
8021 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
8022 	if (err) {
8023 		printf("%s: failed to update MAC\n", DEVNAME(sc));
8024 		return err;
8025 	}
8026 
8027 	/* Reset Tx chains in case MIMO or 40 MHz channels were enabled. */
8028 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
8029 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
8030 		    in->in_phyctxt->channel, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8031 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8032 		if (err) {
8033 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8034 			return err;
8035 		}
8036 	}
8037 
8038 	return 0;
8039 }
8040 
8041 struct ieee80211_node *
8042 iwx_node_alloc(struct ieee80211com *ic)
8043 {
8044 	return malloc(sizeof (struct iwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
8045 }
8046 
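/*
 * net80211 ic_set_key handler. CCMP keys are queued up and installed
 * into firmware from task context; other ciphers fall back to
 * software crypto.
 */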
8047 int
8048 iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
8049     struct ieee80211_key *k)
8050 {
8051 	struct iwx_softc *sc = ic->ic_softc;
8052 	struct iwx_node *in = (void *)ni;
8053 	struct iwx_setkey_task_arg *a;
8054 	int err;
8055 
8056 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
8057 		/* Fallback to software crypto for other ciphers. */
8058 		err = ieee80211_set_key(ic, ni, k);
8059 		if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP))
8060 			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
8061 		return err;
8062 	}
8063 
8064 	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
8065 		return ENOSPC;
8066 
8067 	a = &sc->setkey_arg[sc->setkey_cur];
8068 	a->sta_id = IWX_STATION_ID;
8069 	a->ni = ni;
8070 	a->k = k;
8071 	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
8072 	sc->setkey_nkeys++;
8073 	iwx_add_task(sc, systq, &sc->setkey_task);
8074 	return EBUSY;
8075 }
8076 
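/*
 * Install a CCMP key into the firmware's key table. If this fails we
 * leave the network, since we cannot operate without working crypto.
 */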
8077 int
8078 iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
8079     struct ieee80211_key *k)
8080 {
8081 	struct ieee80211com *ic = &sc->sc_ic;
8082 	struct iwx_node *in = (void *)ni;
8083 	struct iwx_add_sta_key_cmd cmd;
8084 	uint32_t status;
8085 	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
8086 	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
8087 	int err;
8088 
8089 	/*
8090 	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
8091 	 * Currently we only implement station mode where 'ni' is always
8092 	 * ic->ic_bss so there is no need to validate arguments beyond this:
8093 	 */
8094 	KASSERT(ni == ic->ic_bss);
8095 
8096 	memset(&cmd, 0, sizeof(cmd));
8097 
8098 	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
8099 	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
8100 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
8101 	    IWX_STA_KEY_FLG_KEYID_MSK));
8102 	if (k->k_flags & IEEE80211_KEY_GROUP) {
8103 		cmd.common.key_offset = 1;
8104 		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
8105 	} else
8106 		cmd.common.key_offset = 0;
8107 
8108 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8109 	cmd.common.sta_id = sta_id;
8110 
8111 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
8112 
8113 	status = IWX_ADD_STA_SUCCESS;
8114 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
8115 	    &status);
8116 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
8117 		return ECANCELED;
8118 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
8119 		err = EIO;
8120 	if (err) {
8121 		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
8122 		    IEEE80211_REASON_AUTH_LEAVE);
8123 		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
8124 		return err;
8125 	}
8126 
8127 	if (k->k_flags & IEEE80211_KEY_GROUP)
8128 		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
8129 	else
8130 		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;
8131 
8132 	if ((in->in_flags & want_keymask) == want_keymask) {
8133 		DPRINTF(("marking port %s valid\n",
8134 		    ether_sprintf(ni->ni_macaddr)));
8135 		ni->ni_port_valid = 1;
8136 		ieee80211_set_link_state(ic, LINK_STATE_UP);
8137 	}
8138 
8139 	return 0;
8140 }
8141 
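/* Task context in which queued CCMP key installations are performed. */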
8142 void
8143 iwx_setkey_task(void *arg)
8144 {
8145 	struct iwx_softc *sc = arg;
8146 	struct iwx_setkey_task_arg *a;
8147 	int err = 0, s = splnet();
8148 
8149 	while (sc->setkey_nkeys > 0) {
8150 		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
8151 			break;
8152 		a = &sc->setkey_arg[sc->setkey_tail];
8153 		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
8154 		a->sta_id = 0;
8155 		a->ni = NULL;
8156 		a->k = NULL;
8157 		sc->setkey_tail = (sc->setkey_tail + 1) %
8158 		    nitems(sc->setkey_arg);
8159 		sc->setkey_nkeys--;
8160 	}
8161 
8162 	refcnt_rele_wake(&sc->task_refs);
8163 	splx(s);
8164 }
8165 
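/* net80211 ic_delete_key handler; marks the firmware key entry invalid. */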
8166 void
8167 iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
8168     struct ieee80211_key *k)
8169 {
8170 	struct iwx_softc *sc = ic->ic_softc;
8171 	struct iwx_add_sta_key_cmd cmd;
8172 
8173 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
8174 		/* Fallback to software crypto for other ciphers. */
8175 		ieee80211_delete_key(ic, ni, k);
8176 		return;
8177 	}
8178 
8179 	memset(&cmd, 0, sizeof(cmd));
8180 
8181 	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
8182 	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
8183 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
8184 	    IWX_STA_KEY_FLG_KEYID_MSK));
8185 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8186 	if (k->k_flags & IEEE80211_KEY_GROUP)
8187 		cmd.common.key_offset = 1;
8188 	else
8189 		cmd.common.key_offset = 0;
8190 	cmd.common.sta_id = IWX_STATION_ID;
8191 
8192 	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
8193 }
8194 
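/*
 * ifmedia change callback. Records a fixed rate or MCS if one was
 * selected, and restarts the interface if it is running.
 */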
8195 int
8196 iwx_media_change(struct ifnet *ifp)
8197 {
8198 	struct iwx_softc *sc = ifp->if_softc;
8199 	struct ieee80211com *ic = &sc->sc_ic;
8200 	uint8_t rate, ridx;
8201 	int err;
8202 
8203 	err = ieee80211_media_change(ifp);
8204 	if (err != ENETRESET)
8205 		return err;
8206 
8207 	if (ic->ic_fixed_mcs != -1)
8208 		sc->sc_fixed_ridx = iwx_mcs2ridx[ic->ic_fixed_mcs];
8209 	else if (ic->ic_fixed_rate != -1) {
8210 		rate = ic->ic_sup_rates[ic->ic_curmode].
8211 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
8212 		/* Map 802.11 rate to HW rate index. */
8213 		for (ridx = 0; ridx <= IWX_RIDX_MAX; ridx++)
8214 			if (iwx_rates[ridx].rate == rate)
8215 				break;
8216 		sc->sc_fixed_ridx = ridx;
8217 	}
8218 
8219 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
8220 	    (IFF_UP | IFF_RUNNING)) {
8221 		iwx_stop(ifp);
8222 		err = iwx_init(ifp);
8223 	}
8224 	return err;
8225 }
8226 
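/*
 * Task which performs net80211 state transitions. When moving to a
 * lower state, the firmware state built up by higher states is torn
 * down first.
 */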
8227 void
8228 iwx_newstate_task(void *psc)
8229 {
8230 	struct iwx_softc *sc = (struct iwx_softc *)psc;
8231 	struct ieee80211com *ic = &sc->sc_ic;
8232 	enum ieee80211_state nstate = sc->ns_nstate;
8233 	enum ieee80211_state ostate = ic->ic_state;
8234 	int arg = sc->ns_arg;
8235 	int err = 0, s = splnet();
8236 
8237 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
8238 		/* iwx_stop() is waiting for us. */
8239 		refcnt_rele_wake(&sc->task_refs);
8240 		splx(s);
8241 		return;
8242 	}
8243 
8244 	if (ostate == IEEE80211_S_SCAN) {
8245 		if (nstate == ostate) {
8246 			if (sc->sc_flags & IWX_FLAG_SCANNING) {
8247 				refcnt_rele_wake(&sc->task_refs);
8248 				splx(s);
8249 				return;
8250 			}
8251 			/* Firmware is no longer scanning. Do another scan. */
8252 			goto next_scan;
8253 		}
8254 	}
8255 
8256 	if (nstate <= ostate) {
8257 		switch (ostate) {
8258 		case IEEE80211_S_RUN:
8259 			err = iwx_run_stop(sc);
8260 			if (err)
8261 				goto out;
8262 			/* FALLTHROUGH */
8263 		case IEEE80211_S_ASSOC:
8264 		case IEEE80211_S_AUTH:
8265 			if (nstate <= IEEE80211_S_AUTH) {
8266 				err = iwx_deauth(sc);
8267 				if (err)
8268 					goto out;
8269 			}
8270 			/* FALLTHROUGH */
8271 		case IEEE80211_S_SCAN:
8272 		case IEEE80211_S_INIT:
8273 			break;
8274 		}
8275 
8276 		/* Die now if iwx_stop() was called while we were sleeping. */
8277 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
8278 			refcnt_rele_wake(&sc->task_refs);
8279 			splx(s);
8280 			return;
8281 		}
8282 	}
8283 
8284 	switch (nstate) {
8285 	case IEEE80211_S_INIT:
8286 		break;
8287 
8288 	case IEEE80211_S_SCAN:
8289 next_scan:
8290 		err = iwx_scan(sc);
8291 		if (err)
8292 			break;
8293 		refcnt_rele_wake(&sc->task_refs);
8294 		splx(s);
8295 		return;
8296 
8297 	case IEEE80211_S_AUTH:
8298 		err = iwx_auth(sc);
8299 		break;
8300 
8301 	case IEEE80211_S_ASSOC:
8302 		break;
8303 
8304 	case IEEE80211_S_RUN:
8305 		err = iwx_run(sc);
8306 		break;
8307 	}
8308 
8309 out:
8310 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
8311 		if (err)
8312 			task_add(systq, &sc->init_task);
8313 		else
8314 			sc->sc_newstate(ic, nstate, arg);
8315 	}
8316 	refcnt_rele_wake(&sc->task_refs);
8317 	splx(s);
8318 }
8319 
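/*
 * net80211 ic_newstate handler. Cancels driver tasks which might
 * interfere and defers the transition to iwx_newstate_task().
 */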
8320 int
8321 iwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
8322 {
8323 	struct ifnet *ifp = IC2IFP(ic);
8324 	struct iwx_softc *sc = ifp->if_softc;
8325 
8326 	/*
8327 	 * Prevent attempts to transition towards the same state, unless
8328 	 * we are scanning in which case a SCAN -> SCAN transition
8329 	 * triggers another scan iteration. And AUTH -> AUTH is needed
8330 	 * to support band-steering.
8331 	 */
8332 	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
8333 	    nstate != IEEE80211_S_AUTH)
8334 		return 0;
8335 
8336 	if (ic->ic_state == IEEE80211_S_RUN) {
8337 		iwx_del_task(sc, systq, &sc->ba_task);
8338 		iwx_del_task(sc, systq, &sc->setkey_task);
8339 		memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
8340 		sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
8341 		iwx_del_task(sc, systq, &sc->mac_ctxt_task);
8342 		iwx_del_task(sc, systq, &sc->phy_ctxt_task);
8343 		iwx_del_task(sc, systq, &sc->bgscan_done_task);
8344 	}
8345 
8346 	sc->ns_nstate = nstate;
8347 	sc->ns_arg = arg;
8348 
8349 	iwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);
8350 
8351 	return 0;
8352 }
8353 
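/* Finish a scan: clear scan flags and hand control back to net80211. */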
8354 void
8355 iwx_endscan(struct iwx_softc *sc)
8356 {
8357 	struct ieee80211com *ic = &sc->sc_ic;
8358 
8359 	if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
8360 		return;
8361 
8362 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8363 	ieee80211_end_scan(&ic->ic_if);
8364 }
8365 
8366 /*
8367  * Aging and idle timeouts for the different possible scenarios
8368  * in default configuration.
8369  */
8370 static const uint32_t
8371 iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
8372 	{
8373 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
8374 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
8375 	},
8376 	{
8377 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
8378 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
8379 	},
8380 	{
8381 		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
8382 		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
8383 	},
8384 	{
8385 		htole32(IWX_SF_BA_AGING_TIMER_DEF),
8386 		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
8387 	},
8388 	{
8389 		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
8390 		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
8391 	},
8392 };
8393 
8394 /*
8395  * Aging and idle timeouts for the different possible scenarios
8396  * in single BSS MAC configuration.
8397  */
8398 static const uint32_t
8399 iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
8400 	{
8401 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
8402 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
8403 	},
8404 	{
8405 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
8406 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
8407 	},
8408 	{
8409 		htole32(IWX_SF_MCAST_AGING_TIMER),
8410 		htole32(IWX_SF_MCAST_IDLE_TIMER)
8411 	},
8412 	{
8413 		htole32(IWX_SF_BA_AGING_TIMER),
8414 		htole32(IWX_SF_BA_IDLE_TIMER)
8415 	},
8416 	{
8417 		htole32(IWX_SF_TX_RE_AGING_TIMER),
8418 		htole32(IWX_SF_TX_RE_IDLE_TIMER)
8419 	},
8420 };
8421 
8422 void
8423 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
8424     struct ieee80211_node *ni)
8425 {
8426 	int i, j, watermark;
8427 
8428 	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
8429 
8430 	/*
8431 	 * If we are in association flow - check antenna configuration
8432 	 * capabilities of the AP station, and choose the watermark accordingly.
8433 	 */
8434 	if (ni) {
8435 		if (ni->ni_flags & IEEE80211_NODE_HT) {
8436 			if (ni->ni_rxmcs[1] != 0)
8437 				watermark = IWX_SF_W_MARK_MIMO2;
8438 			else
8439 				watermark = IWX_SF_W_MARK_SISO;
8440 		} else {
8441 			watermark = IWX_SF_W_MARK_LEGACY;
8442 		}
8443 	} else {
8444 		/* Default watermark value for unassociated mode. */
8445 		watermark = IWX_SF_W_MARK_MIMO2;
8446 	}
8447 	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
8448 
8449 	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
8450 		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
8451 			sf_cmd->long_delay_timeouts[i][j] =
8452 					htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
8453 		}
8454 	}
8455 
8456 	if (ni) {
8457 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
8458 		       sizeof(iwx_sf_full_timeout));
8459 	} else {
8460 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
8461 		       sizeof(iwx_sf_full_timeout_def));
8462 	}
8464 }
8465 
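/*
 * Configure the firmware's SF (smart FIFO) state, including frame
 * aging and idle timeouts.
 */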
8466 int
8467 iwx_sf_config(struct iwx_softc *sc, int new_state)
8468 {
8469 	struct ieee80211com *ic = &sc->sc_ic;
8470 	struct iwx_sf_cfg_cmd sf_cmd = {
8471 		.state = htole32(new_state),
8472 	};
8473 	int err = 0;
8474 
8475 	switch (new_state) {
8476 	case IWX_SF_UNINIT:
8477 	case IWX_SF_INIT_OFF:
8478 		iwx_fill_sf_command(sc, &sf_cmd, NULL);
8479 		break;
8480 	case IWX_SF_FULL_ON:
8481 		iwx_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
8482 		break;
8483 	default:
8484 		return EINVAL;
8485 	}
8486 
8487 	err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
8488 				   sizeof(sf_cmd), &sf_cmd);
8489 	return err;
8490 }
8491 
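/* Send the initial Bluetooth coexistence configuration. */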
8492 int
8493 iwx_send_bt_init_conf(struct iwx_softc *sc)
8494 {
8495 	struct iwx_bt_coex_cmd bt_cmd;
8496 
8497 	bt_cmd.mode = htole32(IWX_BT_COEX_WIFI);
8498 	bt_cmd.enabled_modules = 0;
8499 
8500 	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
8501 	    &bt_cmd);
8502 }
8503 
8504 int
8505 iwx_send_soc_conf(struct iwx_softc *sc)
8506 {
8507 	struct iwx_soc_configuration_cmd cmd;
8508 	int err;
8509 	uint32_t cmd_id, flags = 0;
8510 
8511 	memset(&cmd, 0, sizeof(cmd));
8512 
8513 	/*
8514 	 * In VER_1 of this command, the discrete value is considered
8515 	 * an integer; in VER_2, it's a bitmask.  Since we have only 2
8516 	 * values in VER_1, this is backwards-compatible with VER_2,
8517 	 * as long as we don't set any other flag bits.
8518 	 */
8519 	if (!sc->sc_integrated) { /* VER_1 */
8520 		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
8521 	} else { /* VER_2 */
8522 		uint8_t scan_cmd_ver;
8523 		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
8524 			flags |= (sc->sc_ltr_delay &
8525 			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
8526 		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
8527 		    IWX_SCAN_REQ_UMAC);
8528 		if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
8529 		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
8530 			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
8531 	}
8532 	cmd.flags = htole32(flags);
8533 
8534 	cmd.latency = htole32(sc->sc_xtal_latency);
8535 
8536 	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
8537 	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
8538 	if (err)
8539 		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
8540 	return err;
8541 }
8542 
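/*
 * Send an MCC (mobile country code) update to the firmware and apply
 * the channel list it returns for regulatory purposes.
 */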
8543 int
8544 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
8545 {
8546 	struct iwx_mcc_update_cmd mcc_cmd;
8547 	struct iwx_host_cmd hcmd = {
8548 		.id = IWX_MCC_UPDATE_CMD,
8549 		.flags = IWX_CMD_WANT_RESP,
8550 		.data = { &mcc_cmd },
8551 	};
8552 	struct iwx_rx_packet *pkt;
8553 	struct iwx_mcc_update_resp *resp;
8554 	size_t resp_len;
8555 	int err;
8556 
8557 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
8558 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
8559 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
8560 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
8561 		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
8562 	else
8563 		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
8564 
8565 	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
8566 	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
8567 
8568 	err = iwx_send_cmd(sc, &hcmd);
8569 	if (err)
8570 		return err;
8571 
8572 	pkt = hcmd.resp_pkt;
8573 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
8574 		err = EIO;
8575 		goto out;
8576 	}
8577 
8578 	resp_len = iwx_rx_packet_payload_len(pkt);
8579 	if (resp_len < sizeof(*resp)) {
8580 		err = EIO;
8581 		goto out;
8582 	}
8583 
8584 	resp = (void *)pkt->data;
8585 	if (resp_len != sizeof(*resp) +
8586 	    resp->n_channels * sizeof(resp->channels[0])) {
8587 		err = EIO;
8588 		goto out;
8589 	}
8590 
8591 	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=%d n_channels=%u\n",
8592 	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
8593 
8594 	/* Update channel map for net80211 and our scan configuration. */
8595 	iwx_init_channel_map(sc, NULL, resp->channels, resp->n_channels);
8596 
8597 out:
8598 	iwx_free_resp(sc, &hcmd);
8599 
8600 	return err;
8601 }
8602 
8603 int
8604 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
8605 {
8606 	struct iwx_temp_report_ths_cmd cmd;
8607 	int err;
8608 
8609 	/*
8610 	 * In order to give responsibility for critical-temperature-kill
8611 	 * and TX backoff to FW we need to send an empty temperature
8612 	 * reporting command at init time.
8613 	 */
8614 	memset(&cmd, 0, sizeof(cmd));
8615 
8616 	err = iwx_send_cmd_pdu(sc,
8617 	    IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
8618 	    0, sizeof(cmd), &cmd);
8619 	if (err)
8620 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
8621 		    DEVNAME(sc), err);
8622 
8623 	return err;
8624 }
8625 
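/*
 * Send the sequence of configuration commands which the firmware
 * requires before the interface can be brought up.
 */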
8626 int
8627 iwx_init_hw(struct iwx_softc *sc)
8628 {
8629 	struct ieee80211com *ic = &sc->sc_ic;
8630 	int err, i;
8631 
8632 	err = iwx_run_init_mvm_ucode(sc, 0);
8633 	if (err)
8634 		return err;
8635 
8636 	if (!iwx_nic_lock(sc))
8637 		return EBUSY;
8638 
8639 	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
8640 	if (err) {
8641 		printf("%s: could not init tx ant config (error %d)\n",
8642 		    DEVNAME(sc), err);
8643 		goto err;
8644 	}
8645 
8646 	if (sc->sc_tx_with_siso_diversity) {
8647 		err = iwx_send_phy_cfg_cmd(sc);
8648 		if (err) {
8649 			printf("%s: could not send phy config (error %d)\n",
8650 			    DEVNAME(sc), err);
8651 			goto err;
8652 		}
8653 	}
8654 
8655 	err = iwx_send_bt_init_conf(sc);
8656 	if (err) {
8657 		printf("%s: could not init bt coex (error %d)\n",
8658 		    DEVNAME(sc), err);
8659 		goto err;
8660 	}
8661 
8662 	err = iwx_send_soc_conf(sc);
8663 	if (err)
8664 		goto err;
8665 
8666 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
8667 		err = iwx_send_dqa_cmd(sc);
8668 		if (err)
8669 			goto err;
8670 	}
8671 
8672 	for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
8673 		/*
8674 		 * The channel used here isn't relevant as it's
8675 		 * going to be overwritten in the other flows.
8676 		 * For now use the first channel we have.
8677 		 */
8678 		sc->sc_phyctxt[i].id = i;
8679 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
8680 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
8681 		    IWX_FW_CTXT_ACTION_ADD, 0, IEEE80211_HTOP0_SCO_SCN,
8682 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8683 		if (err) {
8684 			printf("%s: could not add phy context %d (error %d)\n",
8685 			    DEVNAME(sc), i, err);
8686 			goto err;
8687 		}
8688 	}
8689 
8690 	err = iwx_config_ltr(sc);
8691 	if (err) {
8692 		printf("%s: PCIe LTR configuration failed (error %d)\n",
8693 		    DEVNAME(sc), err);
8694 	}
8695 
8696 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
8697 		err = iwx_send_temp_report_ths_cmd(sc);
8698 		if (err)
8699 			goto err;
8700 	}
8701 
8702 	err = iwx_power_update_device(sc);
8703 	if (err) {
8704 		printf("%s: could not send power command (error %d)\n",
8705 		    DEVNAME(sc), err);
8706 		goto err;
8707 	}
8708 
8709 	if (sc->sc_nvm.lar_enabled) {
8710 		err = iwx_send_update_mcc_cmd(sc, "ZZ");
8711 		if (err) {
8712 			printf("%s: could not init LAR (error %d)\n",
8713 			    DEVNAME(sc), err);
8714 			goto err;
8715 		}
8716 	}
8717 
8718 	err = iwx_config_umac_scan_reduced(sc);
8719 	if (err) {
8720 		printf("%s: could not configure scan (error %d)\n",
8721 		    DEVNAME(sc), err);
8722 		goto err;
8723 	}
8724 
8725 	err = iwx_disable_beacon_filter(sc);
8726 	if (err) {
8727 		printf("%s: could not disable beacon filter (error %d)\n",
8728 		    DEVNAME(sc), err);
8729 		goto err;
8730 	}
8731 
8732 err:
8733 	iwx_nic_unlock(sc);
8734 	return err;
8735 }
8736 
8737 /* Allow multicast from our BSSID. */
8738 int
8739 iwx_allow_mcast(struct iwx_softc *sc)
8740 {
8741 	struct ieee80211com *ic = &sc->sc_ic;
8742 	struct iwx_node *in = (void *)ic->ic_bss;
8743 	struct iwx_mcast_filter_cmd *cmd;
8744 	size_t size;
8745 	int err;
8746 
8747 	size = roundup(sizeof(*cmd), 4);
8748 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
8749 	if (cmd == NULL)
8750 		return ENOMEM;
8751 	cmd->filter_own = 1;
8752 	cmd->port_id = 0;
8753 	cmd->count = 0;
8754 	cmd->pass_all = 1;
8755 	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
8756 
8757 	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
8758 	    0, size, cmd);
8759 	free(cmd, M_DEVBUF, size);
8760 	return err;
8761 }
8762 
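/*
 * ifnet if_init callback: boot the firmware, configure the hardware,
 * and either enter RUN state directly (monitor mode) or begin an
 * initial scan. Runs with ioctl_rwl write-locked; sc_generation is
 * re-checked after sleeping to detect a concurrent iwx_stop().
 */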
8763 int
8764 iwx_init(struct ifnet *ifp)
8765 {
8766 	struct iwx_softc *sc = ifp->if_softc;
8767 	struct ieee80211com *ic = &sc->sc_ic;
8768 	int err, generation;
8769 
8770 	rw_assert_wrlock(&sc->ioctl_rwl);
8771 
8772 	generation = ++sc->sc_generation;
8773 
8774 	err = iwx_preinit(sc);
8775 	if (err)
8776 		return err;
8777 
8778 	err = iwx_start_hw(sc);
8779 	if (err) {
8780 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
8781 		return err;
8782 	}
8783 
8784 	err = iwx_init_hw(sc);
8785 	if (err) {
8786 		if (generation == sc->sc_generation)
8787 			iwx_stop_device(sc);
8788 		return err;
8789 	}
8790 
8791 	if (sc->sc_nvm.sku_cap_11n_enable)
8792 		iwx_setup_ht_rates(sc);
8793 	if (sc->sc_nvm.sku_cap_11ac_enable)
8794 		iwx_setup_vht_rates(sc);
8795 
8796 	KASSERT(sc->task_refs.r_refs == 0);
8797 	refcnt_init(&sc->task_refs);
8798 	ifq_clr_oactive(&ifp->if_snd);
8799 	ifp->if_flags |= IFF_RUNNING;
8800 
8801 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8802 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
8803 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
8804 		return 0;
8805 	}
8806 
8807 	ieee80211_begin_scan(ifp);
8808 
8809 	/*
8810 	 * ieee80211_begin_scan() ends up scheduling iwx_newstate_task().
8811 	 * Wait until the transition to SCAN state has completed.
8812 	 */
8813 	do {
8814 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwxinit",
8815 		    SEC_TO_NSEC(1));
8816 		if (generation != sc->sc_generation)
8817 			return ENXIO;
8818 		if (err) {
8819 			iwx_stop(ifp);
8820 			return err;
8821 		}
8822 	} while (ic->ic_state != IEEE80211_S_SCAN);
8823 
8824 	return 0;
8825 }
8826 
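/*
 * ifnet if_start callback: dequeue frames from the 802.11 management
 * queue and the interface send queue, encapsulate them, and hand them
 * to iwx_tx(). Management frames are sent even outside RUN state.
 */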
8827 void
8828 iwx_start(struct ifnet *ifp)
8829 {
8830 	struct iwx_softc *sc = ifp->if_softc;
8831 	struct ieee80211com *ic = &sc->sc_ic;
8832 	struct ieee80211_node *ni;
8833 	struct ether_header *eh;
8834 	struct mbuf *m;
8835 
8836 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
8837 		return;
8838 
8839 	for (;;) {
8840 		/* why isn't this done per-queue? */
8841 		if (sc->qfullmsk != 0) {
8842 			ifq_set_oactive(&ifp->if_snd);
8843 			break;
8844 		}
8845 
8846 		/* Don't queue additional frames while flushing Tx queues. */
8847 		if (sc->sc_flags & IWX_FLAG_TXFLUSH)
8848 			break;
8849 
8850 		/* need to send management frames even if we're not RUNning */
8851 		m = mq_dequeue(&ic->ic_mgtq);
8852 		if (m) {
8853 			ni = m->m_pkthdr.ph_cookie;
8854 			goto sendit;
8855 		}
8856 
8857 		if (ic->ic_state != IEEE80211_S_RUN ||
8858 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
8859 			break;
8860 
8861 		m = ifq_dequeue(&ifp->if_snd);
8862 		if (!m)
8863 			break;
8864 		if (m->m_len < sizeof (*eh) &&
8865 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
8866 			ifp->if_oerrors++;
8867 			continue;
8868 		}
8869 #if NBPFILTER > 0
8870 		if (ifp->if_bpf != NULL)
8871 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
8872 #endif
8873 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
8874 			ifp->if_oerrors++;
8875 			continue;
8876 		}
8877 
8878  sendit:
8879 #if NBPFILTER > 0
8880 		if (ic->ic_rawbpf != NULL)
8881 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
8882 #endif
8883 		if (iwx_tx(sc, m, ni) != 0) {
8884 			ieee80211_release_node(ic, ni);
8885 			ifp->if_oerrors++;
8886 			continue;
8887 		}
8888 
8889 		if (ifp->if_flags & IFF_UP)
8890 			ifp->if_timer = 1;
8891 	}
8892 
8893 	return;
8894 }
8895 
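/*
 * ifnet stop routine: block new tasks, cancel or drain pending ones,
 * shut the device down, and reset all soft state back to INIT.
 */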
8896 void
8897 iwx_stop(struct ifnet *ifp)
8898 {
8899 	struct iwx_softc *sc = ifp->if_softc;
8900 	struct ieee80211com *ic = &sc->sc_ic;
8901 	struct iwx_node *in = (void *)ic->ic_bss;
8902 	int i, s = splnet();
8903 
8904 	rw_assert_wrlock(&sc->ioctl_rwl);
8905 
8906 	sc->sc_flags |= IWX_FLAG_SHUTDOWN; /* Disallow new tasks. */
8907 
8908 	/* Cancel scheduled tasks and let any stale tasks finish up. */
8909 	task_del(systq, &sc->init_task);
8910 	iwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
8911 	iwx_del_task(sc, systq, &sc->ba_task);
8912 	iwx_del_task(sc, systq, &sc->setkey_task);
8913 	memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
8914 	sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
8915 	iwx_del_task(sc, systq, &sc->mac_ctxt_task);
8916 	iwx_del_task(sc, systq, &sc->phy_ctxt_task);
8917 	iwx_del_task(sc, systq, &sc->bgscan_done_task);
8918 	KASSERT(sc->task_refs.r_refs >= 1);
8919 	refcnt_finalize(&sc->task_refs, "iwxstop");
8920 
8921 	iwx_stop_device(sc);
8922 
8923 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8924 	sc->bgscan_unref_arg = NULL;
8925 	sc->bgscan_unref_arg_size = 0;
8926 
8927 	/* Reset soft state. */
8928 
8929 	sc->sc_generation++;
8930 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
8931 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
8932 		sc->sc_cmd_resp_pkt[i] = NULL;
8933 		sc->sc_cmd_resp_len[i] = 0;
8934 	}
8935 	ifp->if_flags &= ~IFF_RUNNING;
8936 	ifq_clr_oactive(&ifp->if_snd);
8937 
8938 	in->in_phyctxt = NULL;
8939 	in->in_flags = 0;
8940 	IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
8941 
8942 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8943 	sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8944 	sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8945 	sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8946 	sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8947 	sc->sc_flags &= ~IWX_FLAG_HW_ERR;
8948 	sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
8949 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
8950 
8951 	sc->sc_rx_ba_sessions = 0;
8952 	sc->ba_rx.start_tidmask = 0;
8953 	sc->ba_rx.stop_tidmask = 0;
8954 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
8955 	sc->ba_tx.start_tidmask = 0;
8956 	sc->ba_tx.stop_tidmask = 0;
8957 
8958 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
8959 	sc->ns_nstate = IEEE80211_S_INIT;
8960 
8961 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
8962 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
8963 		iwx_clear_reorder_buffer(sc, rxba);
8964 	}
8965 	memset(sc->sc_tx_timer, 0, sizeof(sc->sc_tx_timer));
8966 	ifp->if_timer = 0;
8967 
8968 	splx(s);
8969 }
8970 
8971 void
8972 iwx_watchdog(struct ifnet *ifp)
8973 {
8974 	struct iwx_softc *sc = ifp->if_softc;
8975 	int i;
8976 
8977 	ifp->if_timer = 0;
8978 
8979 	/*
8980 	 * We maintain a separate timer for each Tx queue because
8981 	 * Tx aggregation queues can get "stuck" while other queues
8982 	 * keep working. The Linux driver uses a similar workaround.
8983 	 */
8984 	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
8985 		if (sc->sc_tx_timer[i] > 0) {
8986 			if (--sc->sc_tx_timer[i] == 0) {
8987 				printf("%s: device timeout\n", DEVNAME(sc));
8988 				if (ifp->if_flags & IFF_DEBUG) {
8989 					iwx_nic_error(sc);
8990 					iwx_dump_driver_status(sc);
8991 				}
8992 				if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
8993 					task_add(systq, &sc->init_task);
8994 				ifp->if_oerrors++;
8995 				return;
8996 			}
8997 			ifp->if_timer = 1;
8998 		}
8999 	}
9000 
9001 	ieee80211_watchdog(ifp);
9002 }
9003 
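/*
 * ifnet if_ioctl callback. ioctl_rwl serializes this path against
 * other ioctls while iwx_init() or iwx_stop() may be sleeping here;
 * ENETRESET from net80211 triggers a full stop/init cycle.
 */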
9004 int
9005 iwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
9006 {
9007 	struct iwx_softc *sc = ifp->if_softc;
9008 	int s, err = 0, generation = sc->sc_generation;
9009 
9010 	/*
9011 	 * Prevent processes from entering this function while another
9012 	 * process is tsleep'ing in it.
9013 	 */
9014 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
9015 	if (err == 0 && generation != sc->sc_generation) {
9016 		rw_exit(&sc->ioctl_rwl);
9017 		return ENXIO;
9018 	}
9019 	if (err)
9020 		return err;
9021 	s = splnet();
9022 
9023 	switch (cmd) {
9024 	case SIOCSIFADDR:
9025 		ifp->if_flags |= IFF_UP;
9026 		/* FALLTHROUGH */
9027 	case SIOCSIFFLAGS:
9028 		if (ifp->if_flags & IFF_UP) {
9029 			if (!(ifp->if_flags & IFF_RUNNING)) {
9030 				/* Force reload of firmware image from disk. */
9031 				sc->sc_fw.fw_status = IWX_FW_STATUS_NONE;
9032 				err = iwx_init(ifp);
9033 			}
9034 		} else {
9035 			if (ifp->if_flags & IFF_RUNNING)
9036 				iwx_stop(ifp);
9037 		}
9038 		break;
9039 
9040 	default:
9041 		err = ieee80211_ioctl(ifp, cmd, data);
9042 	}
9043 
9044 	if (err == ENETRESET) {
9045 		err = 0;
9046 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
9047 		    (IFF_UP | IFF_RUNNING)) {
9048 			iwx_stop(ifp);
9049 			err = iwx_init(ifp);
9050 		}
9051 	}
9052 
9053 	splx(s);
9054 	rw_exit(&sc->ioctl_rwl);
9055 
9056 	return err;
9057 }
9058 
9059 /*
9060  * Note: This structure is read from the device with IO accesses,
9061  * and the reading already does the endian conversion. As it is
9062  * read with uint32_t-sized accesses, any members with a different size
9063  * need to be ordered correctly though!
9064  */
9065 struct iwx_error_event_table {
9066 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
9067 	uint32_t error_id;		/* type of error */
9068 	uint32_t trm_hw_status0;	/* TRM HW status */
9069 	uint32_t trm_hw_status1;	/* TRM HW status */
9070 	uint32_t blink2;		/* branch link */
9071 	uint32_t ilink1;		/* interrupt link */
9072 	uint32_t ilink2;		/* interrupt link */
9073 	uint32_t data1;		/* error-specific data */
9074 	uint32_t data2;		/* error-specific data */
9075 	uint32_t data3;		/* error-specific data */
9076 	uint32_t bcon_time;		/* beacon timer */
9077 	uint32_t tsf_low;		/* TSF (timing sync function) timer, low */
9078 	uint32_t tsf_hi;		/* TSF (timing sync function) timer, high */
9079 	uint32_t gp1;		/* GP1 timer register */
9080 	uint32_t gp2;		/* GP2 timer register */
9081 	uint32_t fw_rev_type;	/* firmware revision type */
9082 	uint32_t major;		/* uCode version major */
9083 	uint32_t minor;		/* uCode version minor */
9084 	uint32_t hw_ver;		/* HW Silicon version */
9085 	uint32_t brd_ver;		/* HW board version */
9086 	uint32_t log_pc;		/* log program counter */
9087 	uint32_t frame_ptr;		/* frame pointer */
9088 	uint32_t stack_ptr;		/* stack pointer */
9089 	uint32_t hcmd;		/* last host command header */
9090 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
9091 				 * rxtx_flag */
9092 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
9093 				 * host_flag */
9094 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
9095 				 * enc_flag */
9096 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
9097 				 * time_flag */
9098 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
9099 				 * wico interrupt */
9100 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
9101 	uint32_t wait_event;		/* wait event() caller address */
9102 	uint32_t l2p_control;	/* L2pControlField */
9103 	uint32_t l2p_duration;	/* L2pDurationField */
9104 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
9105 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
9106 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
9107 				 * (LMPM_PMG_SEL) */
9108 	uint32_t u_timestamp;	/* date and time of the firmware
9109 				 * compilation */
9110 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
9111 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
9112 
9113 /*
9114  * UMAC error struct - relevant starting from family 8000 chip.
9115  * Note: This structure is read from the device with IO accesses,
9116  * and the reading already does the endian conversion. As it is
9117  * read with u32-sized accesses, any members with a different size
9118  * need to be ordered correctly though!
9119  */
9120 struct iwx_umac_error_event_table {
9121 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
9122 	uint32_t error_id;	/* type of error */
9123 	uint32_t blink1;	/* branch link */
9124 	uint32_t blink2;	/* branch link */
9125 	uint32_t ilink1;	/* interrupt link */
9126 	uint32_t ilink2;	/* interrupt link */
9127 	uint32_t data1;		/* error-specific data */
9128 	uint32_t data2;		/* error-specific data */
9129 	uint32_t data3;		/* error-specific data */
9130 	uint32_t umac_major;
9131 	uint32_t umac_minor;
9132 	uint32_t frame_pointer;	/* core register 27 */
9133 	uint32_t stack_pointer;	/* core register 28 */
9134 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
9135 	uint32_t nic_isr_pref;	/* ISR status register */
9136 } __packed;
9137 
9138 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
9139 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
9140 
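/*
 * Dump the UMAC error log from device memory. A table pointer below
 * 0x400000 means the firmware never published a valid log.
 */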
9141 void
9142 iwx_nic_umac_error(struct iwx_softc *sc)
9143 {
9144 	struct iwx_umac_error_event_table table;
9145 	uint32_t base;
9146 
9147 	base = sc->sc_uc.uc_umac_error_event_table;
9148 
9149 	if (base < 0x400000) {
9150 		printf("%s: Invalid error log pointer 0x%08x\n",
9151 		    DEVNAME(sc), base);
9152 		return;
9153 	}
9154 
9155 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
9156 		printf("%s: reading errlog failed\n", DEVNAME(sc));
9157 		return;
9158 	}
9159 
9160 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
9161 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
9162 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
9163 			sc->sc_flags, table.valid);
9164 	}
9165 
9166 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
9167 		iwx_desc_lookup(table.error_id));
9168 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
9169 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
9170 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
9171 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
9172 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
9173 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
9174 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
9175 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
9176 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
9177 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
9178 	    table.frame_pointer);
9179 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
9180 	    table.stack_pointer);
9181 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
9182 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
9183 	    table.nic_isr_pref);
9184 }
9185 
9186 #define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
9187 static struct {
9188 	const char *name;
9189 	uint8_t num;
9190 } advanced_lookup[] = {
9191 	{ "NMI_INTERRUPT_WDG", 0x34 },
9192 	{ "SYSASSERT", 0x35 },
9193 	{ "UCODE_VERSION_MISMATCH", 0x37 },
9194 	{ "BAD_COMMAND", 0x38 },
9195 	{ "BAD_COMMAND", 0x39 },
9196 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
9197 	{ "FATAL_ERROR", 0x3D },
9198 	{ "NMI_TRM_HW_ERR", 0x46 },
9199 	{ "NMI_INTERRUPT_TRM", 0x4C },
9200 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
9201 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
9202 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
9203 	{ "NMI_INTERRUPT_HOST", 0x66 },
9204 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
9205 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
9206 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
9207 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
9208 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
9209 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
9210 	{ "ADVANCED_SYSASSERT", 0 },
9211 };
9212 
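/*
 * Map a firmware error code to a symbolic name, ignoring the CPU bits
 * in the top nibble. For example, iwx_desc_lookup(0x20000034) masks
 * the code down to 0x34 and returns "NMI_INTERRUPT_WDG"; unknown
 * codes fall through to "ADVANCED_SYSASSERT".
 */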
9213 const char *
9214 iwx_desc_lookup(uint32_t num)
9215 {
9216 	int i;
9217 
9218 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
9219 		if (advanced_lookup[i].num ==
9220 		    (num & ~IWX_FW_SYSASSERT_CPU_MASK))
9221 			return advanced_lookup[i].name;
9222 
9223 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
9224 	return advanced_lookup[i].name;
9225 }
9226 
9227 /*
9228  * Support for dumping the error log seemed like a good idea ...
9229  * but it's mostly hex junk and the only sensible thing is the
9230  * hw/ucode revision (which we know anyway).  Since it's here,
9231  * I'll just leave it in, just in case e.g. the Intel guys want to
9232  * help us decipher some "ADVANCED_SYSASSERT" later.
9233  */
9234 void
9235 iwx_nic_error(struct iwx_softc *sc)
9236 {
9237 	struct iwx_error_event_table table;
9238 	uint32_t base;
9239 
9240 	printf("%s: dumping device error log\n", DEVNAME(sc));
9241 	base = sc->sc_uc.uc_lmac_error_event_table[0];
9242 	if (base < 0x400000) {
9243 		printf("%s: Invalid error log pointer 0x%08x\n",
9244 		    DEVNAME(sc), base);
9245 		return;
9246 	}
9247 
9248 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
9249 		printf("%s: reading errlog failed\n", DEVNAME(sc));
9250 		return;
9251 	}
9252 
9253 	if (!table.valid) {
9254 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
9255 		return;
9256 	}
9257 
9258 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
9259 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
9260 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
9261 		    sc->sc_flags, table.valid);
9262 	}
9263 
9264 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
9265 	    iwx_desc_lookup(table.error_id));
9266 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
9267 	    table.trm_hw_status0);
9268 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
9269 	    table.trm_hw_status1);
9270 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
9271 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
9272 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
9273 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
9274 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
9275 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
9276 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
9277 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
9278 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
9279 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
9280 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
9281 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
9282 	    table.fw_rev_type);
9283 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
9284 	    table.major);
9285 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
9286 	    table.minor);
9287 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
9288 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
9289 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
9290 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
9291 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
9292 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
9293 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
9294 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
9295 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
9296 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
9297 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
9298 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
9299 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
9300 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
9301 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
9302 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
9303 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
9304 
9305 	if (sc->sc_uc.uc_umac_error_event_table)
9306 		iwx_nic_umac_error(sc);
9307 }
9308 
9309 void
9310 iwx_dump_driver_status(struct iwx_softc *sc)
9311 {
9312 	int i;
9313 
9314 	printf("driver status:\n");
9315 	for (i = 0; i < nitems(sc->txq); i++) {
9316 		struct iwx_tx_ring *ring = &sc->txq[i];
9317 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
9318 		    "cur_hw=%-3d queued=%-3d\n",
9319 		    i, ring->qid, ring->cur, ring->cur_hw,
9320 		    ring->queued);
9321 	}
9322 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
9323 	printf("  802.11 state %s\n",
9324 	    ieee80211_state_name[sc->sc_ic.ic_state]);
9325 }
9326 
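/*
 * Helpers for reading command responses: sync the payload that
 * follows the iwx_rx_packet header for CPU access (POSTREAD) and
 * point the given variable just past the header. SYNC_RESP_STRUCT
 * syncs sizeof(*_var_) bytes, SYNC_RESP_PTR syncs _len_ bytes.
 */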
9327 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
9328 do {									\
9329 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
9330 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
9331 	_var_ = (void *)((_pkt_)+1);					\
9332 } while (/*CONSTCOND*/0)
9333 
9334 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
9335 do {									\
9336 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
9337 	    (_len_), BUS_DMASYNC_POSTREAD);				\
9338 	_ptr_ = (void *)((_pkt_)+1);					\
9339 } while (/*CONSTCOND*/0)
9340 
9341 int
9342 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
9343 {
9344 	int qid, idx, code;
9345 
9346 	qid = pkt->hdr.qid & ~0x80;
9347 	idx = pkt->hdr.idx;
9348 	code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
9349 
9350 	return (!(qid == 0 && idx == 0 && code == 0) &&
9351 	    pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
9352 }
9353 
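/*
 * Process all firmware notifications contained in a single Rx buffer.
 * Pre-AX210 hardware can pack several packets into one buffer, each
 * aligned to IWX_FH_RSCSR_FRAME_ALIGN. As an illustration, assuming
 * the usual 0x40-byte alignment, a packet with len 0x5c advances
 * offset by roundup(0x5c, 0x40) == 0x80 to reach the next packet.
 */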
9354 void
9355 iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf_list *ml)
9356 {
9357 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
9358 	struct iwx_rx_packet *pkt, *nextpkt;
9359 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
9360 	struct mbuf *m0, *m;
9361 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
9362 	int qid, idx, code, handled = 1;
9363 
9364 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
9365 	    BUS_DMASYNC_POSTREAD);
9366 
9367 	m0 = data->m;
9368 	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
9369 		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
9370 		qid = pkt->hdr.qid;
9371 		idx = pkt->hdr.idx;
9372 
9373 		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
9374 
9375 		if (!iwx_rx_pkt_valid(pkt))
9376 			break;
9377 
9378 		/*
9379 		 * XXX Intel inside (tm)
9380 		 * Any commands in the LONG_GROUP could actually be in the
9381 		 * LEGACY group. Firmware API versions >= 50 reject commands
9382 		 * in group 0, forcing us to use this hack.
9383 		 */
9384 		if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
9385 			struct iwx_tx_ring *ring = &sc->txq[qid];
9386 			struct iwx_tx_data *txdata = &ring->data[idx];
9387 			if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
9388 				code = iwx_cmd_opcode(code);
9389 		}
9390 
9391 		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
9392 		if (len < minsz || len > (IWX_RBUF_SIZE - offset))
9393 			break;
9394 
9395 		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
9396 			/* Take mbuf m0 off the RX ring. */
9397 			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
9398 				ifp->if_ierrors++;
9399 				break;
9400 			}
9401 			KASSERT(data->m != m0);
9402 		}
9403 
9404 		switch (code) {
9405 		case IWX_REPLY_RX_PHY_CMD:
9406 			iwx_rx_rx_phy_cmd(sc, pkt, data);
9407 			break;
9408 
9409 		case IWX_REPLY_RX_MPDU_CMD: {
9410 			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
9411 			nextoff = offset +
9412 			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
9413 			nextpkt = (struct iwx_rx_packet *)
9414 			    (m0->m_data + nextoff);
9415 			/* AX210 devices ship only one packet per Rx buffer. */
9416 			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 ||
9417 			    nextoff + minsz >= IWX_RBUF_SIZE ||
9418 			    !iwx_rx_pkt_valid(nextpkt)) {
9419 				/* No need to copy last frame in buffer. */
9420 				if (offset > 0)
9421 					m_adj(m0, offset);
9422 				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen, ml);
9423 				m0 = NULL; /* stack owns m0 now; abort loop */
9424 			} else {
9425 				/*
9426 				 * Create an mbuf which points to the current
9427 				 * packet. Always copy from offset zero to
9428 				 * preserve m_pkthdr.
9429 				 */
9430 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
9431 				if (m == NULL) {
9432 					ifp->if_ierrors++;
9433 					m_freem(m0);
9434 					m0 = NULL;
9435 					break;
9436 				}
9437 				m_adj(m, offset);
9438 				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen, ml);
9439 			}
9440  			break;
9441 		}
9442 
9443 		case IWX_BAR_FRAME_RELEASE:
9444 			iwx_rx_bar_frame_release(sc, pkt, ml);
9445 			break;
9446 
9447 		case IWX_TX_CMD:
9448 			iwx_rx_tx_cmd(sc, pkt, data);
9449 			break;
9450 
9451 		case IWX_BA_NOTIF:
9452 			iwx_rx_compressed_ba(sc, pkt);
9453 			break;
9454 
9455 		case IWX_MISSED_BEACONS_NOTIFICATION:
9456 			iwx_rx_bmiss(sc, pkt, data);
9457 			break;
9458 
9459 		case IWX_MFUART_LOAD_NOTIFICATION:
9460 			break;
9461 
9462 		case IWX_ALIVE: {
9463 			struct iwx_alive_resp_v4 *resp4;
9464 			struct iwx_alive_resp_v5 *resp5;
9465 
9466 			DPRINTF(("%s: firmware alive\n", __func__));
9467 			sc->sc_uc.uc_ok = 0;
9468 
9469 			/*
9470 			 * For v5 and above, we can check the version, for older
9471 			 * versions we need to check the size.
9472 			 */
9473 			if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
9474 			    IWX_ALIVE) == 5) {
9475 				SYNC_RESP_STRUCT(resp5, pkt);
9476 				if (iwx_rx_packet_payload_len(pkt) !=
9477 				    sizeof(*resp5)) {
9478 					sc->sc_uc.uc_intr = 1;
9479 					wakeup(&sc->sc_uc);
9480 					break;
9481 				}
9482 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9483 				    resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9484 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9485 				    resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9486 				sc->sc_uc.uc_log_event_table = le32toh(
9487 				    resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9488 				sc->sc_uc.uc_umac_error_event_table = le32toh(
9489 				    resp5->umac_data.dbg_ptrs.error_info_addr);
9490 				sc->sc_sku_id[0] =
9491 				    le32toh(resp5->sku_id.data[0]);
9492 				sc->sc_sku_id[1] =
9493 				    le32toh(resp5->sku_id.data[1]);
9494 				sc->sc_sku_id[2] =
9495 				    le32toh(resp5->sku_id.data[2]);
9496 				if (resp5->status == IWX_ALIVE_STATUS_OK)
9497 					sc->sc_uc.uc_ok = 1;
9498 			} else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
9499 				SYNC_RESP_STRUCT(resp4, pkt);
9500 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9501 				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9502 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9503 				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9504 				sc->sc_uc.uc_log_event_table = le32toh(
9505 				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9506 				sc->sc_uc.uc_umac_error_event_table = le32toh(
9507 				    resp4->umac_data.dbg_ptrs.error_info_addr);
9508 				if (resp4->status == IWX_ALIVE_STATUS_OK)
9509 					sc->sc_uc.uc_ok = 1;
9510 			}
9511 
9512 			sc->sc_uc.uc_intr = 1;
9513 			wakeup(&sc->sc_uc);
9514 			break;
9515 		}
9516 
9517 		case IWX_STATISTICS_NOTIFICATION: {
9518 			struct iwx_notif_statistics *stats;
9519 			SYNC_RESP_STRUCT(stats, pkt);
9520 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
9521 			sc->sc_noise = iwx_get_noise(&stats->rx.general);
9522 			break;
9523 		}
9524 
9525 		case IWX_DTS_MEASUREMENT_NOTIFICATION:
9526 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9527 				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
9528 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9529 				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
9530 			break;
9531 
9532 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9533 		    IWX_CT_KILL_NOTIFICATION): {
9534 			struct iwx_ct_kill_notif *notif;
9535 			SYNC_RESP_STRUCT(notif, pkt);
9536 			printf("%s: device at critical temperature (%u degC), "
9537 			    "stopping device\n",
9538 			    DEVNAME(sc), le16toh(notif->temperature));
9539 			sc->sc_flags |= IWX_FLAG_HW_ERR;
9540 			task_add(systq, &sc->init_task);
9541 			break;
9542 		}
9543 
9544 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
9545 		    IWX_SESSION_PROTECTION_CMD):
9546 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
9547 		    IWX_NVM_GET_INFO):
9548 		case IWX_ADD_STA_KEY:
9549 		case IWX_PHY_CONFIGURATION_CMD:
9550 		case IWX_TX_ANT_CONFIGURATION_CMD:
9551 		case IWX_ADD_STA:
9552 		case IWX_MAC_CONTEXT_CMD:
9553 		case IWX_REPLY_SF_CFG_CMD:
9554 		case IWX_POWER_TABLE_CMD:
9555 		case IWX_LTR_CONFIG:
9556 		case IWX_PHY_CONTEXT_CMD:
9557 		case IWX_BINDING_CONTEXT_CMD:
9558 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
9559 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
9560 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
9561 		case IWX_REPLY_BEACON_FILTERING_CMD:
9562 		case IWX_MAC_PM_POWER_TABLE:
9563 		case IWX_TIME_QUOTA_CMD:
9564 		case IWX_REMOVE_STA:
9565 		case IWX_TXPATH_FLUSH:
9566 		case IWX_BT_CONFIG:
9567 		case IWX_MCC_UPDATE_CMD:
9568 		case IWX_TIME_EVENT_CMD:
9569 		case IWX_STATISTICS_CMD:
9570 		case IWX_SCD_QUEUE_CFG: {
9571 			size_t pkt_len;
9572 
9573 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
9574 				break;
9575 
9576 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
9577 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
9578 
9579 			pkt_len = sizeof(pkt->len_n_flags) +
9580 			    iwx_rx_packet_len(pkt);
9581 
9582 			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
9583 			    pkt_len < sizeof(*pkt) ||
9584 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
9585 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
9586 				    sc->sc_cmd_resp_len[idx]);
9587 				sc->sc_cmd_resp_pkt[idx] = NULL;
9588 				break;
9589 			}
9590 
9591 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
9592 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
9593 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
9594 			break;
9595 		}
9596 
9597 		case IWX_INIT_COMPLETE_NOTIF:
9598 			sc->sc_init_complete |= IWX_INIT_COMPLETE;
9599 			wakeup(&sc->sc_init_complete);
9600 			break;
9601 
9602 		case IWX_SCAN_COMPLETE_UMAC: {
9603 			struct iwx_umac_scan_complete *notif;
9604 			SYNC_RESP_STRUCT(notif, pkt);
9605 			iwx_endscan(sc);
9606 			break;
9607 		}
9608 
9609 		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
9610 			struct iwx_umac_scan_iter_complete_notif *notif;
9611 			SYNC_RESP_STRUCT(notif, pkt);
9612 			iwx_endscan(sc);
9613 			break;
9614 		}
9615 
9616 		case IWX_MCC_CHUB_UPDATE_CMD: {
9617 			struct iwx_mcc_chub_notif *notif;
9618 			SYNC_RESP_STRUCT(notif, pkt);
9619 			iwx_mcc_update(sc, notif);
9620 			break;
9621 		}
9622 
9623 		case IWX_REPLY_ERROR: {
9624 			struct iwx_error_resp *resp;
9625 			SYNC_RESP_STRUCT(resp, pkt);
9626 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
9627 				DEVNAME(sc), le32toh(resp->error_type),
9628 				resp->cmd_id);
9629 			break;
9630 		}
9631 
9632 		case IWX_TIME_EVENT_NOTIFICATION: {
9633 			struct iwx_time_event_notif *notif;
9634 			uint32_t action;
9635 			SYNC_RESP_STRUCT(notif, pkt);
9636 
9637 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
9638 				break;
9639 			action = le32toh(notif->action);
9640 			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
9641 				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
9642 			break;
9643 		}
9644 
9645 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
9646 		    IWX_SESSION_PROTECTION_NOTIF): {
9647 			struct iwx_session_prot_notif *notif;
9648 			uint32_t status, start, conf_id;
9649 
9650 			SYNC_RESP_STRUCT(notif, pkt);
9651 
9652 			status = le32toh(notif->status);
9653 			start = le32toh(notif->start);
9654 			conf_id = le32toh(notif->conf_id);
9655 			/* Check for end of successful PROTECT_CONF_ASSOC. */
9656 			if (status == 1 && start == 0 &&
9657 			    conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
9658 				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
9659 			break;
9660 		}
9661 
9662 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
9663 		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
9664 		    break;
9665 
9666 		/*
9667 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
9668 		 * messages. Just ignore them for now.
9669 		 */
9670 		case IWX_DEBUG_LOG_MSG:
9671 			break;
9672 
9673 		case IWX_MCAST_FILTER_CMD:
9674 			break;
9675 
9676 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
9677 			break;
9678 
9679 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
9680 			break;
9681 
9682 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
9683 			break;
9684 
9685 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
9686 		    IWX_NVM_ACCESS_COMPLETE):
9687 			break;
9688 
9689 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
9690 			break; /* happens in monitor mode; ignore for now */
9691 
9692 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
9693 			break;
9694 
9695 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
9696 		    IWX_TLC_MNG_UPDATE_NOTIF): {
9697 			struct iwx_tlc_update_notif *notif;
9698 			SYNC_RESP_STRUCT(notif, pkt);
9699 			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
9700 				iwx_rs_update(sc, notif);
9701 			break;
9702 		}
9703 
9704 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
9705 		    IWX_PNVM_INIT_COMPLETE):
9706 			sc->sc_init_complete |= IWX_PNVM_COMPLETE;
9707 			wakeup(&sc->sc_init_complete);
9708 			break;
9709 
9710 		default:
9711 			handled = 0;
9712 			printf("%s: unhandled firmware response 0x%x/0x%x "
9713 			    "rx ring %d[%d]\n",
9714 			    DEVNAME(sc), code, pkt->len_n_flags,
9715 			    (qid & ~0x80), idx);
9716 			break;
9717 		}
9718 
9719 		/*
9720 		 * uCode sets bit 0x80 when it originates the notification,
9721 		 * i.e. when the notification is not a direct response to a
9722 		 * command sent by the driver.
9723 		 * For example, uCode issues IWX_REPLY_RX when it sends a
9724 		 * received frame to the driver.
9725 		 */
9726 		if (handled && !(qid & (1 << 7))) {
9727 			iwx_cmd_done(sc, qid, idx, code);
9728 		}
9729 
9730 		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
9731 
9732 		/* AX210 devices ship only one packet per Rx buffer. */
9733 		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
9734 			break;
9735 	}
9736 
9737 	if (m0 && m0 != data->m)
9738 		m_freem(m0);
9739 }
9740 
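/*
 * Drain the Rx ring up to the closed-buffer index last written by
 * firmware, hand processed frames to the network stack, and move the
 * firmware's write pointer forward (in multiples of 8, as the comment
 * below explains).
 */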
9741 void
9742 iwx_notif_intr(struct iwx_softc *sc)
9743 {
9744 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
9745 	uint16_t hw;
9746 
9747 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
9748 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
9749 
9750 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
9751 		uint16_t *status = sc->rxq.stat_dma.vaddr;
9752 		hw = le16toh(*status) & 0xfff;
9753 	} else
9754 		hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
9755 	hw &= (IWX_RX_MQ_RING_COUNT - 1);
9756 	while (sc->rxq.cur != hw) {
9757 		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
9758 		iwx_rx_pkt(sc, data, &ml);
9759 		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
9760 	}
9761 	if_input(&sc->sc_ic.ic_if, &ml);
9762 
9763 	/*
9764 	 * Tell the firmware what we have processed.
9765 	 * Seems like the hardware gets upset unless we align the write by 8??
9766 	 */
9767 	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
9768 	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
9769 }
9770 
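/*
 * Legacy (INTx/MSI) interrupt handler. With ICT enabled, interrupt
 * causes are read from a DMA table rather than IWX_CSR_INT; the
 * accumulated table bits are then expanded back into the CSR_INT bit
 * layout by the (0xff & r1) | ((0xff00 & r1) << 16) shuffle below.
 */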
9771 int
9772 iwx_intr(void *arg)
9773 {
9774 	struct iwx_softc *sc = arg;
9775 	struct ieee80211com *ic = &sc->sc_ic;
9776 	struct ifnet *ifp = IC2IFP(ic);
9777 	int handled = 0;
9778 	int r1, r2, rv = 0;
9779 
9780 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
9781 
9782 	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
9783 		uint32_t *ict = sc->ict_dma.vaddr;
9784 		int tmp;
9785 
9786 		tmp = htole32(ict[sc->ict_cur]);
9787 		if (!tmp)
9788 			goto out_ena;
9789 
9790 		/*
9791 		 * ok, there was something.  keep plowing until we have all.
9792 		 */
9793 		r1 = r2 = 0;
9794 		while (tmp) {
9795 			r1 |= tmp;
9796 			ict[sc->ict_cur] = 0;
9797 			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
9798 			tmp = htole32(ict[sc->ict_cur]);
9799 		}
9800 
9801 		/* this is where the fun begins.  don't ask */
9802 		if (r1 == 0xffffffff)
9803 			r1 = 0;
9804 
9805 		/* i am not expected to understand this */
9806 		if (r1 & 0xc0000)
9807 			r1 |= 0x8000;
9808 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
9809 	} else {
9810 		r1 = IWX_READ(sc, IWX_CSR_INT);
9811 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
9812 			goto out;
9813 		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
9814 	}
9815 	if (r1 == 0 && r2 == 0) {
9816 		goto out_ena;
9817 	}
9818 
9819 	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);
9820 
9821 	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
9822 		int i;
9823 
9824 		/* Firmware has now configured the RFH. */
9825 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
9826 			iwx_update_rx_desc(sc, &sc->rxq, i);
9827 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
9828 	}
9829 
9830 	handled |= (r1 & (IWX_CSR_INT_BIT_ALIVE /*| IWX_CSR_INT_BIT_SCD*/));
9831 
9832 	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
9833 		handled |= IWX_CSR_INT_BIT_RF_KILL;
9834 		iwx_check_rfkill(sc);
9835 		task_add(systq, &sc->init_task);
9836 		rv = 1;
9837 		goto out_ena;
9838 	}
9839 
9840 	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
9841 		if (ifp->if_flags & IFF_DEBUG) {
9842 			iwx_nic_error(sc);
9843 			iwx_dump_driver_status(sc);
9844 		}
9845 		printf("%s: fatal firmware error\n", DEVNAME(sc));
9846 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
9847 			task_add(systq, &sc->init_task);
9848 		rv = 1;
9849 		goto out;
9851 	}
9852 
9853 	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
9854 		handled |= IWX_CSR_INT_BIT_HW_ERR;
9855 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
9856 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
9857 			sc->sc_flags |= IWX_FLAG_HW_ERR;
9858 			task_add(systq, &sc->init_task);
9859 		}
9860 		rv = 1;
9861 		goto out;
9862 	}
9863 
9864 	/* firmware chunk loaded */
9865 	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
9866 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);
9867 		handled |= IWX_CSR_INT_BIT_FH_TX;
9868 
9869 		sc->sc_fw_chunk_done = 1;
9870 		wakeup(&sc->sc_fw);
9871 	}
9872 
9873 	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
9874 	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
9875 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
9876 			handled |= (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX);
9877 			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
9878 		}
9879 		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
9880 			handled |= IWX_CSR_INT_BIT_RX_PERIODIC;
9881 			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
9882 		}
9883 
9884 		/* Disable periodic interrupt; we use it as just a one-shot. */
9885 		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);
9886 
9887 		/*
9888 		 * Enable the periodic interrupt in 8 msec only if we received
9889 		 * a real RX interrupt (instead of just the periodic one), to
9890 		 * catch any dangling Rx interrupt. If it was just the periodic
9891 		 * interrupt, there was no dangling Rx activity, and no need
9892 		 * to extend the periodic interrupt; one-shot is enough.
9893 		 */
9894 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
9895 			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
9896 			    IWX_CSR_INT_PERIODIC_ENA);
9897 
9898 		iwx_notif_intr(sc);
9899 	}
9900 
9901 	rv = 1;
9902 
9903  out_ena:
9904 	iwx_restore_interrupts(sc);
9905  out:
9906 	return rv;
9907 }
9908 
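/*
 * MSI-X interrupt handler. FH (DMA/Rx) and HW causes live in separate
 * registers and are acknowledged by writing the read values back; on
 * the way out the vector's automask bit is cleared to re-arm the
 * interrupt (see the W1C comment at the end of this function).
 */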
9909 int
9910 iwx_intr_msix(void *arg)
9911 {
9912 	struct iwx_softc *sc = arg;
9913 	struct ieee80211com *ic = &sc->sc_ic;
9914 	struct ifnet *ifp = IC2IFP(ic);
9915 	uint32_t inta_fh, inta_hw;
9916 	int vector = 0;
9917 
9918 	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
9919 	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
9920 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
9921 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
9922 	inta_fh &= sc->sc_fh_mask;
9923 	inta_hw &= sc->sc_hw_mask;
9924 
9925 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
9926 	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
9927 		iwx_notif_intr(sc);
9928 	}
9929 
9930 	/* firmware chunk loaded */
9931 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
9932 		sc->sc_fw_chunk_done = 1;
9933 		wakeup(&sc->sc_fw);
9934 	}
9935 
9936 	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
9937 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
9938 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
9939 		if (ifp->if_flags & IFF_DEBUG) {
9940 			iwx_nic_error(sc);
9941 			iwx_dump_driver_status(sc);
9942 		}
9943 		printf("%s: fatal firmware error\n", DEVNAME(sc));
9944 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
9945 			task_add(systq, &sc->init_task);
9946 		return 1;
9947 	}
9948 
9949 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
9950 		iwx_check_rfkill(sc);
9951 		task_add(systq, &sc->init_task);
9952 	}
9953 
9954 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
9955 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
9956 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
9957 			sc->sc_flags |= IWX_FLAG_HW_ERR;
9958 			task_add(systq, &sc->init_task);
9959 		}
9960 		return 1;
9961 	}
9962 
9963 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
9964 		int i;
9965 
9966 		/* Firmware has now configured the RFH. */
9967 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
9968 			iwx_update_rx_desc(sc, &sc->rxq, i);
9969 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
9970 	}
9971 
9972 	/*
9973 	 * Before sending the interrupt the HW disables it to prevent
9974 	 * a nested interrupt. This is done by writing 1 to the corresponding
9975 	 * bit in the mask register. After handling the interrupt, it should be
9976 	 * re-enabled by clearing this bit. This register is defined as a
9977 	 * write-1-clear (W1C) register, meaning that the bit is cleared
9978 	 * by writing 1 to it.
9979 	 */
9980 	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
9981 	return 1;
9982 }
9983 
9984 typedef void *iwx_match_t;
9985 
9986 static const struct pci_matchid iwx_devices[] = {
9987 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_1 },
9988 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_2 },
9989 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_3 },
9990 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_4 },
9991 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_5 },
9992 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_6 },
9993 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_7 },
9994 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_8 },
9995 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_9 },
9996 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_10 },
9997 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_11 },
9998 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_12 },
9999 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_13 },
10000 	/* _14 is an MA device, not yet supported */
10001 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_15 },
10002 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_16 },
10003 };
10004 
10006 int
10007 iwx_match(struct device *parent, iwx_match_t match __unused, void *aux)
10008 {
10009 	struct pci_attach_args *pa = aux;
10010 	return pci_matchbyid(pa, iwx_devices, nitems(iwx_devices));
10011 }
10012 
10013 /*
10014  * The device info table below contains device-specific config overrides.
10015  * The most important parameter derived from this table is the name of the
10016  * firmware image to load.
10017  *
10018  * The Linux iwlwifi driver uses an "old" and a "new" device info table.
10019  * The "old" table matches devices based on PCI vendor/product IDs only.
10020  * The "new" table extends this with various device parameters derived
10021  * from MAC type and RF type.
10022  *
10023  * In iwlwifi "old" and "new" tables share the same array, where "old"
10024  * entries contain dummy values for data defined only for "new" entries.
10025  * As of 2022, Linux developers are still in the process of moving entries
10026  * from "old" to "new" style and it looks like this effort has stalled in
10027  * from "old" to "new" style and it looks like this effort has stalled
10028  * in some work-in-progress state for quite a while. Linux commits moving
10029  * Part of this complexity comes from iwlwifi supporting both iwm(4) and iwx(4)
10030  * devices in the same driver.
10031  *
10032  * Our table below contains mostly "new" entries declared in iwlwifi
10033  * with the _IWL_DEV_INFO() macro (with a leading underscore).
10034  * Other devices are matched based on PCI vendor/product ID as usual,
10035  * unless matching specific PCI subsystem vendor/product IDs is required.
10036  *
10037  * Some "old"-style entries are required to identify the firmware image to use.
10038  * Others might be used to print a specific marketing name into Linux dmesg,
10039  * but we can't be sure whether the corresponding devices would be matched
10040  * correctly in the absence of their entries. So we include them just in case.
10041  */
10042 
10043 struct iwx_dev_info {
10044 	uint16_t device;
10045 	uint16_t subdevice;
10046 	uint16_t mac_type;
10047 	uint16_t rf_type;
10048 	uint8_t mac_step;
10049 	uint8_t rf_id;
10050 	uint8_t no_160;
10051 	uint8_t cores;
10052 	uint8_t cdb;
10053 	uint8_t jacket;
10054 	const struct iwx_device_cfg *cfg;
10055 };
10056 
10057 #define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
10058 		      _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
10059 	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg),  \
10060 	  .mac_type = _mac_type, .rf_type = _rf_type,	   \
10061 	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id,		   \
10062 	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }
10063 
10064 #define IWX_DEV_INFO(_device, _subdevice, _cfg) \
10065 	_IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY,	   \
10066 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY,  \
10067 		      IWX_CFG_ANY, IWX_CFG_ANY, _cfg)
10068 
10069 /*
10070  * When adding entries to this table keep in mind that entries must
10071  * be listed in the same order as in the Linux driver. Code walks this
10072  * table backwards and uses the first matching entry it finds.
10073  * Device firmware must be available in fw_update(8).
10074  */
10075 static const struct iwx_dev_info iwx_dev_info_table[] = {
10076 	/* So with HR */
10077 	IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
10078 	IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
10079 	IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
10080 	IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
10081 	IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
10082 	IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
10083 	IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
10084 	IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
10085 	IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
10086 	IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
10087 	IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
10088 	IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
10089 	IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
10090 	IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
10091 	IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10092 	IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10093 	IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10094 	IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10095 	IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
10096 	IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
10097 	IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
10098 	IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
10099 	IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
10100 	IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
10101 	IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
10102 	IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
10103 	IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
10104 	IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10105 	IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10106 	IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
10107 	IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
10108 	IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
10109 	IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10110 	IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10111 
10112 	/* So with GF2 */
10113 	IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10114 	IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10115 	IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10116 	IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10117 	IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10118 	IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10119 	IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10120 	IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10121 	IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10122 	IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10123 	IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10124 	IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10125 
10126 	/* Qu with Jf, C step */
10127 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10128 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10129 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10130 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10131 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
10132 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10133 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10134 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10135 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10136 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
10137 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10138 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10139 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10140 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10141 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
10142 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10143 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10144 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10145 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10146 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
10147 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10148 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10149 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10150 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10151 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
10152 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10153 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10154 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10155 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10156 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
10157 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
10158 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10159 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10160 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10161 		      IWX_CFG_ANY,
10162 		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
10163 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
10164 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10165 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10166 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10167 		      IWX_CFG_ANY,
10168 		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */
10169 
10170 	/* QuZ with Jf */
10171 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10172 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10173 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10174 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10175 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
10176 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10177 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10178 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10179 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10180 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
10181 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10182 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10183 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10184 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10185 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
10186 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10187 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10188 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10189 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10190 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
10191 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
10192 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10193 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10194 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10195 		      IWX_CFG_ANY,
10196 		      iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
10197 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
10198 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10199 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10200 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10201 		      IWX_CFG_ANY,
10202 		      iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */
10203 
10204 	/* Qu with Hr, B step */
10205 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10206 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
10207 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10208 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10209 		      iwx_qu_b0_hr1_b0), /* AX101 */
10210 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10211 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
10212 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10213 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10214 		      iwx_qu_b0_hr_b0), /* AX203 */
10215 
10216 	/* Qu with Hr, C step */
10217 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10218 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10219 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10220 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10221 		      iwx_qu_c0_hr1_b0), /* AX101 */
10222 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10223 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10224 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10225 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10226 		      iwx_qu_c0_hr_b0), /* AX203 */
10227 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10228 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10229 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10230 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10231 		      iwx_qu_c0_hr_b0), /* AX201 */
10232 
10233 	/* QuZ with Hr */
10234 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10235 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10236 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10237 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10238 		      iwx_quz_a0_hr1_b0), /* AX101 */
10239 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10240 		      IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
10241 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10242 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10243 		      iwx_cfg_quz_a0_hr_b0), /* AX203 */
10244 
10245 	/* SoF with JF2 */
10246 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10247 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10248 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10249 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10250 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
10251 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10252 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10253 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10254 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10255 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
10256 
10257 	/* SoF with JF */
10258 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10259 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10260 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10261 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10262 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
10263 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10264 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10265 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10266 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10267 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
10268 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10269 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10270 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10271 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10272 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461 */
10273 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10274 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10275 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10276 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10277 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
10278 
10279 	/* So with Hr */
10280 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10281 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10282 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10283 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10284 		      iwx_cfg_so_a0_hr_b0), /* AX203 */
10285 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10286 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10287 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10288 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10289 		      iwx_cfg_so_a0_hr_b0), /* AX101 */
10290 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10291 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10292 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10293 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10294 		      iwx_cfg_so_a0_hr_b0), /* AX201 */
10295 
10296 	/* So-F with Hr */
10297 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10298 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10299 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10300 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10301 		      iwx_cfg_so_a0_hr_b0), /* AX203 */
10302 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10303 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10304 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10305 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10306 		      iwx_cfg_so_a0_hr_b0), /* AX101 */
10307 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10308 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10309 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10310 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10311 		      iwx_cfg_so_a0_hr_b0), /* AX201 */
10312 
10313 	/* So-F with GF */
10314 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10315 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10316 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
10317 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10318 		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
10319 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10320 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10321 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
10322 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
10323 		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */
10324 
10325 	/* So with GF */
10326 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10327 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10328 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
10329 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10330 		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
10331 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10332 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10333 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
10334 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
10335 		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */
10336 
10337 	/* So with JF2 */
10338 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10339 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10340 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10341 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10342 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
10343 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10344 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10345 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10346 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10347 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
10348 
10349 	/* So with JF */
10350 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10351 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10352 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10353 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10354 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
10355 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10356 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10357 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10358 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10359 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
10360 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10361 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10362 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10363 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10364 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461 */
10365 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10366 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10367 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10368 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10369 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
10370 };
10371 
10372 int
10373 iwx_preinit(struct iwx_softc *sc)
10374 {
10375 	struct ieee80211com *ic = &sc->sc_ic;
10376 	struct ifnet *ifp = IC2IFP(ic);
10377 	int err;
10378 
10379 	err = iwx_prepare_card_hw(sc);
10380 	if (err) {
10381 		printf("%s: could not prepare hardware\n", DEVNAME(sc));
10382 		return err;
10383 	}
10384 
10385 	if (sc->attached) {
10386 		/* Update MAC in case the upper layers changed it. */
10387 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
10388 		    ((struct arpcom *)ifp)->ac_enaddr);
10389 		return 0;
10390 	}
10391 
10392 	err = iwx_start_hw(sc);
10393 	if (err) {
10394 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10395 		return err;
10396 	}
10397 
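	/*
	 * Run the init firmware image once so the NVM contents (device
	 * capabilities and the MAC address used below) can be read, then
	 * power the device back down until the interface is brought up.
	 */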
10398 	err = iwx_run_init_mvm_ucode(sc, 1);
10399 	iwx_stop_device(sc);
10400 	if (err)
10401 		return err;
10402 
10403 	/* Print version info and MAC address on first successful fw load. */
10404 	sc->attached = 1;
10405 	if (sc->sc_pnvm_ver) {
10406 		printf("%s: hw rev 0x%x, fw %s, pnvm %08x, "
10407 		    "address %s\n",
10408 		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
10409 		    sc->sc_fwver, sc->sc_pnvm_ver,
10410 		    ether_sprintf(sc->sc_nvm.hw_addr));
10411 	} else {
10412 		printf("%s: hw rev 0x%x, fw %s, address %s\n",
10413 		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
10414 		    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
10415 	}
10416 
10417 	if (sc->sc_nvm.sku_cap_11n_enable)
10418 		iwx_setup_ht_rates(sc);
10419 	if (sc->sc_nvm.sku_cap_11ac_enable)
10420 		iwx_setup_vht_rates(sc);
10421 
10422 	/* Not all hardware can do the 5 GHz band. */
10423 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
10424 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
10425 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
10426 
10427 	/* Configure channel information obtained from firmware. */
10428 	ieee80211_channel_init(ifp);
10429 
10430 	/* Configure MAC address. */
10431 	err = if_setlladdr(ifp, ic->ic_myaddr);
10432 	if (err)
10433 		printf("%s: could not set MAC address (error %d)\n",
10434 		    DEVNAME(sc), err);
10435 
10436 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
10437 
10438 	return 0;
10439 }
10440 
10441 void
10442 iwx_attach_hook(struct device *self)
10443 {
10444 	struct iwx_softc *sc = (void *)self;
10445 
10446 	KASSERT(!cold);
10447 
10448 	iwx_preinit(sc);
10449 }
10450 
10451 const struct iwx_device_cfg *
10452 iwx_find_device_cfg(struct iwx_softc *sc)
10453 {
10454 	pcireg_t sreg;
10455 	pci_product_id_t sdev_id;
10456 	uint16_t mac_type, rf_type;
10457 	uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
10458 	int i;
10459 
10460 	sreg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_SUBSYS_ID_REG);
10461 	sdev_id = PCI_PRODUCT(sreg);
10462 	mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
10463 	mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
10464 	rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
10465 	cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
10466 	jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);
10467 
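	/*
	 * The PCI subsystem device ID encodes further distinguishing
	 * features: the RF ID, 160 MHz capability, and the core config.
	 */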
10468 	rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
10469 	no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
10470 	cores = IWX_SUBDEVICE_CORES(sdev_id);
10471 
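	/*
	 * Scan the table backwards; entries appearing later in the
	 * table take precedence over earlier, more generic ones.
	 */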
10472 	for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
10473 		const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];
10474 
10475 		if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
10476 		    dev_info->device != sc->sc_pid)
10477 			continue;
10478 
10479 		if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
10480 		    dev_info->subdevice != sdev_id)
10481 			continue;
10482 
10483 		if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
10484 		    dev_info->mac_type != mac_type)
10485 			continue;
10486 
10487 		if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
10488 		    dev_info->mac_step != mac_step)
10489 			continue;
10490 
10491 		if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
10492 		    dev_info->rf_type != rf_type)
10493 			continue;
10494 
10495 		if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
10496 		    dev_info->cdb != cdb)
10497 			continue;
10498 
10499 		if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
10500 		    dev_info->jacket != jacket)
10501 			continue;
10502 
10503 		if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
10504 		    dev_info->rf_id != rf_id)
10505 			continue;
10506 
10507 		if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
10508 		    dev_info->no_160 != no_160)
10509 			continue;
10510 
10511 		if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
10512 		    dev_info->cores != cores)
10513 			continue;
10514 
10515 		return dev_info->cfg;
10516 	}
10517 
10518 	return NULL;
10519 }
10520 
10522 void
10523 iwx_attach(struct device *parent, struct device *self, void *aux)
10524 {
10525 	struct iwx_softc *sc = (void *)self;
10526 	struct pci_attach_args *pa = aux;
10527 	pci_intr_handle_t ih;
10528 	pcireg_t reg, memtype;
10529 	struct ieee80211com *ic = &sc->sc_ic;
10530 	struct ifnet *ifp = &ic->ic_if;
10531 	const char *intrstr;
10532 	const struct iwx_device_cfg *cfg;
10533 	int err;
10534 	int txq_i, i, j;
10535 	size_t ctxt_info_size;
10536 
10537 	sc->sc_pid = PCI_PRODUCT(pa->pa_id);
10538 	sc->sc_pct = pa->pa_pc;
10539 	sc->sc_pcitag = pa->pa_tag;
10540 	sc->sc_dmat = pa->pa_dmat;
10541 
10542 	rw_init(&sc->ioctl_rwl, "iwxioctl");
10543 
10544 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
10545 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
10546 	if (err == 0) {
10547 		printf("%s: PCIe capability structure not found!\n",
10548 		    DEVNAME(sc));
10549 		return;
10550 	}
10551 
10552 	/*
10553 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
10554 	 * PCI Tx retries from interfering with C3 CPU state.
10555 	 */
10556 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
10557 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
10558 
10559 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
10560 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
10561 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
10562 	if (err) {
10563 		printf("%s: can't map mem space\n", DEVNAME(sc));
10564 		return;
10565 	}
10566 
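	/* Prefer MSI-X; fall back to MSI, then to legacy INTx. */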
10567 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
10568 		sc->sc_msix = 1;
10569 	} else if (pci_intr_map_msi(pa, &ih)) {
10570 		if (pci_intr_map(pa, &ih)) {
10571 			printf("%s: can't map interrupt\n", DEVNAME(sc));
10572 			return;
10573 		}
10574 		/* Hardware bug workaround. */
10575 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
10576 		    PCI_COMMAND_STATUS_REG);
10577 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
10578 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
10579 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
10580 		    PCI_COMMAND_STATUS_REG, reg);
10581 	}
10582 
10583 	intrstr = pci_intr_string(sc->sc_pct, ih);
10584 	if (sc->sc_msix)
10585 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
10586 		    iwx_intr_msix, sc, DEVNAME(sc));
10587 	else
10588 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
10589 		    iwx_intr, sc, DEVNAME(sc));
10590 
10591 	if (sc->sc_ih == NULL) {
10592 		printf("\n");
10593 		printf("%s: can't establish interrupt", DEVNAME(sc));
10594 		if (intrstr != NULL)
10595 			printf(" at %s", intrstr);
10596 		printf("\n");
10597 		return;
10598 	}
10599 	printf(", %s\n", intrstr);
10600 
10601 	/* Clear pending interrupts. */
10602 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
10603 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
10604 	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
10605 
10606 	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
10607 	sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
10608 
10609 	/*
10610 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
10611 	 * changed: the revision step now also includes bits 0-1 (there is no
10612 	 * more "dash" value). To keep hw_rev backwards compatible, we store
10613 	 * it in the old format.
10614 	 */
10615 	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
10616 			(IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
10617 
10618 	switch (PCI_PRODUCT(pa->pa_id)) {
10619 	case PCI_PRODUCT_INTEL_WL_22500_1:
10620 		sc->sc_fwname = IWX_CC_A_FW;
10621 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10622 		sc->sc_integrated = 0;
10623 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
10624 		sc->sc_low_latency_xtal = 0;
10625 		sc->sc_xtal_latency = 0;
10626 		sc->sc_tx_with_siso_diversity = 0;
10627 		sc->sc_uhb_supported = 0;
10628 		break;
10629 	case PCI_PRODUCT_INTEL_WL_22500_2:
10630 	case PCI_PRODUCT_INTEL_WL_22500_5:
10631 		/* These devices should be QuZ only. */
10632 		if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
10633 			printf("%s: unsupported AX201 adapter\n", DEVNAME(sc));
10634 			return;
10635 		}
10636 		sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10637 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10638 		sc->sc_integrated = 1;
10639 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
10640 		sc->sc_low_latency_xtal = 0;
10641 		sc->sc_xtal_latency = 500;
10642 		sc->sc_tx_with_siso_diversity = 0;
10643 		sc->sc_uhb_supported = 0;
10644 		break;
10645 	case PCI_PRODUCT_INTEL_WL_22500_3:
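		/* Select the firmware image matching the detected silicon. */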
10646 		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10647 			sc->sc_fwname = IWX_QU_C_HR_B_FW;
10648 		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10649 			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10650 		else
10651 			sc->sc_fwname = IWX_QU_B_HR_B_FW;
10652 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10653 		sc->sc_integrated = 1;
10654 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
10655 		sc->sc_low_latency_xtal = 0;
10656 		sc->sc_xtal_latency = 500;
10657 		sc->sc_tx_with_siso_diversity = 0;
10658 		sc->sc_uhb_supported = 0;
10659 		break;
10660 	case PCI_PRODUCT_INTEL_WL_22500_4:
10661 	case PCI_PRODUCT_INTEL_WL_22500_7:
10662 	case PCI_PRODUCT_INTEL_WL_22500_8:
10663 		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10664 			sc->sc_fwname = IWX_QU_C_HR_B_FW;
10665 		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10666 			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10667 		else
10668 			sc->sc_fwname = IWX_QU_B_HR_B_FW;
10669 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10670 		sc->sc_integrated = 1;
10671 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
10672 		sc->sc_low_latency_xtal = 0;
10673 		sc->sc_xtal_latency = 1820;
10674 		sc->sc_tx_with_siso_diversity = 0;
10675 		sc->sc_uhb_supported = 0;
10676 		break;
10677 	case PCI_PRODUCT_INTEL_WL_22500_6:
10678 		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10679 			sc->sc_fwname = IWX_QU_C_HR_B_FW;
10680 		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10681 			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10682 		else
10683 			sc->sc_fwname = IWX_QU_B_HR_B_FW;
10684 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10685 		sc->sc_integrated = 1;
10686 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
10687 		sc->sc_low_latency_xtal = 1;
10688 		sc->sc_xtal_latency = 12000;
10689 		sc->sc_tx_with_siso_diversity = 0;
10690 		sc->sc_uhb_supported = 0;
10691 		break;
10692 	case PCI_PRODUCT_INTEL_WL_22500_9:
10693 	case PCI_PRODUCT_INTEL_WL_22500_10:
10694 	case PCI_PRODUCT_INTEL_WL_22500_11:
10695 	case PCI_PRODUCT_INTEL_WL_22500_12:
10696 	case PCI_PRODUCT_INTEL_WL_22500_13:
10697 	/* _14 is an MA device, not yet supported */
10698 	case PCI_PRODUCT_INTEL_WL_22500_15:
10699 	case PCI_PRODUCT_INTEL_WL_22500_16:
10700 		sc->sc_fwname = IWX_SO_A_GF_A_FW;
10701 		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
10702 		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
10703 		sc->sc_integrated = 0;
10704 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
10705 		sc->sc_low_latency_xtal = 0;
10706 		sc->sc_xtal_latency = 0;
10707 		sc->sc_tx_with_siso_diversity = 0;
10708 		sc->sc_uhb_supported = 1;
10709 		break;
10710 	default:
10711 		printf("%s: unknown adapter type\n", DEVNAME(sc));
10712 		return;
10713 	}
10714 
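	/*
	 * A device configuration matched via the table above overrides
	 * the PCI product ID based defaults chosen by the switch.
	 */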
10715 	cfg = iwx_find_device_cfg(sc);
10716 	if (cfg) {
10717 		sc->sc_fwname = cfg->fw_name;
10718 		sc->sc_pnvm_name = cfg->pnvm_name;
10719 		sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
10720 		sc->sc_uhb_supported = cfg->uhb_supported;
10721 		if (cfg->xtal_latency) {
10722 			sc->sc_xtal_latency = cfg->xtal_latency;
10723 			sc->sc_low_latency_xtal = cfg->low_latency_xtal;
10724 		}
10725 	}
10726 
10727 	sc->mac_addr_from_csr = 0x380; /* differs on BZ hw generation */
10728 
10729 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
10730 		sc->sc_umac_prph_offset = 0x300000;
10731 		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX_GEN3;
10732 	} else
10733 		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX;
10734 
10735 	/* Allocate DMA memory for loading firmware. */
10736 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
10737 		ctxt_info_size = sizeof(struct iwx_context_info_gen3);
10738 	else
10739 		ctxt_info_size = sizeof(struct iwx_context_info);
10740 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
10741 	    ctxt_info_size, 0);
10742 	if (err) {
10743 		printf("%s: could not allocate memory for loading firmware\n",
10744 		    DEVNAME(sc));
10745 		return;
10746 	}
10747 
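	/*
	 * AX210 and later devices use the gen3 context info format and
	 * need additional prph scratch and prph info areas in DMA memory.
	 */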
10748 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
10749 		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_scratch_dma,
10750 		    sizeof(struct iwx_prph_scratch), 0);
10751 		if (err) {
10752 			printf("%s: could not allocate prph scratch memory\n",
10753 			    DEVNAME(sc));
10754 			goto fail1;
10755 		}
10756 
10757 		/*
10758 		 * Allocate prph information. The driver itself does not use
10759 		 * this memory. The second half of this page gives the device
10760 		 * some dummy TR/CR tail pointers; these should not be needed
10761 		 * since we do not use them, but the hardware still reads and
10762 		 * writes there, and we cannot let it do so through a NULL
10763 		 * pointer.
10764 		 */
10765 		KASSERT(sizeof(struct iwx_prph_info) < PAGE_SIZE / 2);
10766 		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma,
10767 		    PAGE_SIZE, 0);
10768 		if (err) {
10769 			printf("%s: could not allocate prph info memory\n",
10770 			    DEVNAME(sc));
10771 			goto fail1;
10772 		}
10773 	}
10774 
10775 	/* Allocate interrupt cause table (ICT). */
10776 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
10777 	    IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
10778 	if (err) {
10779 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
10780 		goto fail1;
10781 	}
10782 
10783 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
10784 		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
10785 		if (err) {
10786 			printf("%s: could not allocate TX ring %d\n",
10787 			    DEVNAME(sc), txq_i);
10788 			goto fail4;
10789 		}
10790 	}
10791 
10792 	err = iwx_alloc_rx_ring(sc, &sc->rxq);
10793 	if (err) {
10794 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
10795 		goto fail4;
10796 	}
10797 
10798 	sc->sc_nswq = taskq_create("iwxns", 1, IPL_NET, 0);
10799 	if (sc->sc_nswq == NULL)
10800 		goto fail4;
10801 
10802 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not the only type, but unused */
10803 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
10804 	ic->ic_state = IEEE80211_S_INIT;
10805 
10806 	/* Set device capabilities. */
10807 	ic->ic_caps =
10808 	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
10809 	    IEEE80211_C_ADDBA_OFFLOAD | /* device sends ADDBA/DELBA frames */
10810 	    IEEE80211_C_WEP |		/* WEP */
10811 	    IEEE80211_C_RSN |		/* WPA/RSN */
10812 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
10813 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
10814 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
10815 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
10816 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
10817 
10818 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
10819 	ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
10820 	ic->ic_htcaps |=
10821 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
10822 	ic->ic_htxcaps = 0;
10823 	ic->ic_txbfcaps = 0;
10824 	ic->ic_aselcaps = 0;
10825 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
10826 
10827 	ic->ic_vhtcaps = IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895 |
10828 	    (IEEE80211_VHTCAP_MAX_AMPDU_LEN_64K <<
10829 	    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT) |
10830 	    (IEEE80211_VHTCAP_CHAN_WIDTH_80 <<
10831 	     IEEE80211_VHTCAP_CHAN_WIDTH_SHIFT) | IEEE80211_VHTCAP_SGI80 |
10832 	    IEEE80211_VHTCAP_RX_ANT_PATTERN | IEEE80211_VHTCAP_TX_ANT_PATTERN;
10833 
10834 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
10835 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
10836 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
10837 
10838 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
10839 		sc->sc_phyctxt[i].id = i;
10840 		sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
10841 		sc->sc_phyctxt[i].vht_chan_width =
10842 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT;
10843 	}
10844 
10845 	/* IBSS channel undefined for now. */
10846 	ic->ic_ibss_chan = &ic->ic_channels[1];
10847 
10848 	ic->ic_max_rssi = IWX_MAX_DBM - IWX_MIN_DBM;
10849 
10850 	ifp->if_softc = sc;
10851 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
10852 	ifp->if_ioctl = iwx_ioctl;
10853 	ifp->if_start = iwx_start;
10854 	ifp->if_watchdog = iwx_watchdog;
10855 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
10856 
10857 	if_attach(ifp);
10858 	ieee80211_ifattach(ifp);
10859 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
10860 
10861 #if NBPFILTER > 0
10862 	iwx_radiotap_attach(sc);
10863 #endif
10864 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10865 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
10866 		rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
10867 		rxba->sc = sc;
10868 		timeout_set(&rxba->session_timer, iwx_rx_ba_session_expired,
10869 		    rxba);
10870 		timeout_set(&rxba->reorder_buf.reorder_timer,
10871 		    iwx_reorder_timer_expired, &rxba->reorder_buf);
10872 		for (j = 0; j < nitems(rxba->entries); j++)
10873 			ml_init(&rxba->entries[j].frames);
10874 	}
10875 	task_set(&sc->init_task, iwx_init_task, sc);
10876 	task_set(&sc->newstate_task, iwx_newstate_task, sc);
10877 	task_set(&sc->ba_task, iwx_ba_task, sc);
10878 	task_set(&sc->setkey_task, iwx_setkey_task, sc);
10879 	task_set(&sc->mac_ctxt_task, iwx_mac_ctxt_task, sc);
10880 	task_set(&sc->phy_ctxt_task, iwx_phy_ctxt_task, sc);
10881 	task_set(&sc->bgscan_done_task, iwx_bgscan_done_task, sc);
10882 
10883 	ic->ic_node_alloc = iwx_node_alloc;
10884 	ic->ic_bgscan_start = iwx_bgscan;
10885 	ic->ic_bgscan_done = iwx_bgscan_done;
10886 	ic->ic_set_key = iwx_set_key;
10887 	ic->ic_delete_key = iwx_delete_key;
10888 
10889 	/* Override 802.11 state transition machine. */
10890 	sc->sc_newstate = ic->ic_newstate;
10891 	ic->ic_newstate = iwx_newstate;
10892 	ic->ic_updateprot = iwx_updateprot;
10893 	ic->ic_updateslot = iwx_updateslot;
10894 	ic->ic_updateedca = iwx_updateedca;
10895 	ic->ic_updatedtim = iwx_updatedtim;
10896 	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
10897 	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
10898 	ic->ic_ampdu_tx_start = iwx_ampdu_tx_start;
10899 	ic->ic_ampdu_tx_stop = NULL;
10900 	/*
10901 	 * We cannot read the MAC address without loading the
10902 	 * firmware from disk. Postpone until mountroot is done.
10903 	 */
10904 	config_mountroot(self, iwx_attach_hook);
10905 
10906 	return;
10907 
10908 fail4:	while (--txq_i >= 0)
10909 		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
10910 	iwx_free_rx_ring(sc, &sc->rxq);
10911 	if (sc->ict_dma.vaddr != NULL)
10912 		iwx_dma_contig_free(&sc->ict_dma);
10913 
10914 fail1:	iwx_dma_contig_free(&sc->ctxt_info_dma);
10915 	iwx_dma_contig_free(&sc->prph_scratch_dma);
10916 	iwx_dma_contig_free(&sc->prph_info_dma);
10917 	return;
10918 }
10919 
10920 #if NBPFILTER > 0
10921 void
10922 iwx_radiotap_attach(struct iwx_softc *sc)
10923 {
10924 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
10925 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
10926 
10927 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
10928 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
10929 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);
10930 
10931 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
10932 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
10933 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
10934 }
10935 #endif
10936 
10937 void
10938 iwx_init_task(void *arg1)
10939 {
10940 	struct iwx_softc *sc = arg1;
10941 	struct ifnet *ifp = &sc->sc_ic.ic_if;
10942 	int s = splnet();
10943 	int generation = sc->sc_generation;
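	/* Capture fatal error state before it is cleared again below. */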
10944 	int fatal = (sc->sc_flags & (IWX_FLAG_HW_ERR | IWX_FLAG_RFKILL));
10945 
10946 	rw_enter_write(&sc->ioctl_rwl);
10947 	if (generation != sc->sc_generation) {
10948 		rw_exit(&sc->ioctl_rwl);
10949 		splx(s);
10950 		return;
10951 	}
10952 
10953 	if (ifp->if_flags & IFF_RUNNING)
10954 		iwx_stop(ifp);
10955 	else
10956 		sc->sc_flags &= ~IWX_FLAG_HW_ERR;
10957 
10958 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
10959 		iwx_init(ifp);
10960 
10961 	rw_exit(&sc->ioctl_rwl);
10962 	splx(s);
10963 }
10964 
10965 void
10966 iwx_resume(struct iwx_softc *sc)
10967 {
10968 	pcireg_t reg;
10969 
10970 	/*
10971 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
10972 	 * PCI Tx retries from interfering with C3 CPU state.
10973 	 */
10974 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
10975 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
10976 
10977 	if (!sc->sc_msix) {
10978 		/* Hardware bug workaround. */
10979 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
10980 		    PCI_COMMAND_STATUS_REG);
10981 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
10982 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
10983 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
10984 		    PCI_COMMAND_STATUS_REG, reg);
10985 	}
10986 
10987 	iwx_disable_interrupts(sc);
10988 }
10989 
10990 int
10991 iwx_wakeup(struct iwx_softc *sc)
10992 {
10993 	struct ieee80211com *ic = &sc->sc_ic;
10994 	struct ifnet *ifp = &sc->sc_ic.ic_if;
10995 	int err;
10996 
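	/*
	 * The device was powered down across suspend, so firmware and
	 * hardware state must be rebuilt from scratch.
	 */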
10997 	err = iwx_start_hw(sc);
10998 	if (err)
10999 		return err;
11000 
11001 	err = iwx_init_hw(sc);
11002 	if (err)
11003 		return err;
11004 
11005 	refcnt_init(&sc->task_refs);
11006 	ifq_clr_oactive(&ifp->if_snd);
11007 	ifp->if_flags |= IFF_RUNNING;
11008 
11009 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
11010 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
11011 	else
11012 		ieee80211_begin_scan(ifp);
11013 
11014 	return 0;
11015 }
11016 
11017 int
11018 iwx_activate(struct device *self, int act)
11019 {
11020 	struct iwx_softc *sc = (struct iwx_softc *)self;
11021 	struct ifnet *ifp = &sc->sc_ic.ic_if;
11022 	int err = 0;
11023 
11024 	switch (act) {
11025 	case DVACT_QUIESCE:
11026 		if (ifp->if_flags & IFF_RUNNING) {
11027 			rw_enter_write(&sc->ioctl_rwl);
11028 			iwx_stop(ifp);
11029 			rw_exit(&sc->ioctl_rwl);
11030 		}
11031 		break;
11032 	case DVACT_RESUME:
11033 		iwx_resume(sc);
11034 		break;
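	/*
	 * Full reinitialization requires loading firmware from disk,
	 * so it is deferred to DVACT_WAKEUP, which runs once normal
	 * system operation has resumed.
	 */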
11035 	case DVACT_WAKEUP:
11036 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
11037 			err = iwx_wakeup(sc);
11038 			if (err)
11039 				printf("%s: could not initialize hardware\n",
11040 				    DEVNAME(sc));
11041 		}
11042 		break;
11043 	}
11044 
11045 	return 0;
11046 }
11047 
11048 struct cfdriver iwx_cd = {
11049 	NULL, "iwx", DV_IFNET
11050 };
11051 
11052 const struct cfattach iwx_ca = {
11053 	sizeof(struct iwx_softc), iwx_match, iwx_attach,
11054 	NULL, iwx_activate
11055 };
11056