xref: /openbsd-src/sys/dev/pci/if_iwx.c (revision c1a45aed656e7d5627c30c92421893a76f370ccb)
1 /*	$OpenBSD: if_iwx.c,v 1.142 2022/04/16 16:21:50 stsp Exp $	*/
2 
3 /*
4  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5  *   Author: Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2014 Fixup Software Ltd.
7  * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ******************************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2017 Intel Deutschland GmbH
34  * Copyright(c) 2018 - 2019 Intel Corporation
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * BSD LICENSE
46  *
47  * Copyright(c) 2017 Intel Deutschland GmbH
48  * Copyright(c) 2018 - 2019 Intel Corporation
49  * All rights reserved.
50  *
51  * Redistribution and use in source and binary forms, with or without
52  * modification, are permitted provided that the following conditions
53  * are met:
54  *
55  *  * Redistributions of source code must retain the above copyright
56  *    notice, this list of conditions and the following disclaimer.
57  *  * Redistributions in binary form must reproduce the above copyright
58  *    notice, this list of conditions and the following disclaimer in
59  *    the documentation and/or other materials provided with the
60  *    distribution.
61  *  * Neither the name Intel Corporation nor the names of its
62  *    contributors may be used to endorse or promote products derived
63  *    from this software without specific prior written permission.
64  *
65  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
66  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
67  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
68  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
69  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
70  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
71  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
72  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
73  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
74  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
75  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
76  *
77  *****************************************************************************
78  */
79 
80 /*-
81  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
82  *
83  * Permission to use, copy, modify, and distribute this software for any
84  * purpose with or without fee is hereby granted, provided that the above
85  * copyright notice and this permission notice appear in all copies.
86  *
87  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
88  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
89  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
90  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
91  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
92  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
93  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
94  */
95 
96 #include "bpfilter.h"
97 
98 #include <sys/param.h>
99 #include <sys/conf.h>
100 #include <sys/kernel.h>
101 #include <sys/malloc.h>
102 #include <sys/mbuf.h>
103 #include <sys/mutex.h>
104 #include <sys/proc.h>
105 #include <sys/rwlock.h>
106 #include <sys/socket.h>
107 #include <sys/sockio.h>
108 #include <sys/systm.h>
109 #include <sys/endian.h>
110 
111 #include <sys/refcnt.h>
112 #include <sys/task.h>
113 #include <machine/bus.h>
114 #include <machine/intr.h>
115 
116 #include <dev/pci/pcireg.h>
117 #include <dev/pci/pcivar.h>
118 #include <dev/pci/pcidevs.h>
119 
120 #if NBPFILTER > 0
121 #include <net/bpf.h>
122 #endif
123 #include <net/if.h>
124 #include <net/if_dl.h>
125 #include <net/if_media.h>
126 
127 #include <netinet/in.h>
128 #include <netinet/if_ether.h>
129 
130 #include <net80211/ieee80211_var.h>
131 #include <net80211/ieee80211_radiotap.h>
132 #include <net80211/ieee80211_priv.h> /* for SEQ_LT */
133 #undef DPRINTF /* defined in ieee80211_priv.h */
134 
135 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
136 
137 #define IC2IFP(_ic_) (&(_ic_)->ic_if)
138 
139 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
140 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
141 
142 #ifdef IWX_DEBUG
143 #define DPRINTF(x)	do { if (iwx_debug > 0) printf x; } while (0)
144 #define DPRINTFN(n, x)	do { if (iwx_debug >= (n)) printf x; } while (0)
145 int iwx_debug = 1;
146 #else
147 #define DPRINTF(x)	do { ; } while (0)
148 #define DPRINTFN(n, x)	do { ; } while (0)
149 #endif
150 
151 #include <dev/pci/if_iwxreg.h>
152 #include <dev/pci/if_iwxvar.h>
153 
154 const uint8_t iwx_nvm_channels_8000[] = {
155 	/* 2.4 GHz */
156 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
157 	/* 5 GHz */
158 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
159 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
160 	149, 153, 157, 161, 165, 169, 173, 177, 181
161 };
162 
163 static const uint8_t iwx_nvm_channels_uhb[] = {
164 	/* 2.4 GHz */
165 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
166 	/* 5 GHz */
167 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
168 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
169 	149, 153, 157, 161, 165, 169, 173, 177, 181,
170 	/* 6-7 GHz */
171 	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
172 	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
173 	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
174 	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
175 };
176 
177 #define IWX_NUM_2GHZ_CHANNELS	14
178 
179 const struct iwx_rate {
180 	uint16_t rate;
181 	uint8_t plcp;
182 	uint8_t ht_plcp;
183 } iwx_rates[] = {
184 		/* Legacy */		/* HT */
185 	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
186 	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
187 	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
188 	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
189 	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
190 	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
191 	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
192 	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
193 	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
194 	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
195 	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
196 	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
197 	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
198 	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
199 	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
200 	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
201 	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
202 	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
203 	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
204 	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
205 	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
206 };
207 #define IWX_RIDX_CCK	0
208 #define IWX_RIDX_OFDM	4
209 #define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
210 #define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
211 #define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
212 #define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
213 
214 /* Convert an MCS index into an iwx_rates[] index. */
215 const int iwx_mcs2ridx[] = {
216 	IWX_RATE_MCS_0_INDEX,
217 	IWX_RATE_MCS_1_INDEX,
218 	IWX_RATE_MCS_2_INDEX,
219 	IWX_RATE_MCS_3_INDEX,
220 	IWX_RATE_MCS_4_INDEX,
221 	IWX_RATE_MCS_5_INDEX,
222 	IWX_RATE_MCS_6_INDEX,
223 	IWX_RATE_MCS_7_INDEX,
224 	IWX_RATE_MCS_8_INDEX,
225 	IWX_RATE_MCS_9_INDEX,
226 	IWX_RATE_MCS_10_INDEX,
227 	IWX_RATE_MCS_11_INDEX,
228 	IWX_RATE_MCS_12_INDEX,
229 	IWX_RATE_MCS_13_INDEX,
230 	IWX_RATE_MCS_14_INDEX,
231 	IWX_RATE_MCS_15_INDEX,
232 };
233 
234 uint8_t	iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
235 uint8_t	iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
236 int	iwx_is_mimo_ht_plcp(uint8_t);
237 int	iwx_is_mimo_mcs(int);
238 int	iwx_store_cscheme(struct iwx_softc *, uint8_t *, size_t);
239 int	iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
240 int	iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
241 int	iwx_apply_debug_destination(struct iwx_softc *);
242 int	iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
243 void	iwx_ctxt_info_free_fw_img(struct iwx_softc *);
244 void	iwx_ctxt_info_free_paging(struct iwx_softc *);
245 int	iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
246 	    struct iwx_context_info_dram *);
247 void	iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
248 int	iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
249 	    uint8_t *, size_t);
250 int	iwx_set_default_calib(struct iwx_softc *, const void *);
251 void	iwx_fw_info_free(struct iwx_fw_info *);
252 int	iwx_read_firmware(struct iwx_softc *);
253 uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
254 uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
255 void	iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
256 void	iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
257 int	iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
258 int	iwx_write_mem(struct iwx_softc *, uint32_t, const void *, int);
259 int	iwx_write_mem32(struct iwx_softc *, uint32_t, uint32_t);
260 int	iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
261 int	iwx_nic_lock(struct iwx_softc *);
262 void	iwx_nic_assert_locked(struct iwx_softc *);
263 void	iwx_nic_unlock(struct iwx_softc *);
264 int	iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
265 	    uint32_t);
266 int	iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
267 int	iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
268 int	iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *, bus_size_t,
269 	    bus_size_t);
270 void	iwx_dma_contig_free(struct iwx_dma_info *);
271 int	iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
272 void	iwx_disable_rx_dma(struct iwx_softc *);
273 void	iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
274 void	iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
275 int	iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
276 void	iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
277 void	iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
278 void	iwx_enable_rfkill_int(struct iwx_softc *);
279 int	iwx_check_rfkill(struct iwx_softc *);
280 void	iwx_enable_interrupts(struct iwx_softc *);
281 void	iwx_enable_fwload_interrupt(struct iwx_softc *);
282 void	iwx_restore_interrupts(struct iwx_softc *);
283 void	iwx_disable_interrupts(struct iwx_softc *);
284 void	iwx_ict_reset(struct iwx_softc *);
285 int	iwx_set_hw_ready(struct iwx_softc *);
286 int	iwx_prepare_card_hw(struct iwx_softc *);
287 int	iwx_force_power_gating(struct iwx_softc *);
288 void	iwx_apm_config(struct iwx_softc *);
289 int	iwx_apm_init(struct iwx_softc *);
290 void	iwx_apm_stop(struct iwx_softc *);
291 int	iwx_allow_mcast(struct iwx_softc *);
292 void	iwx_init_msix_hw(struct iwx_softc *);
293 void	iwx_conf_msix_hw(struct iwx_softc *, int);
294 int	iwx_clear_persistence_bit(struct iwx_softc *);
295 int	iwx_start_hw(struct iwx_softc *);
296 void	iwx_stop_device(struct iwx_softc *);
297 void	iwx_nic_config(struct iwx_softc *);
298 int	iwx_nic_rx_init(struct iwx_softc *);
299 int	iwx_nic_init(struct iwx_softc *);
300 int	iwx_enable_txq(struct iwx_softc *, int, int, int, int);
301 int	iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
302 void	iwx_post_alive(struct iwx_softc *);
303 int	iwx_schedule_session_protection(struct iwx_softc *, struct iwx_node *,
304 	    uint32_t);
305 void	iwx_init_channel_map(struct iwx_softc *, uint16_t *, uint32_t *, int);
306 void	iwx_setup_ht_rates(struct iwx_softc *);
307 void	iwx_setup_vht_rates(struct iwx_softc *);
308 int	iwx_mimo_enabled(struct iwx_softc *);
309 void	iwx_mac_ctxt_task(void *);
310 void	iwx_phy_ctxt_task(void *);
311 void	iwx_updatechan(struct ieee80211com *);
312 void	iwx_updateprot(struct ieee80211com *);
313 void	iwx_updateslot(struct ieee80211com *);
314 void	iwx_updateedca(struct ieee80211com *);
315 void	iwx_updatedtim(struct ieee80211com *);
316 void	iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
317 	    uint16_t);
318 void	iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
319 int	iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
320 	    uint8_t);
321 void	iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
322 	    uint8_t);
323 int	iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
324 	    uint8_t);
325 void	iwx_rx_ba_session_expired(void *);
326 void	iwx_rx_bar_frame_release(struct iwx_softc *, struct iwx_rx_packet *,
327 	    struct mbuf_list *);
328 void	iwx_reorder_timer_expired(void *);
329 void	iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
330 	    uint16_t, uint16_t, int, int);
331 void	iwx_sta_tx_agg_start(struct iwx_softc *, struct ieee80211_node *,
332 	    uint8_t);
333 void	iwx_ba_task(void *);
334 
335 int	iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
336 int	iwx_is_valid_mac_addr(const uint8_t *);
337 int	iwx_nvm_get(struct iwx_softc *);
338 int	iwx_load_firmware(struct iwx_softc *);
339 int	iwx_start_fw(struct iwx_softc *);
340 int	iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
341 int	iwx_send_phy_cfg_cmd(struct iwx_softc *);
342 int	iwx_load_ucode_wait_alive(struct iwx_softc *);
343 int	iwx_send_dqa_cmd(struct iwx_softc *);
344 int	iwx_run_init_mvm_ucode(struct iwx_softc *, int);
345 int	iwx_config_ltr(struct iwx_softc *);
346 void	iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int);
347 int	iwx_rx_addbuf(struct iwx_softc *, int, int);
348 int	iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
349 void	iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
350 	    struct iwx_rx_data *);
351 int	iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
352 int	iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t,
353 	    struct ieee80211_rxinfo *);
354 int	iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
355 	    struct ieee80211_node *, struct ieee80211_rxinfo *);
356 void	iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t, int, int,
357 	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
358 void	iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
359 void	iwx_txd_done(struct iwx_softc *, struct iwx_tx_data *);
360 void	iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, int);
361 void	iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
362 	    struct iwx_rx_data *);
363 void	iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
364 void	iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
365 	    struct iwx_rx_data *);
366 int	iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
367 uint8_t	iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
368 int	iwx_phy_ctxt_cmd_uhb_v3(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
369 	    uint8_t, uint32_t, uint8_t, uint8_t);
370 int	iwx_phy_ctxt_cmd_v3(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
371 	    uint8_t, uint32_t, uint8_t, uint8_t);
372 int	iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
373 	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
374 int	iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
375 int	iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
376 	    const void *);
377 int	iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
378 	    uint32_t *);
379 int	iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
380 	    const void *, uint32_t *);
381 void	iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
382 void	iwx_cmd_done(struct iwx_softc *, int, int, int);
383 const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
384 	    struct ieee80211_frame *, struct iwx_tx_cmd_gen2 *);
385 void	iwx_tx_update_byte_tbl(struct iwx_tx_ring *, int, uint16_t, uint16_t);
386 int	iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *);
387 int	iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
388 int	iwx_wait_tx_queues_empty(struct iwx_softc *);
389 int	iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
390 int	iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
391 int	iwx_beacon_filter_send_cmd(struct iwx_softc *,
392 	    struct iwx_beacon_filter_cmd *);
393 int	iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
394 void	iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
395 	    struct iwx_mac_power_cmd *);
396 int	iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
397 int	iwx_power_update_device(struct iwx_softc *);
398 int	iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
399 int	iwx_disable_beacon_filter(struct iwx_softc *);
400 int	iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
401 int	iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
402 int	iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
403 int	iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
404 int	iwx_config_umac_scan_reduced(struct iwx_softc *);
405 uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
406 void	iwx_scan_umac_dwell_v10(struct iwx_softc *,
407 	    struct iwx_scan_general_params_v10 *, int);
408 void	iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
409 	    struct iwx_scan_general_params_v10 *, uint16_t, int);
410 void	iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
411 	    struct iwx_scan_channel_params_v6 *, uint32_t, int, int);
412 int	iwx_umac_scan_v14(struct iwx_softc *, int);
413 void	iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
414 uint8_t	iwx_ridx2rate(struct ieee80211_rateset *, int);
415 int	iwx_rval2ridx(int);
416 void	iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *, int *);
417 void	iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
418 	    struct iwx_mac_ctx_cmd *, uint32_t);
419 void	iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
420 	    struct iwx_mac_data_sta *, int);
421 int	iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t, int);
422 int	iwx_clear_statistics(struct iwx_softc *);
423 void	iwx_add_task(struct iwx_softc *, struct taskq *, struct task *);
424 void	iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
425 int	iwx_scan(struct iwx_softc *);
426 int	iwx_bgscan(struct ieee80211com *);
427 void	iwx_bgscan_done(struct ieee80211com *,
428 	    struct ieee80211_node_switch_bss_arg *, size_t);
429 void	iwx_bgscan_done_task(void *);
430 int	iwx_umac_scan_abort(struct iwx_softc *);
431 int	iwx_scan_abort(struct iwx_softc *);
432 int	iwx_enable_mgmt_queue(struct iwx_softc *);
433 int	iwx_rs_rval2idx(uint8_t);
434 uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *, int);
435 uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
436 int	iwx_rs_init(struct iwx_softc *, struct iwx_node *);
437 int	iwx_enable_data_tx_queues(struct iwx_softc *);
438 int	iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
439 	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
440 	    uint8_t);
441 int	iwx_auth(struct iwx_softc *);
442 int	iwx_deauth(struct iwx_softc *);
443 int	iwx_run(struct iwx_softc *);
444 int	iwx_run_stop(struct iwx_softc *);
445 struct ieee80211_node *iwx_node_alloc(struct ieee80211com *);
446 int	iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
447 	    struct ieee80211_key *);
448 void	iwx_setkey_task(void *);
449 void	iwx_delete_key(struct ieee80211com *,
450 	    struct ieee80211_node *, struct ieee80211_key *);
451 int	iwx_media_change(struct ifnet *);
452 void	iwx_newstate_task(void *);
453 int	iwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
454 void	iwx_endscan(struct iwx_softc *);
455 void	iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
456 	    struct ieee80211_node *);
457 int	iwx_sf_config(struct iwx_softc *, int);
458 int	iwx_send_bt_init_conf(struct iwx_softc *);
459 int	iwx_send_soc_conf(struct iwx_softc *);
460 int	iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
461 int	iwx_send_temp_report_ths_cmd(struct iwx_softc *);
462 int	iwx_init_hw(struct iwx_softc *);
463 int	iwx_init(struct ifnet *);
464 void	iwx_start(struct ifnet *);
465 void	iwx_stop(struct ifnet *);
466 void	iwx_watchdog(struct ifnet *);
467 int	iwx_ioctl(struct ifnet *, u_long, caddr_t);
468 const char *iwx_desc_lookup(uint32_t);
469 void	iwx_nic_error(struct iwx_softc *);
470 void	iwx_dump_driver_status(struct iwx_softc *);
471 void	iwx_nic_umac_error(struct iwx_softc *);
472 int	iwx_detect_duplicate(struct iwx_softc *, struct mbuf *,
473 	    struct iwx_rx_mpdu_desc *, struct ieee80211_rxinfo *);
474 int	iwx_is_sn_less(uint16_t, uint16_t, uint16_t);
475 void	iwx_release_frames(struct iwx_softc *, struct ieee80211_node *,
476 	    struct iwx_rxba_data *, struct iwx_reorder_buffer *, uint16_t,
477 	    struct mbuf_list *);
478 int	iwx_oldsn_workaround(struct iwx_softc *, struct ieee80211_node *,
479 	    int, struct iwx_reorder_buffer *, uint32_t, uint32_t);
480 int	iwx_rx_reorder(struct iwx_softc *, struct mbuf *, int,
481 	    struct iwx_rx_mpdu_desc *, int, int, uint32_t,
482 	    struct ieee80211_rxinfo *, struct mbuf_list *);
483 void	iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t,
484 	    struct mbuf_list *);
485 int	iwx_rx_pkt_valid(struct iwx_rx_packet *);
486 void	iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
487 	    struct mbuf_list *);
488 void	iwx_notif_intr(struct iwx_softc *);
489 int	iwx_intr(void *);
490 int	iwx_intr_msix(void *);
491 int	iwx_match(struct device *, void *, void *);
492 int	iwx_preinit(struct iwx_softc *);
493 void	iwx_attach_hook(struct device *);
494 const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
495 void	iwx_attach(struct device *, struct device *, void *);
496 void	iwx_init_task(void *);
497 int	iwx_activate(struct device *, int);
498 void	iwx_resume(struct iwx_softc *);
499 int	iwx_wakeup(struct iwx_softc *);
500 
501 #if NBPFILTER > 0
502 void	iwx_radiotap_attach(struct iwx_softc *);
503 #endif
504 
505 uint8_t
506 iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
507 {
508 	const struct iwx_fw_cmd_version *entry;
509 	int i;
510 
511 	for (i = 0; i < sc->n_cmd_versions; i++) {
512 		entry = &sc->cmd_versions[i];
513 		if (entry->group == grp && entry->cmd == cmd)
514 			return entry->cmd_ver;
515 	}
516 
517 	return IWX_FW_CMD_VER_UNKNOWN;
518 }
519 
520 uint8_t
521 iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
522 {
523 	const struct iwx_fw_cmd_version *entry;
524 	int i;
525 
526 	for (i = 0; i < sc->n_cmd_versions; i++) {
527 		entry = &sc->cmd_versions[i];
528 		if (entry->group == grp && entry->cmd == cmd)
529 			return entry->notif_ver;
530 	}
531 
532 	return IWX_FW_CMD_VER_UNKNOWN;
533 }
534 
535 int
536 iwx_is_mimo_ht_plcp(uint8_t ht_plcp)
537 {
538 	return (ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP &&
539 	    (ht_plcp & IWX_RATE_HT_MCS_NSS_MSK));
540 }
541 
542 int
543 iwx_is_mimo_mcs(int mcs)
544 {
545 	int ridx = iwx_mcs2ridx[mcs];
546 	return iwx_is_mimo_ht_plcp(iwx_rates[ridx].ht_plcp);
547 
548 }
549 
550 int
551 iwx_store_cscheme(struct iwx_softc *sc, uint8_t *data, size_t dlen)
552 {
553 	struct iwx_fw_cscheme_list *l = (void *)data;
554 
555 	if (dlen < sizeof(*l) ||
556 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
557 		return EINVAL;
558 
559 	/* we don't actually store anything for now, always use s/w crypto */
560 
561 	return 0;
562 }
563 
564 int
565 iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
566     const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
567 {
568 	int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 0);
569 	if (err) {
570 		printf("%s: could not allocate context info DMA memory\n",
571 		    DEVNAME(sc));
572 		return err;
573 	}
574 
575 	memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
576 
577 	return 0;
578 }
579 
580 void iwx_ctxt_info_free_paging(struct iwx_softc *sc)
581 {
582 	struct iwx_self_init_dram *dram = &sc->init_dram;
583 	int i;
584 
585 	if (!dram->paging)
586 		return;
587 
588 	/* free paging*/
589 	for (i = 0; i < dram->paging_cnt; i++)
590 		iwx_dma_contig_free(&dram->paging[i]);
591 
592 	free(dram->paging, M_DEVBUF, dram->paging_cnt * sizeof(*dram->paging));
593 	dram->paging_cnt = 0;
594 	dram->paging = NULL;
595 }
596 
597 int
598 iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
599 {
600 	int i = 0;
601 
602 	while (start < fws->fw_count &&
603 	       fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
604 	       fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
605 		start++;
606 		i++;
607 	}
608 
609 	return i;
610 }
611 
612 int
613 iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
614     struct iwx_context_info_dram *ctxt_dram)
615 {
616 	struct iwx_self_init_dram *dram = &sc->init_dram;
617 	int i, ret, fw_cnt = 0;
618 
619 	KASSERT(dram->paging == NULL);
620 
621 	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
622 	/* add 1 due to separator */
623 	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
624 	/* add 2 due to separators */
625 	dram->paging_cnt = iwx_get_num_sections(fws,
626 	    dram->lmac_cnt + dram->umac_cnt + 2);
627 
628 	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
629 	    sizeof(*dram->fw), M_DEVBUF,  M_ZERO | M_NOWAIT);
630 	if (!dram->fw) {
631 		printf("%s: could not allocate memory for firmware sections\n",
632 		    DEVNAME(sc));
633 		return ENOMEM;
634 	}
635 
636 	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
637 	    M_DEVBUF, M_ZERO | M_NOWAIT);
638 	if (!dram->paging) {
639 		printf("%s: could not allocate memory for firmware paging\n",
640 		    DEVNAME(sc));
641 		return ENOMEM;
642 	}
643 
644 	/* initialize lmac sections */
645 	for (i = 0; i < dram->lmac_cnt; i++) {
646 		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
647 						   &dram->fw[fw_cnt]);
648 		if (ret)
649 			return ret;
650 		ctxt_dram->lmac_img[i] =
651 			htole64(dram->fw[fw_cnt].paddr);
652 		DPRINTF(("%s: firmware LMAC section %d at 0x%llx size %lld\n", __func__, i,
653 		    (unsigned long long)dram->fw[fw_cnt].paddr,
654 		    (unsigned long long)dram->fw[fw_cnt].size));
655 		fw_cnt++;
656 	}
657 
658 	/* initialize umac sections */
659 	for (i = 0; i < dram->umac_cnt; i++) {
660 		/* access FW with +1 to make up for lmac separator */
661 		ret = iwx_ctxt_info_alloc_dma(sc,
662 		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
663 		if (ret)
664 			return ret;
665 		ctxt_dram->umac_img[i] =
666 			htole64(dram->fw[fw_cnt].paddr);
667 		DPRINTF(("%s: firmware UMAC section %d at 0x%llx size %lld\n", __func__, i,
668 			(unsigned long long)dram->fw[fw_cnt].paddr,
669 			(unsigned long long)dram->fw[fw_cnt].size));
670 		fw_cnt++;
671 	}
672 
673 	/*
674 	 * Initialize paging.
675 	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
676 	 * stored separately.
677 	 * This is since the timing of its release is different -
678 	 * while fw memory can be released on alive, the paging memory can be
679 	 * freed only when the device goes down.
680 	 * Given that, the logic here in accessing the fw image is a bit
681 	 * different - fw_cnt isn't changing so loop counter is added to it.
682 	 */
683 	for (i = 0; i < dram->paging_cnt; i++) {
684 		/* access FW with +2 to make up for lmac & umac separators */
685 		int fw_idx = fw_cnt + i + 2;
686 
687 		ret = iwx_ctxt_info_alloc_dma(sc,
688 		    &fws->fw_sect[fw_idx], &dram->paging[i]);
689 		if (ret)
690 			return ret;
691 
692 		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
693 		DPRINTF(("%s: firmware paging section %d at 0x%llx size %lld\n", __func__, i,
694 		    (unsigned long long)dram->paging[i].paddr,
695 		    (unsigned long long)dram->paging[i].size));
696 	}
697 
698 	return 0;
699 }
700 
701 void
702 iwx_fw_version_str(char *buf, size_t bufsize,
703     uint32_t major, uint32_t minor, uint32_t api)
704 {
705 	/*
706 	 * Starting with major version 35 the Linux driver prints the minor
707 	 * version in hexadecimal.
708 	 */
709 	if (major >= 35)
710 		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
711 	else
712 		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
713 }
714 
715 int
716 iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
717     uint8_t min_power)
718 {
719 	struct iwx_dma_info *fw_mon = &sc->fw_mon;
720 	uint32_t size = 0;
721 	uint8_t power;
722 	int err;
723 
724 	if (fw_mon->size)
725 		return 0;
726 
727 	for (power = max_power; power >= min_power; power--) {
728 		size = (1 << power);
729 
730 		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
731 		if (err)
732 			continue;
733 
734 		DPRINTF(("%s: allocated 0x%08x bytes for firmware monitor.\n",
735 			 DEVNAME(sc), size));
736 		break;
737 	}
738 
739 	if (err) {
740 		fw_mon->size = 0;
741 		return err;
742 	}
743 
744 	if (power != max_power)
745 		DPRINTF(("%s: Sorry - debug buffer is only %luK while you requested %luK\n",
746 			DEVNAME(sc), (unsigned long)(1 << (power - 10)),
747 			(unsigned long)(1 << (max_power - 10))));
748 
749 	return 0;
750 }
751 
752 int
753 iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
754 {
755 	if (!max_power) {
756 		/* default max_power is maximum */
757 		max_power = 26;
758 	} else {
759 		max_power += 11;
760 	}
761 
762 	if (max_power > 26) {
763 		 DPRINTF(("%s: External buffer size for monitor is too big %d, "
764 		     "check the FW TLV\n", DEVNAME(sc), max_power));
765 		return 0;
766 	}
767 
768 	if (sc->fw_mon.size)
769 		return 0;
770 
771 	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
772 }
773 
774 int
775 iwx_apply_debug_destination(struct iwx_softc *sc)
776 {
777 	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
778 	int i, err;
779 	uint8_t mon_mode, size_power, base_shift, end_shift;
780 	uint32_t base_reg, end_reg;
781 
782 	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
783 	mon_mode = dest_v1->monitor_mode;
784 	size_power = dest_v1->size_power;
785 	base_reg = le32toh(dest_v1->base_reg);
786 	end_reg = le32toh(dest_v1->end_reg);
787 	base_shift = dest_v1->base_shift;
788 	end_shift = dest_v1->end_shift;
789 
790 	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));
791 
792 	if (mon_mode == EXTERNAL_MODE) {
793 		err = iwx_alloc_fw_monitor(sc, size_power);
794 		if (err)
795 			return err;
796 	}
797 
798 	if (!iwx_nic_lock(sc))
799 		return EBUSY;
800 
801 	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
802 		uint32_t addr, val;
803 		uint8_t op;
804 
805 		addr = le32toh(dest_v1->reg_ops[i].addr);
806 		val = le32toh(dest_v1->reg_ops[i].val);
807 		op = dest_v1->reg_ops[i].op;
808 
809 		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
810 		switch (op) {
811 		case CSR_ASSIGN:
812 			IWX_WRITE(sc, addr, val);
813 			break;
814 		case CSR_SETBIT:
815 			IWX_SETBITS(sc, addr, (1 << val));
816 			break;
817 		case CSR_CLEARBIT:
818 			IWX_CLRBITS(sc, addr, (1 << val));
819 			break;
820 		case PRPH_ASSIGN:
821 			iwx_write_prph(sc, addr, val);
822 			break;
823 		case PRPH_SETBIT:
824 			err = iwx_set_bits_prph(sc, addr, (1 << val));
825 			if (err)
826 				return err;
827 			break;
828 		case PRPH_CLEARBIT:
829 			err = iwx_clear_bits_prph(sc, addr, (1 << val));
830 			if (err)
831 				return err;
832 			break;
833 		case PRPH_BLOCKBIT:
834 			if (iwx_read_prph(sc, addr) & (1 << val))
835 				goto monitor;
836 			break;
837 		default:
838 			DPRINTF(("%s: FW debug - unknown OP %d\n",
839 			    DEVNAME(sc), op));
840 			break;
841 		}
842 	}
843 
844 monitor:
845 	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
846 		iwx_write_prph(sc, le32toh(base_reg),
847 		    sc->fw_mon.paddr >> base_shift);
848 		iwx_write_prph(sc, end_reg,
849 		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
850 		    >> end_shift);
851 	}
852 
853 	iwx_nic_unlock(sc);
854 	return 0;
855 }
856 
857 int
858 iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
859 {
860 	struct iwx_context_info *ctxt_info;
861 	struct iwx_context_info_rbd_cfg *rx_cfg;
862 	uint32_t control_flags = 0, rb_size;
863 	uint64_t paddr;
864 	int err;
865 
866 	ctxt_info = sc->ctxt_info_dma.vaddr;
867 
868 	ctxt_info->version.version = 0;
869 	ctxt_info->version.mac_id =
870 		htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
871 	/* size is in DWs */
872 	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);
873 
874 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
875 		rb_size = IWX_CTXT_INFO_RB_SIZE_2K;
876 	else
877 		rb_size = IWX_CTXT_INFO_RB_SIZE_4K;
878 
879 	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF);
880 	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
881 			(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
882 			 IWX_CTXT_INFO_RB_CB_SIZE_POS) |
883 			(rb_size << IWX_CTXT_INFO_RB_SIZE_POS);
884 	ctxt_info->control.control_flags = htole32(control_flags);
885 
886 	/* initialize RX default queue */
887 	rx_cfg = &ctxt_info->rbd_cfg;
888 	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
889 	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
890 	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);
891 
892 	/* initialize TX command queue */
893 	ctxt_info->hcmd_cfg.cmd_queue_addr =
894 		htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
895 	ctxt_info->hcmd_cfg.cmd_queue_size =
896 		IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
897 
898 	/* allocate ucode sections in dram and set addresses */
899 	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
900 	if (err) {
901 		iwx_ctxt_info_free_fw_img(sc);
902 		return err;
903 	}
904 
905 	/* Configure debug, if exists */
906 	if (sc->sc_fw.dbg_dest_tlv_v1) {
907 		err = iwx_apply_debug_destination(sc);
908 		if (err) {
909 			iwx_ctxt_info_free_fw_img(sc);
910 			return err;
911 		}
912 	}
913 
914 	/*
915 	 * Write the context info DMA base address. The device expects a
916 	 * 64-bit address but a simple bus_space_write_8 to this register
917 	 * won't work on some devices, such as the AX201.
918 	 */
919 	paddr = sc->ctxt_info_dma.paddr;
920 	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
921 	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);
922 
923 	/* kick FW self load */
924 	if (!iwx_nic_lock(sc)) {
925 		iwx_ctxt_info_free_fw_img(sc);
926 		return EBUSY;
927 	}
928 	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
929 	iwx_nic_unlock(sc);
930 
931 	/* Context info will be released upon alive or failure to get one */
932 
933 	return 0;
934 }
935 
936 void
937 iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
938 {
939 	struct iwx_self_init_dram *dram = &sc->init_dram;
940 	int i;
941 
942 	if (!dram->fw)
943 		return;
944 
945 	for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
946 		iwx_dma_contig_free(&dram->fw[i]);
947 
948 	free(dram->fw, M_DEVBUF,
949 	    (dram->lmac_cnt + dram->umac_cnt) * sizeof(*dram->fw));
950 	dram->lmac_cnt = 0;
951 	dram->umac_cnt = 0;
952 	dram->fw = NULL;
953 }
954 
955 int
956 iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
957     uint8_t *data, size_t dlen)
958 {
959 	struct iwx_fw_sects *fws;
960 	struct iwx_fw_onesect *fwone;
961 
962 	if (type >= IWX_UCODE_TYPE_MAX)
963 		return EINVAL;
964 	if (dlen < sizeof(uint32_t))
965 		return EINVAL;
966 
967 	fws = &sc->sc_fw.fw_sects[type];
968 	DPRINTF(("%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count));
969 	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
970 		return EINVAL;
971 
972 	fwone = &fws->fw_sect[fws->fw_count];
973 
974 	/* first 32bit are device load offset */
975 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
976 
977 	/* rest is data */
978 	fwone->fws_data = data + sizeof(uint32_t);
979 	fwone->fws_len = dlen - sizeof(uint32_t);
980 
981 	fws->fw_count++;
982 	fws->fw_totlen += fwone->fws_len;
983 
984 	return 0;
985 }
986 
987 #define IWX_DEFAULT_SCAN_CHANNELS	40
988 /* Newer firmware might support more channels. Raise this value if needed. */
989 #define IWX_MAX_SCAN_CHANNELS		67 /* as of iwx-cc-a0-62 firmware */
990 
991 struct iwx_tlv_calib_data {
992 	uint32_t ucode_type;
993 	struct iwx_tlv_calib_ctrl calib;
994 } __packed;
995 
996 int
997 iwx_set_default_calib(struct iwx_softc *sc, const void *data)
998 {
999 	const struct iwx_tlv_calib_data *def_calib = data;
1000 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
1001 
1002 	if (ucode_type >= IWX_UCODE_TYPE_MAX)
1003 		return EINVAL;
1004 
1005 	sc->sc_default_calib[ucode_type].flow_trigger =
1006 	    def_calib->calib.flow_trigger;
1007 	sc->sc_default_calib[ucode_type].event_trigger =
1008 	    def_calib->calib.event_trigger;
1009 
1010 	return 0;
1011 }
1012 
1013 void
1014 iwx_fw_info_free(struct iwx_fw_info *fw)
1015 {
1016 	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
1017 	fw->fw_rawdata = NULL;
1018 	fw->fw_rawsize = 0;
1019 	/* don't touch fw->fw_status */
1020 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
1021 }
1022 
1023 #define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
1024 
1025 int
1026 iwx_read_firmware(struct iwx_softc *sc)
1027 {
1028 	struct iwx_fw_info *fw = &sc->sc_fw;
1029 	struct iwx_tlv_ucode_header *uhdr;
1030 	struct iwx_ucode_tlv tlv;
1031 	uint32_t tlv_type;
1032 	uint8_t *data;
1033 	int err;
1034 	size_t len;
1035 
1036 	if (fw->fw_status == IWX_FW_STATUS_DONE)
1037 		return 0;
1038 
1039 	while (fw->fw_status == IWX_FW_STATUS_INPROGRESS)
1040 		tsleep_nsec(&sc->sc_fw, 0, "iwxfwp", INFSLP);
1041 	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
1042 
1043 	if (fw->fw_rawdata != NULL)
1044 		iwx_fw_info_free(fw);
1045 
1046 	err = loadfirmware(sc->sc_fwname,
1047 	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
1048 	if (err) {
1049 		printf("%s: could not read firmware %s (error %d)\n",
1050 		    DEVNAME(sc), sc->sc_fwname, err);
1051 		goto out;
1052 	}
1053 
1054 	sc->sc_capaflags = 0;
1055 	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
1056 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
1057 	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
1058 	sc->n_cmd_versions = 0;
1059 
1060 	uhdr = (void *)fw->fw_rawdata;
1061 	if (*(uint32_t *)fw->fw_rawdata != 0
1062 	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
1063 		printf("%s: invalid firmware %s\n",
1064 		    DEVNAME(sc), sc->sc_fwname);
1065 		err = EINVAL;
1066 		goto out;
1067 	}
1068 
1069 	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1070 	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
1071 	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
1072 	    IWX_UCODE_API(le32toh(uhdr->ver)));
1073 
1074 	data = uhdr->data;
1075 	len = fw->fw_rawsize - sizeof(*uhdr);
1076 
1077 	while (len >= sizeof(tlv)) {
1078 		size_t tlv_len;
1079 		void *tlv_data;
1080 
1081 		memcpy(&tlv, data, sizeof(tlv));
1082 		tlv_len = le32toh(tlv.length);
1083 		tlv_type = le32toh(tlv.type);
1084 
1085 		len -= sizeof(tlv);
1086 		data += sizeof(tlv);
1087 		tlv_data = data;
1088 
1089 		if (len < tlv_len) {
1090 			printf("%s: firmware too short: %zu bytes\n",
1091 			    DEVNAME(sc), len);
1092 			err = EINVAL;
1093 			goto parse_out;
1094 		}
1095 
1096 		switch (tlv_type) {
1097 		case IWX_UCODE_TLV_PROBE_MAX_LEN:
1098 			if (tlv_len < sizeof(uint32_t)) {
1099 				err = EINVAL;
1100 				goto parse_out;
1101 			}
1102 			sc->sc_capa_max_probe_len
1103 			    = le32toh(*(uint32_t *)tlv_data);
1104 			if (sc->sc_capa_max_probe_len >
1105 			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
1106 				err = EINVAL;
1107 				goto parse_out;
1108 			}
1109 			break;
1110 		case IWX_UCODE_TLV_PAN:
1111 			if (tlv_len) {
1112 				err = EINVAL;
1113 				goto parse_out;
1114 			}
1115 			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
1116 			break;
1117 		case IWX_UCODE_TLV_FLAGS:
1118 			if (tlv_len < sizeof(uint32_t)) {
1119 				err = EINVAL;
1120 				goto parse_out;
1121 			}
1122 			/*
1123 			 * Apparently there can be many flags, but Linux driver
1124 			 * parses only the first one, and so do we.
1125 			 *
1126 			 * XXX: why does this override IWX_UCODE_TLV_PAN?
1127 			 * Intentional or a bug?  Observations from
1128 			 * current firmware file:
1129 			 *  1) TLV_PAN is parsed first
1130 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
1131 			 * ==> this resets TLV_PAN to itself... hnnnk
1132 			 */
1133 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
1134 			break;
1135 		case IWX_UCODE_TLV_CSCHEME:
1136 			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
1137 			if (err)
1138 				goto parse_out;
1139 			break;
1140 		case IWX_UCODE_TLV_NUM_OF_CPU: {
1141 			uint32_t num_cpu;
1142 			if (tlv_len != sizeof(uint32_t)) {
1143 				err = EINVAL;
1144 				goto parse_out;
1145 			}
1146 			num_cpu = le32toh(*(uint32_t *)tlv_data);
1147 			if (num_cpu < 1 || num_cpu > 2) {
1148 				err = EINVAL;
1149 				goto parse_out;
1150 			}
1151 			break;
1152 		}
1153 		case IWX_UCODE_TLV_SEC_RT:
1154 			err = iwx_firmware_store_section(sc,
1155 			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
1156 			if (err)
1157 				goto parse_out;
1158 			break;
1159 		case IWX_UCODE_TLV_SEC_INIT:
1160 			err = iwx_firmware_store_section(sc,
1161 			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
1162 			if (err)
1163 				goto parse_out;
1164 			break;
1165 		case IWX_UCODE_TLV_SEC_WOWLAN:
1166 			err = iwx_firmware_store_section(sc,
1167 			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
1168 			if (err)
1169 				goto parse_out;
1170 			break;
1171 		case IWX_UCODE_TLV_DEF_CALIB:
1172 			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
1173 				err = EINVAL;
1174 				goto parse_out;
1175 			}
1176 			err = iwx_set_default_calib(sc, tlv_data);
1177 			if (err)
1178 				goto parse_out;
1179 			break;
1180 		case IWX_UCODE_TLV_PHY_SKU:
1181 			if (tlv_len != sizeof(uint32_t)) {
1182 				err = EINVAL;
1183 				goto parse_out;
1184 			}
1185 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
1186 			break;
1187 
1188 		case IWX_UCODE_TLV_API_CHANGES_SET: {
1189 			struct iwx_ucode_api *api;
1190 			int idx, i;
1191 			if (tlv_len != sizeof(*api)) {
1192 				err = EINVAL;
1193 				goto parse_out;
1194 			}
1195 			api = (struct iwx_ucode_api *)tlv_data;
1196 			idx = le32toh(api->api_index);
1197 			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
1198 				err = EINVAL;
1199 				goto parse_out;
1200 			}
1201 			for (i = 0; i < 32; i++) {
1202 				if ((le32toh(api->api_flags) & (1 << i)) == 0)
1203 					continue;
1204 				setbit(sc->sc_ucode_api, i + (32 * idx));
1205 			}
1206 			break;
1207 		}
1208 
1209 		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
1210 			struct iwx_ucode_capa *capa;
1211 			int idx, i;
1212 			if (tlv_len != sizeof(*capa)) {
1213 				err = EINVAL;
1214 				goto parse_out;
1215 			}
1216 			capa = (struct iwx_ucode_capa *)tlv_data;
1217 			idx = le32toh(capa->api_index);
1218 			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
1219 				goto parse_out;
1220 			}
1221 			for (i = 0; i < 32; i++) {
1222 				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
1223 					continue;
1224 				setbit(sc->sc_enabled_capa, i + (32 * idx));
1225 			}
1226 			break;
1227 		}
1228 
1229 		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
1230 		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
1231 			/* ignore, not used by current driver */
1232 			break;
1233 
1234 		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
1235 			err = iwx_firmware_store_section(sc,
1236 			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
1237 			    tlv_len);
1238 			if (err)
1239 				goto parse_out;
1240 			break;
1241 
1242 		case IWX_UCODE_TLV_PAGING:
1243 			if (tlv_len != sizeof(uint32_t)) {
1244 				err = EINVAL;
1245 				goto parse_out;
1246 			}
1247 			break;
1248 
1249 		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
1250 			if (tlv_len != sizeof(uint32_t)) {
1251 				err = EINVAL;
1252 				goto parse_out;
1253 			}
1254 			sc->sc_capa_n_scan_channels =
1255 			  le32toh(*(uint32_t *)tlv_data);
1256 			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
1257 				err = ERANGE;
1258 				goto parse_out;
1259 			}
1260 			break;
1261 
1262 		case IWX_UCODE_TLV_FW_VERSION:
1263 			if (tlv_len != sizeof(uint32_t) * 3) {
1264 				err = EINVAL;
1265 				goto parse_out;
1266 			}
1267 
1268 			iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1269 			    le32toh(((uint32_t *)tlv_data)[0]),
1270 			    le32toh(((uint32_t *)tlv_data)[1]),
1271 			    le32toh(((uint32_t *)tlv_data)[2]));
1272 			break;
1273 
1274 		case IWX_UCODE_TLV_FW_DBG_DEST: {
1275 			struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
1276 
1277 			fw->dbg_dest_ver = (uint8_t *)tlv_data;
1278 			if (*fw->dbg_dest_ver != 0) {
1279 				err = EINVAL;
1280 				goto parse_out;
1281 			}
1282 
1283 			if (fw->dbg_dest_tlv_init)
1284 				break;
1285 			fw->dbg_dest_tlv_init = true;
1286 
1287 			dest_v1 = (void *)tlv_data;
1288 			fw->dbg_dest_tlv_v1 = dest_v1;
1289 			fw->n_dest_reg = tlv_len -
1290 			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
1291 			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
1292 			DPRINTF(("%s: found debug dest; n_dest_reg=%d\n", __func__, fw->n_dest_reg));
1293 			break;
1294 		}
1295 
1296 		case IWX_UCODE_TLV_FW_DBG_CONF: {
1297 			struct iwx_fw_dbg_conf_tlv *conf = (void *)tlv_data;
1298 
1299 			if (!fw->dbg_dest_tlv_init ||
1300 			    conf->id >= nitems(fw->dbg_conf_tlv) ||
1301 			    fw->dbg_conf_tlv[conf->id] != NULL)
1302 				break;
1303 
1304 			DPRINTF(("Found debug configuration: %d\n", conf->id));
1305 			fw->dbg_conf_tlv[conf->id] = conf;
1306 			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
1307 			break;
1308 		}
1309 
1310 		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
1311 			struct iwx_umac_debug_addrs *dbg_ptrs =
1312 				(void *)tlv_data;
1313 
1314 			if (tlv_len != sizeof(*dbg_ptrs)) {
1315 				err = EINVAL;
1316 				goto parse_out;
1317 			}
1318 			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1319 				break;
1320 			sc->sc_uc.uc_umac_error_event_table =
1321 				le32toh(dbg_ptrs->error_info_addr) &
1322 				~IWX_FW_ADDR_CACHE_CONTROL;
1323 			sc->sc_uc.error_event_table_tlv_status |=
1324 				IWX_ERROR_EVENT_TABLE_UMAC;
1325 			break;
1326 		}
1327 
1328 		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
1329 			struct iwx_lmac_debug_addrs *dbg_ptrs =
1330 				(void *)tlv_data;
1331 
1332 			if (tlv_len != sizeof(*dbg_ptrs)) {
1333 				err = EINVAL;
1334 				goto parse_out;
1335 			}
1336 			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1337 				break;
1338 			sc->sc_uc.uc_lmac_error_event_table[0] =
1339 				le32toh(dbg_ptrs->error_event_table_ptr) &
1340 				~IWX_FW_ADDR_CACHE_CONTROL;
1341 			sc->sc_uc.error_event_table_tlv_status |=
1342 				IWX_ERROR_EVENT_TABLE_LMAC1;
1343 			break;
1344 		}
1345 
1346 		case IWX_UCODE_TLV_FW_MEM_SEG:
1347 			break;
1348 
1349 		case IWX_UCODE_TLV_CMD_VERSIONS:
1350 			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
1351 				tlv_len /= sizeof(struct iwx_fw_cmd_version);
1352 				tlv_len *= sizeof(struct iwx_fw_cmd_version);
1353 			}
1354 			if (sc->n_cmd_versions != 0) {
1355 				err = EINVAL;
1356 				goto parse_out;
1357 			}
1358 			if (tlv_len > sizeof(sc->cmd_versions)) {
1359 				err = EINVAL;
1360 				goto parse_out;
1361 			}
1362 			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
1363 			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
1364 			break;
1365 
1366 		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
1367 			break;
1368 
1369 		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
1370 		case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
1371 		case IWX_UCODE_TLV_FW_NUM_STATIONS:
1372 			break;
1373 
1374 		/* undocumented TLVs found in iwx-cc-a0-46 image */
1375 		case 58:
1376 		case 0x1000003:
1377 		case 0x1000004:
1378 			break;
1379 
1380 		/* undocumented TLVs found in iwx-cc-a0-48 image */
1381 		case 0x1000000:
1382 		case 0x1000002:
1383 			break;
1384 
1385 		case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
1386 		case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
1387 		case IWX_UCODE_TLV_TYPE_HCMD:
1388 		case IWX_UCODE_TLV_TYPE_REGIONS:
1389 		case IWX_UCODE_TLV_TYPE_TRIGGERS:
1390 		case IWX_UCODE_TLV_TYPE_CONF_SET:
1391 			break;
1392 
1393 		/* undocumented TLV found in iwx-cc-a0-67 image */
1394 		case 0x100000b:
1395 			break;
1396 
1397 		default:
1398 			err = EINVAL;
1399 			goto parse_out;
1400 		}
1401 
1402 		len -= roundup(tlv_len, 4);
1403 		data += roundup(tlv_len, 4);
1404 	}
1405 
1406 	KASSERT(err == 0);
1407 
1408  parse_out:
1409 	if (err) {
1410 		printf("%s: firmware parse error %d, "
1411 		    "section type %d\n", DEVNAME(sc), err, tlv_type);
1412 	}
1413 
1414  out:
1415 	if (err) {
1416 		fw->fw_status = IWX_FW_STATUS_NONE;
1417 		if (fw->fw_rawdata != NULL)
1418 			iwx_fw_info_free(fw);
1419 	} else
1420 		fw->fw_status = IWX_FW_STATUS_DONE;
1421 	wakeup(&sc->sc_fw);
1422 
1423 	return err;
1424 }
1425 
1426 uint32_t
1427 iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1428 {
1429 	IWX_WRITE(sc,
1430 	    IWX_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
1431 	IWX_BARRIER_READ_WRITE(sc);
1432 	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
1433 }
1434 
1435 uint32_t
1436 iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
1437 {
1438 	iwx_nic_assert_locked(sc);
1439 	return iwx_read_prph_unlocked(sc, addr);
1440 }
1441 
1442 void
1443 iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1444 {
1445 	IWX_WRITE(sc,
1446 	    IWX_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
1447 	IWX_BARRIER_WRITE(sc);
1448 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
1449 }
1450 
1451 void
1452 iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1453 {
1454 	iwx_nic_assert_locked(sc);
1455 	iwx_write_prph_unlocked(sc, addr, val);
1456 }
1457 
1458 void
1459 iwx_write_prph64(struct iwx_softc *sc, uint64_t addr, uint64_t val)
1460 {
1461 	iwx_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1462 	iwx_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1463 }
1464 
1465 int
1466 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1467 {
1468 	int offs, err = 0;
1469 	uint32_t *vals = buf;
1470 
1471 	if (iwx_nic_lock(sc)) {
1472 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1473 		for (offs = 0; offs < dwords; offs++)
1474 			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1475 		iwx_nic_unlock(sc);
1476 	} else {
1477 		err = EBUSY;
1478 	}
1479 	return err;
1480 }
1481 
1482 int
1483 iwx_write_mem(struct iwx_softc *sc, uint32_t addr, const void *buf, int dwords)
1484 {
1485 	int offs;
1486 	const uint32_t *vals = buf;
1487 
1488 	if (iwx_nic_lock(sc)) {
1489 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WADDR, addr);
1490 		/* WADDR auto-increments */
1491 		for (offs = 0; offs < dwords; offs++) {
1492 			uint32_t val = vals ? vals[offs] : 0;
1493 			IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WDAT, val);
1494 		}
1495 		iwx_nic_unlock(sc);
1496 	} else {
1497 		return EBUSY;
1498 	}
1499 	return 0;
1500 }
1501 
1502 int
1503 iwx_write_mem32(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1504 {
1505 	return iwx_write_mem(sc, addr, &val, 1);
1506 }
1507 
1508 int
1509 iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1510     int timo)
1511 {
1512 	for (;;) {
1513 		if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1514 			return 1;
1515 		}
1516 		if (timo < 10) {
1517 			return 0;
1518 		}
1519 		timo -= 10;
1520 		DELAY(10);
1521 	}
1522 }
1523 
1524 int
1525 iwx_nic_lock(struct iwx_softc *sc)
1526 {
1527 	if (sc->sc_nic_locks > 0) {
1528 		iwx_nic_assert_locked(sc);
1529 		sc->sc_nic_locks++;
1530 		return 1; /* already locked */
1531 	}
1532 
1533 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1534 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1535 
1536 	DELAY(2);
1537 
1538 	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
1539 	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1540 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1541 	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1542 		sc->sc_nic_locks++;
1543 		return 1;
1544 	}
1545 
1546 	printf("%s: acquiring device failed\n", DEVNAME(sc));
1547 	return 0;
1548 }
1549 
1550 void
1551 iwx_nic_assert_locked(struct iwx_softc *sc)
1552 {
1553 	if (sc->sc_nic_locks <= 0)
1554 		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1555 }
1556 
1557 void
1558 iwx_nic_unlock(struct iwx_softc *sc)
1559 {
1560 	if (sc->sc_nic_locks > 0) {
1561 		if (--sc->sc_nic_locks == 0)
1562 			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1563 			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1564 	} else
1565 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1566 }
1567 
1568 int
1569 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1570     uint32_t mask)
1571 {
1572 	uint32_t val;
1573 
1574 	if (iwx_nic_lock(sc)) {
1575 		val = iwx_read_prph(sc, reg) & mask;
1576 		val |= bits;
1577 		iwx_write_prph(sc, reg, val);
1578 		iwx_nic_unlock(sc);
1579 		return 0;
1580 	}
1581 	return EBUSY;
1582 }
1583 
1584 int
1585 iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1586 {
1587 	return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
1588 }
1589 
1590 int
1591 iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1592 {
1593 	return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
1594 }
1595 
1596 int
1597 iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
1598     bus_size_t size, bus_size_t alignment)
1599 {
1600 	int nsegs, err;
1601 	caddr_t va;
1602 
1603 	dma->tag = tag;
1604 	dma->size = size;
1605 
1606 	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1607 	    &dma->map);
1608 	if (err)
1609 		goto fail;
1610 
1611 	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1612 	    BUS_DMA_NOWAIT);
1613 	if (err)
1614 		goto fail;
1615 
1616 	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1617 	    BUS_DMA_NOWAIT);
1618 	if (err)
1619 		goto fail;
1620 	dma->vaddr = va;
1621 
1622 	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1623 	    BUS_DMA_NOWAIT);
1624 	if (err)
1625 		goto fail;
1626 
1627 	memset(dma->vaddr, 0, size);
1628 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1629 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1630 
1631 	return 0;
1632 
1633 fail:	iwx_dma_contig_free(dma);
1634 	return err;
1635 }
1636 
1637 void
1638 iwx_dma_contig_free(struct iwx_dma_info *dma)
1639 {
1640 	if (dma->map != NULL) {
1641 		if (dma->vaddr != NULL) {
1642 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1643 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1644 			bus_dmamap_unload(dma->tag, dma->map);
1645 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1646 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1647 			dma->vaddr = NULL;
1648 		}
1649 		bus_dmamap_destroy(dma->tag, dma->map);
1650 		dma->map = NULL;
1651 	}
1652 }
1653 
1654 int
1655 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1656 {
1657 	bus_size_t size;
1658 	int i, err;
1659 
1660 	ring->cur = 0;
1661 
1662 	/* Allocate RX descriptors (256-byte aligned). */
1663 	size = IWX_RX_MQ_RING_COUNT * sizeof(uint64_t);
1664 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
1665 	if (err) {
1666 		printf("%s: could not allocate RX ring DMA memory\n",
1667 		    DEVNAME(sc));
1668 		goto fail;
1669 	}
1670 	ring->desc = ring->free_desc_dma.vaddr;
1671 
1672 	/* Allocate RX status area (16-byte aligned). */
1673 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1674 	    sizeof(*ring->stat), 16);
1675 	if (err) {
1676 		printf("%s: could not allocate RX status DMA memory\n",
1677 		    DEVNAME(sc));
1678 		goto fail;
1679 	}
1680 	ring->stat = ring->stat_dma.vaddr;
1681 
1682 	size = IWX_RX_MQ_RING_COUNT * sizeof(uint32_t);
1683 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1684 	    size, 256);
1685 	if (err) {
1686 		printf("%s: could not allocate RX ring DMA memory\n",
1687 		    DEVNAME(sc));
1688 		goto fail;
1689 	}
1690 
1691 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1692 		struct iwx_rx_data *data = &ring->data[i];
1693 
1694 		memset(data, 0, sizeof(*data));
1695 		err = bus_dmamap_create(sc->sc_dmat, IWX_RBUF_SIZE, 1,
1696 		    IWX_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1697 		    &data->map);
1698 		if (err) {
1699 			printf("%s: could not create RX buf DMA map\n",
1700 			    DEVNAME(sc));
1701 			goto fail;
1702 		}
1703 
1704 		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
1705 		if (err)
1706 			goto fail;
1707 	}
1708 	return 0;
1709 
1710 fail:	iwx_free_rx_ring(sc, ring);
1711 	return err;
1712 }
1713 
1714 void
1715 iwx_disable_rx_dma(struct iwx_softc *sc)
1716 {
1717 	int ntries;
1718 
1719 	if (iwx_nic_lock(sc)) {
1720 		iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
1721 		for (ntries = 0; ntries < 1000; ntries++) {
1722 			if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
1723 			    IWX_RXF_DMA_IDLE)
1724 				break;
1725 			DELAY(10);
1726 		}
1727 		iwx_nic_unlock(sc);
1728 	}
1729 }
1730 
1731 void
1732 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1733 {
1734 	ring->cur = 0;
1735 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1736 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1737 	memset(ring->stat, 0, sizeof(*ring->stat));
1738 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1739 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1740 
1741 }
1742 
1743 void
1744 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1745 {
1746 	int i;
1747 
1748 	iwx_dma_contig_free(&ring->free_desc_dma);
1749 	iwx_dma_contig_free(&ring->stat_dma);
1750 	iwx_dma_contig_free(&ring->used_desc_dma);
1751 
1752 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1753 		struct iwx_rx_data *data = &ring->data[i];
1754 
1755 		if (data->m != NULL) {
1756 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1757 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1758 			bus_dmamap_unload(sc->sc_dmat, data->map);
1759 			m_freem(data->m);
1760 			data->m = NULL;
1761 		}
1762 		if (data->map != NULL)
1763 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1764 	}
1765 }
1766 
1767 int
1768 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
1769 {
1770 	bus_addr_t paddr;
1771 	bus_size_t size;
1772 	int i, err;
1773 
1774 	ring->qid = qid;
1775 	ring->queued = 0;
1776 	ring->cur = 0;
1777 	ring->tail = 0;
1778 
1779 	/* Allocate TX descriptors (256-byte aligned). */
1780 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
1781 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1782 	if (err) {
1783 		printf("%s: could not allocate TX ring DMA memory\n",
1784 		    DEVNAME(sc));
1785 		goto fail;
1786 	}
1787 	ring->desc = ring->desc_dma.vaddr;
1788 
1789 	/*
1790 	 * The hardware supports up to 512 Tx rings which is more
1791 	 * than we currently need.
1792 	 *
1793 	 * In DQA mode we use 1 command queue + 1 default queue for
1794 	 * management, control, and non-QoS data frames.
1795 	 * The command is queue sc->txq[0], our default queue is sc->txq[1].
1796 	 *
1797 	 * Tx aggregation requires additional queues, one queue per TID for
1798 	 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
1799 	 * Firmware may assign its own internal IDs for these queues
1800 	 * depending on which TID gets aggregation enabled first.
1801 	 * The driver maintains a table mapping driver-side queue IDs
1802 	 * to firmware-side queue IDs.
1803 	 */
1804 
1805 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl,
1806 	    sizeof(struct iwx_agn_scd_bc_tbl), 0);
1807 	if (err) {
1808 		printf("%s: could not allocate byte count table DMA memory\n",
1809 		    DEVNAME(sc));
1810 		goto fail;
1811 	}
1812 
1813 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
1814 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
1815 	    IWX_FIRST_TB_SIZE_ALIGN);
1816 	if (err) {
1817 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1818 		goto fail;
1819 	}
1820 	ring->cmd = ring->cmd_dma.vaddr;
1821 
1822 	paddr = ring->cmd_dma.paddr;
1823 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1824 		struct iwx_tx_data *data = &ring->data[i];
1825 		size_t mapsize;
1826 
1827 		data->cmd_paddr = paddr;
1828 		paddr += sizeof(struct iwx_device_cmd);
1829 
1830 		/* FW commands may require more mapped space than packets. */
1831 		if (qid == IWX_DQA_CMD_QUEUE)
1832 			mapsize = (sizeof(struct iwx_cmd_header) +
1833 			    IWX_MAX_CMD_PAYLOAD_SIZE);
1834 		else
1835 			mapsize = MCLBYTES;
1836 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
1837 		    IWX_TFH_NUM_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1838 		    &data->map);
1839 		if (err) {
1840 			printf("%s: could not create TX buf DMA map\n",
1841 			    DEVNAME(sc));
1842 			goto fail;
1843 		}
1844 	}
1845 	KASSERT(paddr == ring->cmd_dma.paddr + size);
1846 	return 0;
1847 
1848 fail:	iwx_free_tx_ring(sc, ring);
1849 	return err;
1850 }
1851 
1852 void
1853 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1854 {
1855 	int i;
1856 
1857 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1858 		struct iwx_tx_data *data = &ring->data[i];
1859 
1860 		if (data->m != NULL) {
1861 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1862 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1863 			bus_dmamap_unload(sc->sc_dmat, data->map);
1864 			m_freem(data->m);
1865 			data->m = NULL;
1866 		}
1867 	}
1868 
1869 	/* Clear byte count table. */
1870 	memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
1871 
1872 	/* Clear TX descriptors. */
1873 	memset(ring->desc, 0, ring->desc_dma.size);
1874 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1875 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1876 	sc->qfullmsk &= ~(1 << ring->qid);
1877 	sc->qenablemsk &= ~(1 << ring->qid);
1878 	for (i = 0; i < nitems(sc->aggqid); i++) {
1879 		if (sc->aggqid[i] == ring->qid) {
1880 			sc->aggqid[i] = 0;
1881 			break;
1882 		}
1883 	}
1884 	ring->queued = 0;
1885 	ring->cur = 0;
1886 	ring->tail = 0;
1887 	ring->tid = 0;
1888 }
1889 
1890 void
1891 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1892 {
1893 	int i;
1894 
1895 	iwx_dma_contig_free(&ring->desc_dma);
1896 	iwx_dma_contig_free(&ring->cmd_dma);
1897 	iwx_dma_contig_free(&ring->bc_tbl);
1898 
1899 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1900 		struct iwx_tx_data *data = &ring->data[i];
1901 
1902 		if (data->m != NULL) {
1903 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1904 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1905 			bus_dmamap_unload(sc->sc_dmat, data->map);
1906 			m_freem(data->m);
1907 			data->m = NULL;
1908 		}
1909 		if (data->map != NULL)
1910 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1911 	}
1912 }
1913 
1914 void
1915 iwx_enable_rfkill_int(struct iwx_softc *sc)
1916 {
1917 	if (!sc->sc_msix) {
1918 		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
1919 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1920 	} else {
1921 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1922 		    sc->sc_fh_init_mask);
1923 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1924 		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1925 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1926 	}
1927 
1928 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1929 	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1930 }
1931 
1932 int
1933 iwx_check_rfkill(struct iwx_softc *sc)
1934 {
1935 	uint32_t v;
1936 	int rv;
1937 
1938 	/*
1939 	 * "documentation" is not really helpful here:
1940 	 *  27:	HW_RF_KILL_SW
1941 	 *	Indicates state of (platform's) hardware RF-Kill switch
1942 	 *
1943 	 * But apparently when it's off, it's on ...
1944 	 */
1945 	v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
1946 	rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1947 	if (rv) {
1948 		sc->sc_flags |= IWX_FLAG_RFKILL;
1949 	} else {
1950 		sc->sc_flags &= ~IWX_FLAG_RFKILL;
1951 	}
1952 
1953 	return rv;
1954 }
1955 
1956 void
1957 iwx_enable_interrupts(struct iwx_softc *sc)
1958 {
1959 	if (!sc->sc_msix) {
1960 		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
1961 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1962 	} else {
1963 		/*
1964 		 * fh/hw_mask keeps all the unmasked causes.
1965 		 * Unlike msi, in msix cause is enabled when it is unset.
1966 		 */
1967 		sc->sc_hw_mask = sc->sc_hw_init_mask;
1968 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1969 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1970 		    ~sc->sc_fh_mask);
1971 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1972 		    ~sc->sc_hw_mask);
1973 	}
1974 }
1975 
1976 void
1977 iwx_enable_fwload_interrupt(struct iwx_softc *sc)
1978 {
1979 	if (!sc->sc_msix) {
1980 		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
1981 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1982 	} else {
1983 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1984 		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
1985 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
1986 		/*
1987 		 * Leave all the FH causes enabled to get the ALIVE
1988 		 * notification.
1989 		 */
1990 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1991 		    ~sc->sc_fh_init_mask);
1992 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1993 	}
1994 }
1995 
1996 void
1997 iwx_restore_interrupts(struct iwx_softc *sc)
1998 {
1999 	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2000 }
2001 
2002 void
2003 iwx_disable_interrupts(struct iwx_softc *sc)
2004 {
2005 	if (!sc->sc_msix) {
2006 		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
2007 
2008 		/* acknowledge all interrupts */
2009 		IWX_WRITE(sc, IWX_CSR_INT, ~0);
2010 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
2011 	} else {
2012 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2013 		    sc->sc_fh_init_mask);
2014 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2015 		    sc->sc_hw_init_mask);
2016 	}
2017 }
2018 
2019 void
2020 iwx_ict_reset(struct iwx_softc *sc)
2021 {
2022 	iwx_disable_interrupts(sc);
2023 
2024 	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
2025 	sc->ict_cur = 0;
2026 
2027 	/* Set physical address of ICT (4KB aligned). */
2028 	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
2029 	    IWX_CSR_DRAM_INT_TBL_ENABLE
2030 	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
2031 	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
2032 	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);
2033 
2034 	/* Switch to ICT interrupt mode in driver. */
2035 	sc->sc_flags |= IWX_FLAG_USE_ICT;
2036 
2037 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
2038 	iwx_enable_interrupts(sc);
2039 }
2040 
2041 #define IWX_HW_READY_TIMEOUT 50
2042 int
2043 iwx_set_hw_ready(struct iwx_softc *sc)
2044 {
2045 	int ready;
2046 
2047 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2048 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2049 
2050 	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
2051 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2052 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2053 	    IWX_HW_READY_TIMEOUT);
2054 	if (ready)
2055 		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
2056 		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);
2057 
2058 	return ready;
2059 }
2060 #undef IWX_HW_READY_TIMEOUT
2061 
2062 int
2063 iwx_prepare_card_hw(struct iwx_softc *sc)
2064 {
2065 	int t = 0;
2066 	int ntries;
2067 
2068 	if (iwx_set_hw_ready(sc))
2069 		return 0;
2070 
2071 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2072 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2073 	DELAY(1000);
2074 
2075 	for (ntries = 0; ntries < 10; ntries++) {
2076 		/* If HW is not ready, prepare the conditions to check again */
2077 		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2078 		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);
2079 
2080 		do {
2081 			if (iwx_set_hw_ready(sc))
2082 				return 0;
2083 			DELAY(200);
2084 			t += 200;
2085 		} while (t < 150000);
2086 		DELAY(25000);
2087 	}
2088 
2089 	return ETIMEDOUT;
2090 }
2091 
2092 int
2093 iwx_force_power_gating(struct iwx_softc *sc)
2094 {
2095 	int err;
2096 
2097 	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2098 	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2099 	if (err)
2100 		return err;
2101 	DELAY(20);
2102 	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2103 	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
2104 	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
2105 	if (err)
2106 		return err;
2107 	DELAY(20);
2108 	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2109 	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2110 	return err;
2111 }
2112 
2113 void
2114 iwx_apm_config(struct iwx_softc *sc)
2115 {
2116 	pcireg_t lctl, cap;
2117 
2118 	/*
2119 	 * L0S states have been found to be unstable with our devices
2120 	 * and in newer hardware they are not officially supported at
2121 	 * all, so we must always set the L0S_DISABLED bit.
2122 	 */
2123 	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
2124 
2125 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2126 	    sc->sc_cap_off + PCI_PCIE_LCSR);
2127 	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
2128 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2129 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
2130 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
2131 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
2132 	    DEVNAME(sc),
2133 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
2134 	    sc->sc_ltr_enabled ? "En" : "Dis"));
2135 }
2136 
2137 /*
2138  * Start up NIC's basic functionality after it has been reset
2139  * e.g. after platform boot or shutdown.
2140  * NOTE:  This does not load uCode nor start the embedded processor
2141  */
2142 int
2143 iwx_apm_init(struct iwx_softc *sc)
2144 {
2145 	int err = 0;
2146 
2147 	/*
2148 	 * Disable L0s without affecting L1;
2149 	 *  don't wait for ICH L0s (ICH bug W/A)
2150 	 */
2151 	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
2152 	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
2153 
2154 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
2155 	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
2156 
2157 	/*
2158 	 * Enable HAP INTA (interrupt from management bus) to
2159 	 * wake device's PCI Express link L1a -> L0s
2160 	 */
2161 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2162 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
2163 
2164 	iwx_apm_config(sc);
2165 
2166 	/*
2167 	 * Set "initialization complete" bit to move adapter from
2168 	 * D0U* --> D0A* (powered-up active) state.
2169 	 */
2170 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2171 
2172 	/*
2173 	 * Wait for clock stabilization; once stabilized, access to
2174 	 * device-internal resources is supported, e.g. iwx_write_prph()
2175 	 * and accesses to uCode SRAM.
2176 	 */
2177 	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2178 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2179 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2180 		printf("%s: timeout waiting for clock stabilization\n",
2181 		    DEVNAME(sc));
2182 		err = ETIMEDOUT;
2183 		goto out;
2184 	}
2185  out:
2186 	if (err)
2187 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
2188 	return err;
2189 }
2190 
2191 void
2192 iwx_apm_stop(struct iwx_softc *sc)
2193 {
2194 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2195 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2196 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2197 	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
2198 	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
2199 	DELAY(1000);
2200 	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2201 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2202 	DELAY(5000);
2203 
2204 	/* stop device's busmaster DMA activity */
2205 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);
2206 
2207 	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
2208 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
2209 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
2210 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
2211 
2212 	/*
2213 	 * Clear "initialization complete" bit to move adapter from
2214 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
2215 	 */
2216 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2217 	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2218 }
2219 
2220 void
2221 iwx_init_msix_hw(struct iwx_softc *sc)
2222 {
2223 	iwx_conf_msix_hw(sc, 0);
2224 
2225 	if (!sc->sc_msix)
2226 		return;
2227 
2228 	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
2229 	sc->sc_fh_mask = sc->sc_fh_init_mask;
2230 	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
2231 	sc->sc_hw_mask = sc->sc_hw_init_mask;
2232 }
2233 
2234 void
2235 iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
2236 {
2237 	int vector = 0;
2238 
2239 	if (!sc->sc_msix) {
2240 		/* Newer chips default to MSIX. */
2241 		if (!stopped && iwx_nic_lock(sc)) {
2242 			iwx_write_prph(sc, IWX_UREG_CHICK,
2243 			    IWX_UREG_CHICK_MSI_ENABLE);
2244 			iwx_nic_unlock(sc);
2245 		}
2246 		return;
2247 	}
2248 
2249 	if (!stopped && iwx_nic_lock(sc)) {
2250 		iwx_write_prph(sc, IWX_UREG_CHICK, IWX_UREG_CHICK_MSIX_ENABLE);
2251 		iwx_nic_unlock(sc);
2252 	}
2253 
2254 	/* Disable all interrupts */
2255 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
2256 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
2257 
2258 	/* Map fallback-queue (command/mgmt) to a single vector */
2259 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
2260 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2261 	/* Map RSS queue (data) to the same vector */
2262 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
2263 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2264 
2265 	/* Enable the RX queues cause interrupts */
2266 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2267 	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
2268 
2269 	/* Map non-RX causes to the same vector */
2270 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
2271 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2272 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
2273 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2274 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
2275 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2276 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
2277 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2278 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
2279 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2280 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
2281 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2282 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_IML),
2283 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2284 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
2285 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2286 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
2287 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2288 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
2289 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2290 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
2291 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2292 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
2293 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2294 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
2295 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2296 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
2297 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2298 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
2299 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2300 
2301 	/* Enable non-RX causes interrupts */
2302 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2303 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2304 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2305 	    IWX_MSIX_FH_INT_CAUSES_S2D |
2306 	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
2307 	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2308 	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
2309 	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2310 	    IWX_MSIX_HW_INT_CAUSES_REG_IML |
2311 	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2312 	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2313 	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2314 	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2315 	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
2316 	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
2317 	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2318 	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
2319 }
2320 
2321 int
2322 iwx_clear_persistence_bit(struct iwx_softc *sc)
2323 {
2324 	uint32_t hpm, wprot;
2325 
2326 	hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
2327 	if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
2328 		wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
2329 		if (wprot & IWX_PREG_WFPM_ACCESS) {
2330 			printf("%s: cannot clear persistence bit\n",
2331 			    DEVNAME(sc));
2332 			return EPERM;
2333 		}
2334 		iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
2335 		    hpm & ~IWX_PERSISTENCE_BIT);
2336 	}
2337 
2338 	return 0;
2339 }
2340 
2341 int
2342 iwx_start_hw(struct iwx_softc *sc)
2343 {
2344 	int err;
2345 
2346 	err = iwx_prepare_card_hw(sc);
2347 	if (err)
2348 		return err;
2349 
2350 	err = iwx_clear_persistence_bit(sc);
2351 	if (err)
2352 		return err;
2353 
2354 	/* Reset the entire device */
2355 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2356 	DELAY(5000);
2357 
2358 	if (sc->sc_integrated) {
2359 		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2360 		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2361 		DELAY(20);
2362 		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2363 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2364 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2365 			printf("%s: timeout waiting for clock stabilization\n",
2366 			    DEVNAME(sc));
2367 			return ETIMEDOUT;
2368 		}
2369 
2370 		err = iwx_force_power_gating(sc);
2371 		if (err)
2372 			return err;
2373 
2374 		/* Reset the entire device */
2375 		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2376 		DELAY(5000);
2377 	}
2378 
2379 	err = iwx_apm_init(sc);
2380 	if (err)
2381 		return err;
2382 
2383 	iwx_init_msix_hw(sc);
2384 
2385 	iwx_enable_rfkill_int(sc);
2386 	iwx_check_rfkill(sc);
2387 
2388 	return 0;
2389 }
2390 
2391 void
2392 iwx_stop_device(struct iwx_softc *sc)
2393 {
2394 	struct ieee80211com *ic = &sc->sc_ic;
2395 	struct ieee80211_node *ni = ic->ic_bss;
2396 	int i;
2397 
2398 	iwx_disable_interrupts(sc);
2399 	sc->sc_flags &= ~IWX_FLAG_USE_ICT;
2400 
2401 	iwx_disable_rx_dma(sc);
2402 	iwx_reset_rx_ring(sc, &sc->rxq);
2403 	for (i = 0; i < nitems(sc->txq); i++)
2404 		iwx_reset_tx_ring(sc, &sc->txq[i]);
2405 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
2406 		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
2407 		if (ba->ba_state != IEEE80211_BA_AGREED)
2408 			continue;
2409 		ieee80211_delba_request(ic, ni, 0, 1, i);
2410 	}
2411 
2412 	/* Make sure (redundant) we've released our request to stay awake */
2413 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2414 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2415 	if (sc->sc_nic_locks > 0)
2416 		printf("%s: %d active NIC locks forcefully cleared\n",
2417 		    DEVNAME(sc), sc->sc_nic_locks);
2418 	sc->sc_nic_locks = 0;
2419 
2420 	/* Stop the device, and put it in low power state */
2421 	iwx_apm_stop(sc);
2422 
2423 	/* Reset the on-board processor. */
2424 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2425 	DELAY(5000);
2426 
2427 	/*
2428 	 * Upon stop, the IVAR table gets erased, so msi-x won't
2429 	 * work. This causes a bug in RF-KILL flows, since the interrupt
2430 	 * that enables radio won't fire on the correct irq, and the
2431 	 * driver won't be able to handle the interrupt.
2432 	 * Configure the IVAR table again after reset.
2433 	 */
2434 	iwx_conf_msix_hw(sc, 1);
2435 
2436 	/*
2437 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2438 	 * Clear the interrupt again.
2439 	 */
2440 	iwx_disable_interrupts(sc);
2441 
2442 	/* Even though we stop the HW we still want the RF kill interrupt. */
2443 	iwx_enable_rfkill_int(sc);
2444 	iwx_check_rfkill(sc);
2445 
2446 	iwx_prepare_card_hw(sc);
2447 
2448 	iwx_ctxt_info_free_paging(sc);
2449 }
2450 
2451 void
2452 iwx_nic_config(struct iwx_softc *sc)
2453 {
2454 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2455 	uint32_t mask, val, reg_val = 0;
2456 
2457 	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2458 	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2459 	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2460 	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
2461 	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2462 	    IWX_FW_PHY_CFG_RADIO_DASH_POS;
2463 
2464 	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2465 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2466 	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2467 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2468 
2469 	/* radio configuration */
2470 	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2471 	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2472 	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2473 
2474 	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2475 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2476 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2477 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2478 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2479 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2480 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2481 
2482 	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2483 	val &= ~mask;
2484 	val |= reg_val;
2485 	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2486 }
2487 
2488 int
2489 iwx_nic_rx_init(struct iwx_softc *sc)
2490 {
2491 	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
2492 
2493 	/*
2494 	 * We don't configure the RFH; the firmware will do that.
2495 	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
2496 	 */
2497 	return 0;
2498 }
2499 
2500 int
2501 iwx_nic_init(struct iwx_softc *sc)
2502 {
2503 	int err;
2504 
2505 	iwx_apm_init(sc);
2506 	iwx_nic_config(sc);
2507 
2508 	err = iwx_nic_rx_init(sc);
2509 	if (err)
2510 		return err;
2511 
2512 	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2513 
2514 	return 0;
2515 }
2516 
2517 /* Map a TID to an ieee80211_edca_ac category. */
2518 const uint8_t iwx_tid_to_ac[IWX_MAX_TID_COUNT] = {
2519 	EDCA_AC_BE,
2520 	EDCA_AC_BK,
2521 	EDCA_AC_BK,
2522 	EDCA_AC_BE,
2523 	EDCA_AC_VI,
2524 	EDCA_AC_VI,
2525 	EDCA_AC_VO,
2526 	EDCA_AC_VO,
2527 };
2528 
2529 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2530 const uint8_t iwx_ac_to_tx_fifo[] = {
2531 	IWX_GEN2_EDCA_TX_FIFO_BE,
2532 	IWX_GEN2_EDCA_TX_FIFO_BK,
2533 	IWX_GEN2_EDCA_TX_FIFO_VI,
2534 	IWX_GEN2_EDCA_TX_FIFO_VO,
2535 };
2536 
2537 int
2538 iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
2539     int num_slots)
2540 {
2541 	struct iwx_tx_queue_cfg_cmd cmd;
2542 	struct iwx_rx_packet *pkt;
2543 	struct iwx_tx_queue_cfg_rsp *resp;
2544 	struct iwx_host_cmd hcmd = {
2545 		.id = IWX_SCD_QUEUE_CFG,
2546 		.flags = IWX_CMD_WANT_RESP,
2547 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2548 	};
2549 	struct iwx_tx_ring *ring = &sc->txq[qid];
2550 	int err, fwqid;
2551 	uint32_t wr_idx;
2552 	size_t resp_len;
2553 
2554 	iwx_reset_tx_ring(sc, ring);
2555 
2556 	memset(&cmd, 0, sizeof(cmd));
2557 	cmd.sta_id = sta_id;
2558 	cmd.tid = tid;
2559 	cmd.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
2560 	cmd.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2561 	cmd.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
2562 	cmd.tfdq_addr = htole64(ring->desc_dma.paddr);
2563 
2564 	hcmd.data[0] = &cmd;
2565 	hcmd.len[0] = sizeof(cmd);
2566 
2567 	err = iwx_send_cmd(sc, &hcmd);
2568 	if (err)
2569 		return err;
2570 
2571 	pkt = hcmd.resp_pkt;
2572 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2573 		DPRINTF(("SCD_QUEUE_CFG command failed\n"));
2574 		err = EIO;
2575 		goto out;
2576 	}
2577 
2578 	resp_len = iwx_rx_packet_payload_len(pkt);
2579 	if (resp_len != sizeof(*resp)) {
2580 		DPRINTF(("SCD_QUEUE_CFG returned %zu bytes, expected %zu bytes\n", resp_len, sizeof(*resp)));
2581 		err = EIO;
2582 		goto out;
2583 	}
2584 
2585 	resp = (void *)pkt->data;
2586 	fwqid = le16toh(resp->queue_number);
2587 	wr_idx = le16toh(resp->write_pointer);
2588 
2589 	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
2590 	if (fwqid != qid) {
2591 		DPRINTF(("requested qid %d but %d was assigned\n", qid, fwqid));
2592 		err = EIO;
2593 		goto out;
2594 	}
2595 
2596 	if (wr_idx != ring->cur) {
2597 		DPRINTF(("fw write index is %d but ring is %d\n", wr_idx, ring->cur));
2598 		err = EIO;
2599 		goto out;
2600 	}
2601 
2602 	sc->qenablemsk |= (1 << qid);
2603 	ring->tid = tid;
2604 out:
2605 	iwx_free_resp(sc, &hcmd);
2606 	return err;
2607 }
2608 
2609 int
2610 iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
2611 {
2612 	struct iwx_tx_queue_cfg_cmd cmd;
2613 	struct iwx_rx_packet *pkt;
2614 	struct iwx_tx_queue_cfg_rsp *resp;
2615 	struct iwx_host_cmd hcmd = {
2616 		.id = IWX_SCD_QUEUE_CFG,
2617 		.flags = IWX_CMD_WANT_RESP,
2618 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2619 	};
2620 	struct iwx_tx_ring *ring = &sc->txq[qid];
2621 	int err;
2622 
2623 	memset(&cmd, 0, sizeof(cmd));
2624 	cmd.sta_id = sta_id;
2625 	cmd.tid = tid;
2626 	cmd.flags = htole16(0); /* clear "queue enabled" flag */
2627 	cmd.cb_size = htole32(0);
2628 	cmd.byte_cnt_addr = htole64(0);
2629 	cmd.tfdq_addr = htole64(0);
2630 
2631 	hcmd.data[0] = &cmd;
2632 	hcmd.len[0] = sizeof(cmd);
2633 
2634 	err = iwx_send_cmd(sc, &hcmd);
2635 	if (err)
2636 		return err;
2637 
2638 	pkt = hcmd.resp_pkt;
2639 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2640 		DPRINTF(("SCD_QUEUE_CFG command failed\n"));
2641 		err = EIO;
2642 		goto out;
2643 	}
2644 
2645 	sc->qenablemsk &= ~(1 << qid);
2646 	iwx_reset_tx_ring(sc, ring);
2647 out:
2648 	iwx_free_resp(sc, &hcmd);
2649 	return err;
2650 }
2651 
2652 void
2653 iwx_post_alive(struct iwx_softc *sc)
2654 {
2655 	iwx_ict_reset(sc);
2656 }
2657 
2658 /*
2659  * For the high priority TE use a time event type that has similar priority to
2660  * the FW's action scan priority.
2661  */
2662 #define IWX_ROC_TE_TYPE_NORMAL IWX_TE_P2P_DEVICE_DISCOVERABLE
2663 #define IWX_ROC_TE_TYPE_MGMT_TX IWX_TE_P2P_CLIENT_ASSOC
2664 
2665 int
2666 iwx_send_time_event_cmd(struct iwx_softc *sc,
2667     const struct iwx_time_event_cmd *cmd)
2668 {
2669 	struct iwx_rx_packet *pkt;
2670 	struct iwx_time_event_resp *resp;
2671 	struct iwx_host_cmd hcmd = {
2672 		.id = IWX_TIME_EVENT_CMD,
2673 		.flags = IWX_CMD_WANT_RESP,
2674 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2675 	};
2676 	uint32_t resp_len;
2677 	int err;
2678 
2679 	hcmd.data[0] = cmd;
2680 	hcmd.len[0] = sizeof(*cmd);
2681 	err = iwx_send_cmd(sc, &hcmd);
2682 	if (err)
2683 		return err;
2684 
2685 	pkt = hcmd.resp_pkt;
2686 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2687 		err = EIO;
2688 		goto out;
2689 	}
2690 
2691 	resp_len = iwx_rx_packet_payload_len(pkt);
2692 	if (resp_len != sizeof(*resp)) {
2693 		err = EIO;
2694 		goto out;
2695 	}
2696 
2697 	resp = (void *)pkt->data;
2698 	if (le32toh(resp->status) == 0)
2699 		sc->sc_time_event_uid = le32toh(resp->unique_id);
2700 	else
2701 		err = EIO;
2702 out:
2703 	iwx_free_resp(sc, &hcmd);
2704 	return err;
2705 }
2706 
2707 int
2708 iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
2709     uint32_t duration)
2710 {
2711 	struct iwx_session_prot_cmd cmd = {
2712 		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
2713 		    in->in_color)),
2714 		.action = htole32(IWX_FW_CTXT_ACTION_ADD),
2715 		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
2716 		.duration_tu = htole32(duration * IEEE80211_DUR_TU),
2717 	};
2718 	uint32_t cmd_id;
2719 
2720 	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
2721 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
2722 }
2723 
2724 /*
2725  * NVM read access and content parsing.  We do not support
2726  * external NVM or writing NVM.
2727  */
2728 
2729 uint8_t
2730 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
2731 {
2732 	uint8_t tx_ant;
2733 
2734 	tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
2735 	    >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
2736 
2737 	if (sc->sc_nvm.valid_tx_ant)
2738 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2739 
2740 	return tx_ant;
2741 }
2742 
2743 uint8_t
2744 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
2745 {
2746 	uint8_t rx_ant;
2747 
2748 	rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
2749 	    >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
2750 
2751 	if (sc->sc_nvm.valid_rx_ant)
2752 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2753 
2754 	return rx_ant;
2755 }
2756 
2757 void
2758 iwx_init_channel_map(struct iwx_softc *sc, uint16_t *channel_profile_v3,
2759     uint32_t *channel_profile_v4, int nchan_profile)
2760 {
2761 	struct ieee80211com *ic = &sc->sc_ic;
2762 	struct iwx_nvm_data *data = &sc->sc_nvm;
2763 	int ch_idx;
2764 	struct ieee80211_channel *channel;
2765 	uint32_t ch_flags;
2766 	int is_5ghz;
2767 	int flags, hw_value;
2768 	int nchan;
2769 	const uint8_t *nvm_channels;
2770 
2771 	if (sc->sc_uhb_supported) {
2772 		nchan = nitems(iwx_nvm_channels_uhb);
2773 		nvm_channels = iwx_nvm_channels_uhb;
2774 	} else {
2775 		nchan = nitems(iwx_nvm_channels_8000);
2776 		nvm_channels = iwx_nvm_channels_8000;
2777 	}
2778 
2779 	for (ch_idx = 0; ch_idx < nchan && ch_idx < nchan_profile; ch_idx++) {
2780 		if (channel_profile_v4)
2781 			ch_flags = le32_to_cpup(channel_profile_v4 + ch_idx);
2782 		else
2783 			ch_flags = le16_to_cpup(channel_profile_v3 + ch_idx);
2784 
2785 		is_5ghz = ch_idx >= IWX_NUM_2GHZ_CHANNELS;
2786 		if (is_5ghz && !data->sku_cap_band_52GHz_enable)
2787 			ch_flags &= ~IWX_NVM_CHANNEL_VALID;
2788 
2789 		hw_value = nvm_channels[ch_idx];
2790 		channel = &ic->ic_channels[hw_value];
2791 
2792 		if (!(ch_flags & IWX_NVM_CHANNEL_VALID)) {
2793 			channel->ic_freq = 0;
2794 			channel->ic_flags = 0;
2795 			continue;
2796 		}
2797 
2798 		if (!is_5ghz) {
2799 			flags = IEEE80211_CHAN_2GHZ;
2800 			channel->ic_flags
2801 			    = IEEE80211_CHAN_CCK
2802 			    | IEEE80211_CHAN_OFDM
2803 			    | IEEE80211_CHAN_DYN
2804 			    | IEEE80211_CHAN_2GHZ;
2805 		} else {
2806 			flags = IEEE80211_CHAN_5GHZ;
2807 			channel->ic_flags =
2808 			    IEEE80211_CHAN_A;
2809 		}
2810 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2811 
2812 		if (!(ch_flags & IWX_NVM_CHANNEL_ACTIVE))
2813 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2814 
2815 		if (data->sku_cap_11n_enable) {
2816 			channel->ic_flags |= IEEE80211_CHAN_HT;
2817 			if (ch_flags & IWX_NVM_CHANNEL_40MHZ)
2818 				channel->ic_flags |= IEEE80211_CHAN_40MHZ;
2819 		}
2820 
2821 		if (is_5ghz && data->sku_cap_11ac_enable) {
2822 			channel->ic_flags |= IEEE80211_CHAN_VHT;
2823 			if (ch_flags & IWX_NVM_CHANNEL_80MHZ)
2824 				channel->ic_xflags |= IEEE80211_CHANX_80MHZ;
2825 		}
2826 	}
2827 }
2828 
2829 int
2830 iwx_mimo_enabled(struct iwx_softc *sc)
2831 {
2832 	struct ieee80211com *ic = &sc->sc_ic;
2833 
2834 	return !sc->sc_nvm.sku_cap_mimo_disable &&
2835 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
2836 }
2837 
2838 void
2839 iwx_setup_ht_rates(struct iwx_softc *sc)
2840 {
2841 	struct ieee80211com *ic = &sc->sc_ic;
2842 	uint8_t rx_ant;
2843 
2844 	/* TX is supported with the same MCS as RX. */
2845 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2846 
2847 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
2848 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
2849 
2850 	if (!iwx_mimo_enabled(sc))
2851 		return;
2852 
2853 	rx_ant = iwx_fw_valid_rx_ant(sc);
2854 	if ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
2855 	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)
2856 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
2857 }
2858 
2859 void
2860 iwx_setup_vht_rates(struct iwx_softc *sc)
2861 {
2862 	struct ieee80211com *ic = &sc->sc_ic;
2863 	uint8_t rx_ant = iwx_fw_valid_rx_ant(sc);
2864 	int n;
2865 
2866 	ic->ic_vht_rxmcs = (IEEE80211_VHT_MCS_0_9 <<
2867 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(1));
2868 
2869 	if (iwx_mimo_enabled(sc) &&
2870 	    ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
2871 	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)) {
2872 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_0_9 <<
2873 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
2874 	} else {
2875 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
2876 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
2877 	}
2878 
2879 	for (n = 3; n <= IEEE80211_VHT_NUM_SS; n++) {
2880 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
2881 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(n));
2882 	}
2883 
2884 	ic->ic_vht_txmcs = ic->ic_vht_rxmcs;
2885 }
2886 
2887 void
2888 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
2889     uint16_t ssn, uint16_t buf_size)
2890 {
2891 	reorder_buf->head_sn = ssn;
2892 	reorder_buf->num_stored = 0;
2893 	reorder_buf->buf_size = buf_size;
2894 	reorder_buf->last_amsdu = 0;
2895 	reorder_buf->last_sub_index = 0;
2896 	reorder_buf->removed = 0;
2897 	reorder_buf->valid = 0;
2898 	reorder_buf->consec_oldsn_drops = 0;
2899 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
2900 	reorder_buf->consec_oldsn_prev_drop = 0;
2901 }
2902 
2903 void
2904 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
2905 {
2906 	int i;
2907 	struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
2908 	struct iwx_reorder_buf_entry *entry;
2909 
2910 	for (i = 0; i < reorder_buf->buf_size; i++) {
2911 		entry = &rxba->entries[i];
2912 		ml_purge(&entry->frames);
2913 		timerclear(&entry->reorder_time);
2914 	}
2915 
2916 	reorder_buf->removed = 1;
2917 	timeout_del(&reorder_buf->reorder_timer);
2918 	timerclear(&rxba->last_rx);
2919 	timeout_del(&rxba->session_timer);
2920 	rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
2921 }
2922 
2923 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
2924 
2925 void
2926 iwx_rx_ba_session_expired(void *arg)
2927 {
2928 	struct iwx_rxba_data *rxba = arg;
2929 	struct iwx_softc *sc = rxba->sc;
2930 	struct ieee80211com *ic = &sc->sc_ic;
2931 	struct ieee80211_node *ni = ic->ic_bss;
2932 	struct timeval now, timeout, expiry;
2933 	int s;
2934 
2935 	s = splnet();
2936 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0 &&
2937 	    ic->ic_state == IEEE80211_S_RUN &&
2938 	    rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
2939 		getmicrouptime(&now);
2940 		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
2941 		timeradd(&rxba->last_rx, &timeout, &expiry);
2942 		if (timercmp(&now, &expiry, <)) {
2943 			timeout_add_usec(&rxba->session_timer, rxba->timeout);
2944 		} else {
2945 			ic->ic_stats.is_ht_rx_ba_timeout++;
2946 			ieee80211_delba_request(ic, ni,
2947 			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
2948 		}
2949 	}
2950 	splx(s);
2951 }
2952 
2953 void
2954 iwx_rx_bar_frame_release(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
2955     struct mbuf_list *ml)
2956 {
2957 	struct ieee80211com *ic = &sc->sc_ic;
2958 	struct ieee80211_node *ni = ic->ic_bss;
2959 	struct iwx_bar_frame_release *release = (void *)pkt->data;
2960 	struct iwx_reorder_buffer *buf;
2961 	struct iwx_rxba_data *rxba;
2962 	unsigned int baid, nssn, sta_id, tid;
2963 
2964 	if (iwx_rx_packet_payload_len(pkt) < sizeof(*release))
2965 		return;
2966 
2967 	baid = (le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_BAID_MASK) >>
2968 	    IWX_BAR_FRAME_RELEASE_BAID_SHIFT;
2969 	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
2970 	    baid >= nitems(sc->sc_rxba_data))
2971 		return;
2972 
2973 	rxba = &sc->sc_rxba_data[baid];
2974 	if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
2975 		return;
2976 
2977 	tid = le32toh(release->sta_tid) & IWX_BAR_FRAME_RELEASE_TID_MASK;
2978 	sta_id = (le32toh(release->sta_tid) &
2979 	    IWX_BAR_FRAME_RELEASE_STA_MASK) >> IWX_BAR_FRAME_RELEASE_STA_SHIFT;
2980 	if (tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
2981 		return;
2982 
2983 	nssn = le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_NSSN_MASK;
2984 	buf = &rxba->reorder_buf;
2985 	iwx_release_frames(sc, ni, rxba, buf, nssn, ml);
2986 }
2987 
2988 void
2989 iwx_reorder_timer_expired(void *arg)
2990 {
2991 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2992 	struct iwx_reorder_buffer *buf = arg;
2993 	struct iwx_rxba_data *rxba = iwx_rxba_data_from_reorder_buf(buf);
2994 	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
2995 	struct iwx_softc *sc = rxba->sc;
2996 	struct ieee80211com *ic = &sc->sc_ic;
2997 	struct ieee80211_node *ni = ic->ic_bss;
2998 	int i, s;
2999 	uint16_t sn = 0, index = 0;
3000 	int expired = 0;
3001 	int cont = 0;
3002 	struct timeval now, timeout, expiry;
3003 
3004 	if (!buf->num_stored || buf->removed)
3005 		return;
3006 
3007 	s = splnet();
3008 	getmicrouptime(&now);
3009 	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3010 
3011 	for (i = 0; i < buf->buf_size ; i++) {
3012 		index = (buf->head_sn + i) % buf->buf_size;
3013 
3014 		if (ml_empty(&entries[index].frames)) {
3015 			/*
3016 			 * If there is a hole and the next frame didn't expire
3017 			 * we want to break and not advance SN.
3018 			 */
3019 			cont = 0;
3020 			continue;
3021 		}
3022 		timeradd(&entries[index].reorder_time, &timeout, &expiry);
3023 		if (!cont && timercmp(&now, &expiry, <))
3024 			break;
3025 
3026 		expired = 1;
3027 		/* continue until next hole after this expired frame */
3028 		cont = 1;
3029 		sn = (buf->head_sn + (i + 1)) & 0xfff;
3030 	}
3031 
3032 	if (expired) {
3033 		/* SN is set to the last expired frame + 1 */
3034 		iwx_release_frames(sc, ni, rxba, buf, sn, &ml);
3035 		if_input(&sc->sc_ic.ic_if, &ml);
3036 		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3037 	} else {
3038 		/*
3039 		 * If no frame expired and there are stored frames, index is now
3040 		 * pointing to the first unexpired frame - modify reorder timeout
3041 		 * accordingly.
3042 		 */
3043 		timeout_add_usec(&buf->reorder_timer,
3044 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3045 	}
3046 
3047 	splx(s);
3048 }
3049 
3050 #define IWX_MAX_RX_BA_SESSIONS 16
3051 
3052 void
3053 iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3054     uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3055 {
3056 	struct ieee80211com *ic = &sc->sc_ic;
3057 	struct iwx_add_sta_cmd cmd;
3058 	struct iwx_node *in = (void *)ni;
3059 	int err, s;
3060 	uint32_t status;
3061 	struct iwx_rxba_data *rxba = NULL;
3062 	uint8_t baid = 0;
3063 
3064 	s = splnet();
3065 
3066 	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
3067 		ieee80211_addba_req_refuse(ic, ni, tid);
3068 		splx(s);
3069 		return;
3070 	}
3071 
3072 	memset(&cmd, 0, sizeof(cmd));
3073 
3074 	cmd.sta_id = IWX_STATION_ID;
3075 	cmd.mac_id_n_color
3076 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3077 	cmd.add_modify = IWX_STA_MODE_MODIFY;
3078 
3079 	if (start) {
3080 		cmd.add_immediate_ba_tid = (uint8_t)tid;
3081 		cmd.add_immediate_ba_ssn = htole16(ssn);
3082 		cmd.rx_ba_window = htole16(winsize);
3083 	} else {
3084 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
3085 	}
3086 	cmd.modify_mask = start ? IWX_STA_MODIFY_ADD_BA_TID :
3087 	    IWX_STA_MODIFY_REMOVE_BA_TID;
3088 
3089 	status = IWX_ADD_STA_SUCCESS;
3090 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
3091 	    &status);
3092 
3093 	if (err || (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS) {
3094 		if (start)
3095 			ieee80211_addba_req_refuse(ic, ni, tid);
3096 		splx(s);
3097 		return;
3098 	}
3099 
3100 	/* Deaggregation is done in hardware. */
3101 	if (start) {
3102 		if (!(status & IWX_ADD_STA_BAID_VALID_MASK)) {
3103 			ieee80211_addba_req_refuse(ic, ni, tid);
3104 			splx(s);
3105 			return;
3106 		}
3107 		baid = (status & IWX_ADD_STA_BAID_MASK) >>
3108 		    IWX_ADD_STA_BAID_SHIFT;
3109 		if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3110 		    baid >= nitems(sc->sc_rxba_data)) {
3111 			ieee80211_addba_req_refuse(ic, ni, tid);
3112 			splx(s);
3113 			return;
3114 		}
3115 		rxba = &sc->sc_rxba_data[baid];
3116 		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3117 			ieee80211_addba_req_refuse(ic, ni, tid);
3118 			splx(s);
3119 			return;
3120 		}
3121 		rxba->sta_id = IWX_STATION_ID;
3122 		rxba->tid = tid;
3123 		rxba->baid = baid;
3124 		rxba->timeout = timeout_val;
3125 		getmicrouptime(&rxba->last_rx);
3126 		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
3127 		    winsize);
3128 		if (timeout_val != 0) {
3129 			struct ieee80211_rx_ba *ba;
3130 			timeout_add_usec(&rxba->session_timer,
3131 			    timeout_val);
3132 			/* XXX disable net80211's BA timeout handler */
3133 			ba = &ni->ni_rx_ba[tid];
3134 			ba->ba_timeout_val = 0;
3135 		}
3136 	} else {
3137 		int i;
3138 		for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3139 			rxba = &sc->sc_rxba_data[i];
3140 			if (rxba->baid ==
3141 			    IWX_RX_REORDER_DATA_INVALID_BAID)
3142 				continue;
3143 			if (rxba->tid != tid)
3144 				continue;
3145 			iwx_clear_reorder_buffer(sc, rxba);
3146 			break;
3147 		}
3148 	}
3149 
3150 	if (start) {
3151 		sc->sc_rx_ba_sessions++;
3152 		ieee80211_addba_req_accept(ic, ni, tid);
3153 	} else if (sc->sc_rx_ba_sessions > 0)
3154 		sc->sc_rx_ba_sessions--;
3155 
3156 	splx(s);
3157 }
3158 
3159 void
3160 iwx_mac_ctxt_task(void *arg)
3161 {
3162 	struct iwx_softc *sc = arg;
3163 	struct ieee80211com *ic = &sc->sc_ic;
3164 	struct iwx_node *in = (void *)ic->ic_bss;
3165 	int err, s = splnet();
3166 
3167 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3168 	    ic->ic_state != IEEE80211_S_RUN) {
3169 		refcnt_rele_wake(&sc->task_refs);
3170 		splx(s);
3171 		return;
3172 	}
3173 
3174 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
3175 	if (err)
3176 		printf("%s: failed to update MAC\n", DEVNAME(sc));
3177 
3178 	refcnt_rele_wake(&sc->task_refs);
3179 	splx(s);
3180 }
3181 
3182 void
3183 iwx_phy_ctxt_task(void *arg)
3184 {
3185 	struct iwx_softc *sc = arg;
3186 	struct ieee80211com *ic = &sc->sc_ic;
3187 	struct iwx_node *in = (void *)ic->ic_bss;
3188 	struct ieee80211_node *ni = &in->in_ni;
3189 	uint8_t chains, sco, vht_chan_width;
3190 	int err, s = splnet();
3191 
3192 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3193 	    ic->ic_state != IEEE80211_S_RUN ||
3194 	    in->in_phyctxt == NULL) {
3195 		refcnt_rele_wake(&sc->task_refs);
3196 		splx(s);
3197 		return;
3198 	}
3199 
3200 	chains = iwx_mimo_enabled(sc) ? 2 : 1;
3201 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3202 	    IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
3203 	    ieee80211_node_supports_ht_chan40(ni))
3204 		sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3205 	else
3206 		sco = IEEE80211_HTOP0_SCO_SCN;
3207 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
3208 	    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
3209 	    ieee80211_node_supports_vht_chan80(ni))
3210 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
3211 	else
3212 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
3213 	if (in->in_phyctxt->sco != sco ||
3214 	    in->in_phyctxt->vht_chan_width != vht_chan_width) {
3215 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
3216 		    in->in_phyctxt->channel, chains, chains, 0, sco,
3217 		    vht_chan_width);
3218 		if (err)
3219 			printf("%s: failed to update PHY\n", DEVNAME(sc));
3220 	}
3221 
3222 	refcnt_rele_wake(&sc->task_refs);
3223 	splx(s);
3224 }
3225 
3226 void
3227 iwx_updatechan(struct ieee80211com *ic)
3228 {
3229 	struct iwx_softc *sc = ic->ic_softc;
3230 
3231 	if (ic->ic_state == IEEE80211_S_RUN &&
3232 	    !task_pending(&sc->newstate_task))
3233 		iwx_add_task(sc, systq, &sc->phy_ctxt_task);
3234 }
3235 
3236 void
3237 iwx_updateprot(struct ieee80211com *ic)
3238 {
3239 	struct iwx_softc *sc = ic->ic_softc;
3240 
3241 	if (ic->ic_state == IEEE80211_S_RUN &&
3242 	    !task_pending(&sc->newstate_task))
3243 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3244 }
3245 
3246 void
3247 iwx_updateslot(struct ieee80211com *ic)
3248 {
3249 	struct iwx_softc *sc = ic->ic_softc;
3250 
3251 	if (ic->ic_state == IEEE80211_S_RUN &&
3252 	    !task_pending(&sc->newstate_task))
3253 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3254 }
3255 
3256 void
3257 iwx_updateedca(struct ieee80211com *ic)
3258 {
3259 	struct iwx_softc *sc = ic->ic_softc;
3260 
3261 	if (ic->ic_state == IEEE80211_S_RUN &&
3262 	    !task_pending(&sc->newstate_task))
3263 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3264 }
3265 
3266 void
3267 iwx_updatedtim(struct ieee80211com *ic)
3268 {
3269 	struct iwx_softc *sc = ic->ic_softc;
3270 
3271 	if (ic->ic_state == IEEE80211_S_RUN &&
3272 	    !task_pending(&sc->newstate_task))
3273 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3274 }
3275 
3276 void
3277 iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
3278     uint8_t tid)
3279 {
3280 	struct ieee80211com *ic = &sc->sc_ic;
3281 	struct ieee80211_tx_ba *ba;
3282 	int err, qid;
3283 	struct iwx_tx_ring *ring;
3284 
3285 	/* Ensure we can map this TID to an aggregation queue. */
3286 	if (tid >= IWX_MAX_TID_COUNT)
3287 		return;
3288 
3289 	ba = &ni->ni_tx_ba[tid];
3290 	if (ba->ba_state != IEEE80211_BA_REQUESTED)
3291 		return;
3292 
3293 	qid = sc->aggqid[tid];
3294 	if (qid == 0) {
3295 		/* Firmware should pick the next unused Tx queue. */
3296 		qid = fls(sc->qenablemsk);
3297 	}
3298 
3299 	/*
3300 	 * Simply enable the queue.
3301 	 * Firmware handles Tx Ba session setup and teardown.
3302 	 */
3303 	if ((sc->qenablemsk & (1 << qid)) == 0) {
3304 		if (!iwx_nic_lock(sc)) {
3305 			ieee80211_addba_resp_refuse(ic, ni, tid,
3306 			    IEEE80211_STATUS_UNSPECIFIED);
3307 			return;
3308 		}
3309 		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
3310 		    IWX_TX_RING_COUNT);
3311 		iwx_nic_unlock(sc);
3312 		if (err) {
3313 			printf("%s: could not enable Tx queue %d "
3314 			    "(error %d)\n", DEVNAME(sc), qid, err);
3315 			ieee80211_addba_resp_refuse(ic, ni, tid,
3316 			    IEEE80211_STATUS_UNSPECIFIED);
3317 			return;
3318 		}
3319 
3320 		ba->ba_winstart = 0;
3321 	} else
3322 		ba->ba_winstart = ni->ni_qos_txseqs[tid];
3323 
3324 	ba->ba_winend = (ba->ba_winstart + ba->ba_winsize - 1) & 0xfff;
3325 
3326 	ring = &sc->txq[qid];
3327 	ba->ba_timeout_val = 0;
3328 	ieee80211_addba_resp_accept(ic, ni, tid);
3329 	sc->aggqid[tid] = qid;
3330 }
3331 
3332 void
3333 iwx_ba_task(void *arg)
3334 {
3335 	struct iwx_softc *sc = arg;
3336 	struct ieee80211com *ic = &sc->sc_ic;
3337 	struct ieee80211_node *ni = ic->ic_bss;
3338 	int s = splnet();
3339 	int tid;
3340 
3341 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3342 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3343 			break;
3344 		if (sc->ba_rx.start_tidmask & (1 << tid)) {
3345 			struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3346 			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3347 			    ba->ba_winsize, ba->ba_timeout_val, 1);
3348 			sc->ba_rx.start_tidmask &= ~(1 << tid);
3349 		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3350 			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3351 			sc->ba_rx.stop_tidmask &= ~(1 << tid);
3352 		}
3353 	}
3354 
3355 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3356 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3357 			break;
3358 		if (sc->ba_tx.start_tidmask & (1 << tid)) {
3359 			iwx_sta_tx_agg_start(sc, ni, tid);
3360 			sc->ba_tx.start_tidmask &= ~(1 << tid);
3361 		}
3362 	}
3363 
3364 	refcnt_rele_wake(&sc->task_refs);
3365 	splx(s);
3366 }
3367 
3368 /*
3369  * This function is called by upper layer when an ADDBA request is received
3370  * from another STA and before the ADDBA response is sent.
3371  */
3372 int
3373 iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3374     uint8_t tid)
3375 {
3376 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3377 
3378 	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
3379 	    tid >= IWX_MAX_TID_COUNT)
3380 		return ENOSPC;
3381 
3382 	if (sc->ba_rx.start_tidmask & (1 << tid))
3383 		return EBUSY;
3384 
3385 	sc->ba_rx.start_tidmask |= (1 << tid);
3386 	iwx_add_task(sc, systq, &sc->ba_task);
3387 
3388 	return EBUSY;
3389 }
3390 
3391 /*
3392  * This function is called by upper layer on teardown of an HT-immediate
3393  * Block Ack agreement (eg. upon receipt of a DELBA frame).
3394  */
3395 void
3396 iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3397     uint8_t tid)
3398 {
3399 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3400 
3401 	if (tid >= IWX_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3402 		return;
3403 
3404 	sc->ba_rx.stop_tidmask = (1 << tid);
3405 	iwx_add_task(sc, systq, &sc->ba_task);
3406 }
3407 
3408 int
3409 iwx_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3410     uint8_t tid)
3411 {
3412 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3413 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3414 
3415 	/*
3416 	 * Require a firmware version which uses an internal AUX queue.
3417 	 * The value of IWX_FIRST_AGG_TX_QUEUE would be incorrect otherwise.
3418 	 */
3419 	if (sc->first_data_qid != IWX_DQA_CMD_QUEUE + 1)
3420 		return ENOTSUP;
3421 
3422 	/* Ensure we can map this TID to an aggregation queue. */
3423 	if (tid >= IWX_MAX_TID_COUNT)
3424 		return EINVAL;
3425 
3426 	/* We only support a fixed Tx aggregation window size, for now. */
3427 	if (ba->ba_winsize != IWX_FRAME_LIMIT)
3428 		return ENOTSUP;
3429 
3430 	/* Is firmware already using an agg queue with this TID? */
3431 	if (sc->aggqid[tid] != 0)
3432 		return ENOSPC;
3433 
3434 	/* Are we already processing an ADDBA request? */
3435 	if (sc->ba_tx.start_tidmask & (1 << tid))
3436 		return EBUSY;
3437 
3438 	sc->ba_tx.start_tidmask |= (1 << tid);
3439 	iwx_add_task(sc, systq, &sc->ba_task);
3440 
3441 	return EBUSY;
3442 }
3443 
3444 /* Read the mac address from WFMP registers. */
3445 int
3446 iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
3447 {
3448 	const uint8_t *hw_addr;
3449 	uint32_t mac_addr0, mac_addr1;
3450 
3451 	if (!iwx_nic_lock(sc))
3452 		return EBUSY;
3453 
3454 	mac_addr0 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_0));
3455 	mac_addr1 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_1));
3456 
3457 	hw_addr = (const uint8_t *)&mac_addr0;
3458 	data->hw_addr[0] = hw_addr[3];
3459 	data->hw_addr[1] = hw_addr[2];
3460 	data->hw_addr[2] = hw_addr[1];
3461 	data->hw_addr[3] = hw_addr[0];
3462 
3463 	hw_addr = (const uint8_t *)&mac_addr1;
3464 	data->hw_addr[4] = hw_addr[1];
3465 	data->hw_addr[5] = hw_addr[0];
3466 
3467 	iwx_nic_unlock(sc);
3468 	return 0;
3469 }
3470 
3471 int
3472 iwx_is_valid_mac_addr(const uint8_t *addr)
3473 {
3474 	static const uint8_t reserved_mac[] = {
3475 		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3476 	};
3477 
3478 	return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3479 	    memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3480 	    memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3481 	    !ETHER_IS_MULTICAST(addr));
3482 }
3483 
3484 int
3485 iwx_nvm_get(struct iwx_softc *sc)
3486 {
3487 	struct iwx_nvm_get_info cmd = {};
3488 	struct iwx_nvm_data *nvm = &sc->sc_nvm;
3489 	struct iwx_host_cmd hcmd = {
3490 		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
3491 		.data = { &cmd, },
3492 		.len = { sizeof(cmd) },
3493 		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3494 		    IWX_NVM_GET_INFO)
3495 	};
3496 	int err;
3497 	uint32_t mac_flags;
3498 	/*
3499 	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
3500 	 * in v3, except for the channel profile part of the
3501 	 * regulatory.  So we can just access the new struct, with the
3502 	 * exception of the latter.
3503 	 */
3504 	struct iwx_nvm_get_info_rsp *rsp;
3505 	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
3506 	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
3507 	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
3508 
3509 	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
3510 	err = iwx_send_cmd(sc, &hcmd);
3511 	if (err)
3512 		return err;
3513 
3514 	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
3515 		err = EIO;
3516 		goto out;
3517 	}
3518 
3519 	memset(nvm, 0, sizeof(*nvm));
3520 
3521 	iwx_set_mac_addr_from_csr(sc, nvm);
3522 	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
3523 		printf("%s: no valid mac address was found\n", DEVNAME(sc));
3524 		err = EINVAL;
3525 		goto out;
3526 	}
3527 
3528 	rsp = (void *)hcmd.resp_pkt->data;
3529 
3530 	/* Initialize general data */
3531 	nvm->nvm_version = le16toh(rsp->general.nvm_version);
3532 	nvm->n_hw_addrs = rsp->general.n_hw_addrs;
3533 
3534 	/* Initialize MAC sku data */
3535 	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
3536 	nvm->sku_cap_11ac_enable =
3537 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
3538 	nvm->sku_cap_11n_enable =
3539 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
3540 	nvm->sku_cap_11ax_enable =
3541 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
3542 	nvm->sku_cap_band_24GHz_enable =
3543 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
3544 	nvm->sku_cap_band_52GHz_enable =
3545 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
3546 	nvm->sku_cap_mimo_disable =
3547 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
3548 
3549 	/* Initialize PHY sku data */
3550 	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
3551 	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);
3552 
3553 	if (le32toh(rsp->regulatory.lar_enabled) &&
3554 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
3555 		nvm->lar_enabled = 1;
3556 	}
3557 
3558 	if (v4) {
3559 		iwx_init_channel_map(sc, NULL,
3560 		    rsp->regulatory.channel_profile, IWX_NUM_CHANNELS);
3561 	} else {
3562 		rsp_v3 = (void *)rsp;
3563 		iwx_init_channel_map(sc, rsp_v3->regulatory.channel_profile,
3564 		    NULL, IWX_NUM_CHANNELS_V1);
3565 	}
3566 out:
3567 	iwx_free_resp(sc, &hcmd);
3568 	return err;
3569 }
3570 
3571 int
3572 iwx_load_firmware(struct iwx_softc *sc)
3573 {
3574 	struct iwx_fw_sects *fws;
3575 	int err;
3576 
3577 	splassert(IPL_NET);
3578 
3579 	sc->sc_uc.uc_intr = 0;
3580 	sc->sc_uc.uc_ok = 0;
3581 
3582 	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
3583 	err = iwx_ctxt_info_init(sc, fws);
3584 	if (err) {
3585 		printf("%s: could not init context info\n", DEVNAME(sc));
3586 		return err;
3587 	}
3588 
3589 	/* wait for the firmware to load */
3590 	err = tsleep_nsec(&sc->sc_uc, 0, "iwxuc", SEC_TO_NSEC(1));
3591 	if (err || !sc->sc_uc.uc_ok) {
3592 		printf("%s: could not load firmware, %d\n", DEVNAME(sc), err);
3593 		iwx_ctxt_info_free_paging(sc);
3594 	}
3595 
3596 	iwx_ctxt_info_free_fw_img(sc);
3597 
3598 	if (!sc->sc_uc.uc_ok)
3599 		return EINVAL;
3600 
3601 	return err;
3602 }
3603 
3604 int
3605 iwx_start_fw(struct iwx_softc *sc)
3606 {
3607 	int err;
3608 
3609 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
3610 
3611 	iwx_disable_interrupts(sc);
3612 
3613 	/* make sure rfkill handshake bits are cleared */
3614 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
3615 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
3616 	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3617 
3618 	/* clear (again), then enable firmware load interrupt */
3619 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
3620 
3621 	err = iwx_nic_init(sc);
3622 	if (err) {
3623 		printf("%s: unable to init nic\n", DEVNAME(sc));
3624 		return err;
3625 	}
3626 
3627 	iwx_enable_fwload_interrupt(sc);
3628 
3629 	return iwx_load_firmware(sc);
3630 }
3631 
3632 int
3633 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
3634 {
3635 	struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
3636 		.valid = htole32(valid_tx_ant),
3637 	};
3638 
3639 	return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
3640 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3641 }
3642 
3643 int
3644 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
3645 {
3646 	struct iwx_phy_cfg_cmd phy_cfg_cmd;
3647 
3648 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3649 	phy_cfg_cmd.calib_control.event_trigger =
3650 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
3651 	phy_cfg_cmd.calib_control.flow_trigger =
3652 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
3653 
3654 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
3655 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3656 }
3657 
3658 int
3659 iwx_send_dqa_cmd(struct iwx_softc *sc)
3660 {
3661 	struct iwx_dqa_enable_cmd dqa_cmd = {
3662 		.cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
3663 	};
3664 	uint32_t cmd_id;
3665 
3666 	cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
3667 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
3668 }
3669 
3670 int
3671 iwx_load_ucode_wait_alive(struct iwx_softc *sc)
3672 {
3673 	int err;
3674 
3675 	err = iwx_read_firmware(sc);
3676 	if (err)
3677 		return err;
3678 
3679 	err = iwx_start_fw(sc);
3680 	if (err)
3681 		return err;
3682 
3683 	iwx_post_alive(sc);
3684 
3685 	return 0;
3686 }
3687 
3688 int
3689 iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
3690 {
3691 	const int wait_flags = IWX_INIT_COMPLETE;
3692 	struct iwx_nvm_access_complete_cmd nvm_complete = {};
3693 	struct iwx_init_extended_cfg_cmd init_cfg = {
3694 		.init_flags = htole32(IWX_INIT_NVM),
3695 	};
3696 	int err, s;
3697 
3698 	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
3699 		printf("%s: radio is disabled by hardware switch\n",
3700 		    DEVNAME(sc));
3701 		return EPERM;
3702 	}
3703 
3704 	s = splnet();
3705 	sc->sc_init_complete = 0;
3706 	err = iwx_load_ucode_wait_alive(sc);
3707 	if (err) {
3708 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
3709 		splx(s);
3710 		return err;
3711 	}
3712 
3713 	/*
3714 	 * Send init config command to mark that we are sending NVM
3715 	 * access commands
3716 	 */
3717 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
3718 	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
3719 	if (err) {
3720 		splx(s);
3721 		return err;
3722 	}
3723 
3724 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3725 	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
3726 	if (err) {
3727 		splx(s);
3728 		return err;
3729 	}
3730 
3731 	/* Wait for the init complete notification from the firmware. */
3732 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3733 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
3734 		    SEC_TO_NSEC(2));
3735 		if (err) {
3736 			splx(s);
3737 			return err;
3738 		}
3739 	}
3740 	splx(s);
3741 	if (readnvm) {
3742 		err = iwx_nvm_get(sc);
3743 		if (err) {
3744 			printf("%s: failed to read nvm\n", DEVNAME(sc));
3745 			return err;
3746 		}
3747 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
3748 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
3749 			    sc->sc_nvm.hw_addr);
3750 
3751 	}
3752 	return 0;
3753 }
3754 
3755 int
3756 iwx_config_ltr(struct iwx_softc *sc)
3757 {
3758 	struct iwx_ltr_config_cmd cmd = {
3759 		.flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
3760 	};
3761 
3762 	if (!sc->sc_ltr_enabled)
3763 		return 0;
3764 
3765 	return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3766 }
3767 
3768 void
3769 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx)
3770 {
3771 	struct iwx_rx_data *data = &ring->data[idx];
3772 
3773 	((uint64_t *)ring->desc)[idx] =
3774 	    htole64(data->map->dm_segs[0].ds_addr | (idx & 0x0fff));
3775 	bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
3776 	    idx * sizeof(uint64_t), sizeof(uint64_t),
3777 	    BUS_DMASYNC_PREWRITE);
3778 }
3779 
3780 int
3781 iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
3782 {
3783 	struct iwx_rx_ring *ring = &sc->rxq;
3784 	struct iwx_rx_data *data = &ring->data[idx];
3785 	struct mbuf *m;
3786 	int err;
3787 	int fatal = 0;
3788 
3789 	m = m_gethdr(M_DONTWAIT, MT_DATA);
3790 	if (m == NULL)
3791 		return ENOBUFS;
3792 
3793 	if (size <= MCLBYTES) {
3794 		MCLGET(m, M_DONTWAIT);
3795 	} else {
3796 		MCLGETL(m, M_DONTWAIT, IWX_RBUF_SIZE);
3797 	}
3798 	if ((m->m_flags & M_EXT) == 0) {
3799 		m_freem(m);
3800 		return ENOBUFS;
3801 	}
3802 
3803 	if (data->m != NULL) {
3804 		bus_dmamap_unload(sc->sc_dmat, data->map);
3805 		fatal = 1;
3806 	}
3807 
3808 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3809 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3810 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3811 	if (err) {
3812 		/* XXX */
3813 		if (fatal)
3814 			panic("%s: could not load RX mbuf", DEVNAME(sc));
3815 		m_freem(m);
3816 		return err;
3817 	}
3818 	data->m = m;
3819 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3820 
3821 	/* Update RX descriptor. */
3822 	iwx_update_rx_desc(sc, ring, idx);
3823 
3824 	return 0;
3825 }
3826 
3827 int
3828 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
3829     struct iwx_rx_mpdu_desc *desc)
3830 {
3831 	int energy_a, energy_b;
3832 
3833 	energy_a = desc->v1.energy_a;
3834 	energy_b = desc->v1.energy_b;
3835 	energy_a = energy_a ? -energy_a : -256;
3836 	energy_b = energy_b ? -energy_b : -256;
3837 	return MAX(energy_a, energy_b);
3838 }
3839 
3840 void
3841 iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3842     struct iwx_rx_data *data)
3843 {
3844 	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
3845 
3846 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3847 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3848 
3849 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3850 }
3851 
3852 /*
3853  * Retrieve the average noise (in dBm) among receivers.
3854  */
3855 int
3856 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
3857 {
3858 	int i, total, nbant, noise;
3859 
3860 	total = nbant = noise = 0;
3861 	for (i = 0; i < 3; i++) {
3862 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
3863 		if (noise) {
3864 			total += noise;
3865 			nbant++;
3866 		}
3867 	}
3868 
3869 	/* There should be at least one antenna but check anyway. */
3870 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3871 }
3872 
3873 int
3874 iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
3875     struct ieee80211_rxinfo *rxi)
3876 {
3877 	struct ieee80211com *ic = &sc->sc_ic;
3878 	struct ieee80211_key *k;
3879 	struct ieee80211_frame *wh;
3880 	uint64_t pn, *prsc;
3881 	uint8_t *ivp;
3882 	uint8_t tid;
3883 	int hdrlen, hasqos;
3884 
3885 	wh = mtod(m, struct ieee80211_frame *);
3886 	hdrlen = ieee80211_get_hdrlen(wh);
3887 	ivp = (uint8_t *)wh + hdrlen;
3888 
3889 	/* find key for decryption */
3890 	k = ieee80211_get_rxkey(ic, m, ni);
3891 	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
3892 		return 1;
3893 
3894 	/* Check that ExtIV bit is be set. */
3895 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
3896 		return 1;
3897 
3898 	hasqos = ieee80211_has_qos(wh);
3899 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
3900 	prsc = &k->k_rsc[tid];
3901 
3902 	/* Extract the 48-bit PN from the CCMP header. */
3903 	pn = (uint64_t)ivp[0]       |
3904 	     (uint64_t)ivp[1] <<  8 |
3905 	     (uint64_t)ivp[4] << 16 |
3906 	     (uint64_t)ivp[5] << 24 |
3907 	     (uint64_t)ivp[6] << 32 |
3908 	     (uint64_t)ivp[7] << 40;
3909 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
3910 		if (pn < *prsc) {
3911 			ic->ic_stats.is_ccmp_replays++;
3912 			return 1;
3913 		}
3914 	} else if (pn <= *prsc) {
3915 		ic->ic_stats.is_ccmp_replays++;
3916 		return 1;
3917 	}
3918 	/* Last seen packet number is updated in ieee80211_inputm(). */
3919 
3920 	/*
3921 	 * Some firmware versions strip the MIC, and some don't. It is not
3922 	 * clear which of the capability flags could tell us what to expect.
3923 	 * For now, keep things simple and just leave the MIC in place if
3924 	 * it is present.
3925 	 *
3926 	 * The IV will be stripped by ieee80211_inputm().
3927 	 */
3928 	return 0;
3929 }
3930 
3931 int
3932 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
3933     struct ieee80211_rxinfo *rxi)
3934 {
3935 	struct ieee80211com *ic = &sc->sc_ic;
3936 	struct ifnet *ifp = IC2IFP(ic);
3937 	struct ieee80211_frame *wh;
3938 	struct ieee80211_node *ni;
3939 	int ret = 0;
3940 	uint8_t type, subtype;
3941 
3942 	wh = mtod(m, struct ieee80211_frame *);
3943 
3944 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3945 	if (type == IEEE80211_FC0_TYPE_CTL)
3946 		return 0;
3947 
3948 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3949 	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
3950 		return 0;
3951 
3952 	ni = ieee80211_find_rxnode(ic, wh);
3953 	/* Handle hardware decryption. */
3954 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
3955 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
3956 	    (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
3957 	    ((!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3958 	    ni->ni_rsncipher == IEEE80211_CIPHER_CCMP) ||
3959 	    (IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3960 	    ni->ni_rsngroupcipher == IEEE80211_CIPHER_CCMP))) {
3961 		if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
3962 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
3963 			ic->ic_stats.is_ccmp_dec_errs++;
3964 			ret = 1;
3965 			goto out;
3966 		}
3967 		/* Check whether decryption was successful or not. */
3968 		if ((rx_pkt_status &
3969 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
3970 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
3971 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
3972 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
3973 			ic->ic_stats.is_ccmp_dec_errs++;
3974 			ret = 1;
3975 			goto out;
3976 		}
3977 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
3978 	}
3979 out:
3980 	if (ret)
3981 		ifp->if_ierrors++;
3982 	ieee80211_release_node(ic, ni);
3983 	return ret;
3984 }
3985 
3986 void
3987 iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
3988     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
3989     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
3990     struct mbuf_list *ml)
3991 {
3992 	struct ieee80211com *ic = &sc->sc_ic;
3993 	struct ifnet *ifp = IC2IFP(ic);
3994 	struct ieee80211_frame *wh;
3995 	struct ieee80211_node *ni;
3996 
3997 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
3998 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
3999 
4000 	wh = mtod(m, struct ieee80211_frame *);
4001 	ni = ieee80211_find_rxnode(ic, wh);
4002 	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4003 	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
4004 		ifp->if_ierrors++;
4005 		m_freem(m);
4006 		ieee80211_release_node(ic, ni);
4007 		return;
4008 	}
4009 
4010 #if NBPFILTER > 0
4011 	if (sc->sc_drvbpf != NULL) {
4012 		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
4013 		uint16_t chan_flags;
4014 
4015 		tap->wr_flags = 0;
4016 		if (is_shortpre)
4017 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4018 		tap->wr_chan_freq =
4019 		    htole16(ic->ic_channels[chanidx].ic_freq);
4020 		chan_flags = ic->ic_channels[chanidx].ic_flags;
4021 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
4022 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
4023 			chan_flags &= ~IEEE80211_CHAN_HT;
4024 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
4025 		}
4026 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
4027 			chan_flags &= ~IEEE80211_CHAN_VHT;
4028 		tap->wr_chan_flags = htole16(chan_flags);
4029 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4030 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4031 		tap->wr_tsft = device_timestamp;
4032 		if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
4033 			uint8_t mcs = (rate_n_flags &
4034 			    (IWX_RATE_HT_MCS_RATE_CODE_MSK |
4035 			    IWX_RATE_HT_MCS_NSS_MSK));
4036 			tap->wr_rate = (0x80 | mcs);
4037 		} else {
4038 			uint8_t rate = (rate_n_flags &
4039 			    IWX_RATE_LEGACY_RATE_MSK);
4040 			switch (rate) {
4041 			/* CCK rates. */
4042 			case  10: tap->wr_rate =   2; break;
4043 			case  20: tap->wr_rate =   4; break;
4044 			case  55: tap->wr_rate =  11; break;
4045 			case 110: tap->wr_rate =  22; break;
4046 			/* OFDM rates. */
4047 			case 0xd: tap->wr_rate =  12; break;
4048 			case 0xf: tap->wr_rate =  18; break;
4049 			case 0x5: tap->wr_rate =  24; break;
4050 			case 0x7: tap->wr_rate =  36; break;
4051 			case 0x9: tap->wr_rate =  48; break;
4052 			case 0xb: tap->wr_rate =  72; break;
4053 			case 0x1: tap->wr_rate =  96; break;
4054 			case 0x3: tap->wr_rate = 108; break;
4055 			/* Unknown rate: should not happen. */
4056 			default:  tap->wr_rate =   0;
4057 			}
4058 		}
4059 
4060 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4061 		    m, BPF_DIRECTION_IN);
4062 	}
4063 #endif
4064 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4065 	ieee80211_release_node(ic, ni);
4066 }
4067 
4068 /*
4069  * Drop duplicate 802.11 retransmissions
4070  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4071  * and handle pseudo-duplicate frames which result from deaggregation
4072  * of A-MSDU frames in hardware.
4073  */
4074 int
4075 iwx_detect_duplicate(struct iwx_softc *sc, struct mbuf *m,
4076     struct iwx_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4077 {
4078 	struct ieee80211com *ic = &sc->sc_ic;
4079 	struct iwx_node *in = (void *)ic->ic_bss;
4080 	struct iwx_rxq_dup_data *dup_data = &in->dup_data;
4081 	uint8_t tid = IWX_MAX_TID_COUNT, subframe_idx;
4082 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4083 	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4084 	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4085 	int hasqos = ieee80211_has_qos(wh);
4086 	uint16_t seq;
4087 
4088 	if (type == IEEE80211_FC0_TYPE_CTL ||
4089 	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4090 	    IEEE80211_IS_MULTICAST(wh->i_addr1))
4091 		return 0;
4092 
4093 	if (hasqos) {
4094 		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4095 		if (tid > IWX_MAX_TID_COUNT)
4096 			tid = IWX_MAX_TID_COUNT;
4097 	}
4098 
4099 	/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
4100 	subframe_idx = desc->amsdu_info &
4101 		IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4102 
4103 	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4104 	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4105 	    dup_data->last_seq[tid] == seq &&
4106 	    dup_data->last_sub_frame[tid] >= subframe_idx)
4107 		return 1;
4108 
4109 	/*
4110 	 * Allow the same frame sequence number for all A-MSDU subframes
4111 	 * following the first subframe.
4112 	 * Otherwise these subframes would be discarded as replays.
4113 	 */
4114 	if (dup_data->last_seq[tid] == seq &&
4115 	    subframe_idx > dup_data->last_sub_frame[tid] &&
4116 	    (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU)) {
4117 		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4118 	}
4119 
4120 	dup_data->last_seq[tid] = seq;
4121 	dup_data->last_sub_frame[tid] = subframe_idx;
4122 
4123 	return 0;
4124 }
4125 
4126 /*
4127  * Returns true if sn2 - buffer_size < sn1 < sn2.
4128  * To be used only in order to compare reorder buffer head with NSSN.
4129  * We fully trust NSSN unless it is behind us due to reorder timeout.
4130  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
4131  */
4132 int
4133 iwx_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
4134 {
4135 	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
4136 }
4137 
4138 void
4139 iwx_release_frames(struct iwx_softc *sc, struct ieee80211_node *ni,
4140     struct iwx_rxba_data *rxba, struct iwx_reorder_buffer *reorder_buf,
4141     uint16_t nssn, struct mbuf_list *ml)
4142 {
4143 	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
4144 	uint16_t ssn = reorder_buf->head_sn;
4145 
4146 	/* ignore nssn smaller than head sn - this can happen due to timeout */
4147 	if (iwx_is_sn_less(nssn, ssn, reorder_buf->buf_size))
4148 		goto set_timer;
4149 
4150 	while (iwx_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
4151 		int index = ssn % reorder_buf->buf_size;
4152 		struct mbuf *m;
4153 		int chanidx, is_shortpre;
4154 		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
4155 		struct ieee80211_rxinfo *rxi;
4156 
4157 		/* This data is the same for all A-MSDU subframes. */
4158 		chanidx = entries[index].chanidx;
4159 		rx_pkt_status = entries[index].rx_pkt_status;
4160 		is_shortpre = entries[index].is_shortpre;
4161 		rate_n_flags = entries[index].rate_n_flags;
4162 		device_timestamp = entries[index].device_timestamp;
4163 		rxi = &entries[index].rxi;
4164 
4165 		/*
4166 		 * Empty the list. Will have more than one frame for A-MSDU.
4167 		 * Empty list is valid as well since nssn indicates frames were
4168 		 * received.
4169 		 */
4170 		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
4171 			iwx_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
4172 			    rate_n_flags, device_timestamp, rxi, ml);
4173 			reorder_buf->num_stored--;
4174 
4175 			/*
4176 			 * Allow the same frame sequence number and CCMP PN for
4177 			 * all A-MSDU subframes following the first subframe.
4178 			 * Otherwise they would be discarded as replays.
4179 			 */
4180 			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4181 			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4182 		}
4183 
4184 		ssn = (ssn + 1) & 0xfff;
4185 	}
4186 	reorder_buf->head_sn = nssn;
4187 
4188 set_timer:
4189 	if (reorder_buf->num_stored && !reorder_buf->removed) {
4190 		timeout_add_usec(&reorder_buf->reorder_timer,
4191 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
4192 	} else
4193 		timeout_del(&reorder_buf->reorder_timer);
4194 }
4195 
4196 int
4197 iwx_oldsn_workaround(struct iwx_softc *sc, struct ieee80211_node *ni, int tid,
4198     struct iwx_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
4199 {
4200 	struct ieee80211com *ic = &sc->sc_ic;
4201 
4202 	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
4203 		/* we have a new (A-)MPDU ... */
4204 
4205 		/*
4206 		 * reset counter to 0 if we didn't have any oldsn in
4207 		 * the last A-MPDU (as detected by GP2 being identical)
4208 		 */
4209 		if (!buffer->consec_oldsn_prev_drop)
4210 			buffer->consec_oldsn_drops = 0;
4211 
4212 		/* either way, update our tracking state */
4213 		buffer->consec_oldsn_ampdu_gp2 = gp2;
4214 	} else if (buffer->consec_oldsn_prev_drop) {
4215 		/*
4216 		 * tracking state didn't change, and we had an old SN
4217 		 * indication before - do nothing in this case, we
4218 		 * already noted this one down and are waiting for the
4219 		 * next A-MPDU (by GP2)
4220 		 */
4221 		return 0;
4222 	}
4223 
4224 	/* return unless this MPDU has old SN */
4225 	if (!(reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN))
4226 		return 0;
4227 
4228 	/* update state */
4229 	buffer->consec_oldsn_prev_drop = 1;
4230 	buffer->consec_oldsn_drops++;
4231 
4232 	/* if limit is reached, send del BA and reset state */
4233 	if (buffer->consec_oldsn_drops == IWX_AMPDU_CONSEC_DROPS_DELBA) {
4234 		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
4235 		    0, tid);
4236 		buffer->consec_oldsn_prev_drop = 0;
4237 		buffer->consec_oldsn_drops = 0;
4238 		return 1;
4239 	}
4240 
4241 	return 0;
4242 }
4243 
4244 /*
4245  * Handle re-ordering of frames which were de-aggregated in hardware.
4246  * Returns 1 if the MPDU was consumed (buffered or dropped).
4247  * Returns 0 if the MPDU should be passed to upper layer.
4248  */
4249 int
4250 iwx_rx_reorder(struct iwx_softc *sc, struct mbuf *m, int chanidx,
4251     struct iwx_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
4252     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4253     struct mbuf_list *ml)
4254 {
4255 	struct ieee80211com *ic = &sc->sc_ic;
4256 	struct ieee80211_frame *wh;
4257 	struct ieee80211_node *ni;
4258 	struct iwx_rxba_data *rxba;
4259 	struct iwx_reorder_buffer *buffer;
4260 	uint32_t reorder_data = le32toh(desc->reorder_data);
4261 	int is_amsdu = (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU);
4262 	int last_subframe =
4263 		(desc->amsdu_info & IWX_RX_MPDU_AMSDU_LAST_SUBFRAME);
4264 	uint8_t tid;
4265 	uint8_t subframe_idx = (desc->amsdu_info &
4266 	    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4267 	struct iwx_reorder_buf_entry *entries;
4268 	int index;
4269 	uint16_t nssn, sn;
4270 	uint8_t baid, type, subtype;
4271 	int hasqos;
4272 
4273 	wh = mtod(m, struct ieee80211_frame *);
4274 	hasqos = ieee80211_has_qos(wh);
4275 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4276 
4277 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4278 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4279 
4280 	/*
4281 	 * We are only interested in Block Ack requests and unicast QoS data.
4282 	 */
4283 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
4284 		return 0;
4285 	if (hasqos) {
4286 		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
4287 			return 0;
4288 	} else {
4289 		if (type != IEEE80211_FC0_TYPE_CTL ||
4290 		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
4291 			return 0;
4292 	}
4293 
4294 	baid = (reorder_data & IWX_RX_MPDU_REORDER_BAID_MASK) >>
4295 		IWX_RX_MPDU_REORDER_BAID_SHIFT;
4296 	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
4297 	    baid >= nitems(sc->sc_rxba_data))
4298 		return 0;
4299 
4300 	rxba = &sc->sc_rxba_data[baid];
4301 	if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
4302 	    tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
4303 		return 0;
4304 
4305 	if (rxba->timeout != 0)
4306 		getmicrouptime(&rxba->last_rx);
4307 
4308 	/* Bypass A-MPDU re-ordering in net80211. */
4309 	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
4310 
4311 	nssn = reorder_data & IWX_RX_MPDU_REORDER_NSSN_MASK;
4312 	sn = (reorder_data & IWX_RX_MPDU_REORDER_SN_MASK) >>
4313 		IWX_RX_MPDU_REORDER_SN_SHIFT;
4314 
4315 	buffer = &rxba->reorder_buf;
4316 	entries = &rxba->entries[0];
4317 
4318 	if (!buffer->valid) {
4319 		if (reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN)
4320 			return 0;
4321 		buffer->valid = 1;
4322 	}
4323 
4324 	ni = ieee80211_find_rxnode(ic, wh);
4325 	if (type == IEEE80211_FC0_TYPE_CTL &&
4326 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
4327 		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
4328 		goto drop;
4329 	}
4330 
4331 	/*
4332 	 * If there was a significant jump in the nssn - adjust.
4333 	 * If the SN is smaller than the NSSN it might need to first go into
4334 	 * the reorder buffer, in which case we just release up to it and the
4335 	 * rest of the function will take care of storing it and releasing up to
4336 	 * the nssn.
4337 	 */
4338 	if (!iwx_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
4339 	    buffer->buf_size) ||
4340 	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
4341 		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
4342 		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
4343 		iwx_release_frames(sc, ni, rxba, buffer, min_sn, ml);
4344 	}
4345 
4346 	if (iwx_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
4347 	    device_timestamp)) {
4348 		 /* BA session will be torn down. */
4349 		ic->ic_stats.is_ht_rx_ba_window_jump++;
4350 		goto drop;
4351 
4352 	}
4353 
4354 	/* drop any outdated packets */
4355 	if (SEQ_LT(sn, buffer->head_sn)) {
4356 		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
4357 		goto drop;
4358 	}
4359 
4360 	/* release immediately if allowed by nssn and no stored frames */
4361 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
4362 		if (iwx_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
4363 		   (!is_amsdu || last_subframe))
4364 			buffer->head_sn = nssn;
4365 		ieee80211_release_node(ic, ni);
4366 		return 0;
4367 	}
4368 
4369 	/*
4370 	 * release immediately if there are no stored frames, and the sn is
4371 	 * equal to the head.
4372 	 * This can happen due to reorder timer, where NSSN is behind head_sn.
4373 	 * When we released everything, and we got the next frame in the
4374 	 * sequence, according to the NSSN we can't release immediately,
4375 	 * while technically there is no hole and we can move forward.
4376 	 */
4377 	if (!buffer->num_stored && sn == buffer->head_sn) {
4378 		if (!is_amsdu || last_subframe)
4379 			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
4380 		ieee80211_release_node(ic, ni);
4381 		return 0;
4382 	}
4383 
4384 	index = sn % buffer->buf_size;
4385 
4386 	/*
4387 	 * Check if we already stored this frame
4388 	 * As AMSDU is either received or not as whole, logic is simple:
4389 	 * If we have frames in that position in the buffer and the last frame
4390 	 * originated from AMSDU had a different SN then it is a retransmission.
4391 	 * If it is the same SN then if the subframe index is incrementing it
4392 	 * is the same AMSDU - otherwise it is a retransmission.
4393 	 */
4394 	if (!ml_empty(&entries[index].frames)) {
4395 		if (!is_amsdu) {
4396 			ic->ic_stats.is_ht_rx_ba_no_buf++;
4397 			goto drop;
4398 		} else if (sn != buffer->last_amsdu ||
4399 		    buffer->last_sub_index >= subframe_idx) {
4400 			ic->ic_stats.is_ht_rx_ba_no_buf++;
4401 			goto drop;
4402 		}
4403 	} else {
4404 		/* This data is the same for all A-MSDU subframes. */
4405 		entries[index].chanidx = chanidx;
4406 		entries[index].is_shortpre = is_shortpre;
4407 		entries[index].rate_n_flags = rate_n_flags;
4408 		entries[index].device_timestamp = device_timestamp;
4409 		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
4410 	}
4411 
4412 	/* put in reorder buffer */
4413 	ml_enqueue(&entries[index].frames, m);
4414 	buffer->num_stored++;
4415 	getmicrouptime(&entries[index].reorder_time);
4416 
4417 	if (is_amsdu) {
4418 		buffer->last_amsdu = sn;
4419 		buffer->last_sub_index = subframe_idx;
4420 	}
4421 
4422 	/*
4423 	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
4424 	 * The reason is that NSSN advances on the first sub-frame, and may
4425 	 * cause the reorder buffer to advance before all the sub-frames arrive.
4426 	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
4427 	 * SN 1. NSSN for first sub frame will be 3 with the result of driver
4428 	 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
4429 	 * already ahead and it will be dropped.
4430 	 * If the last sub-frame is not on this queue - we will get frame
4431 	 * release notification with up to date NSSN.
4432 	 */
4433 	if (!is_amsdu || last_subframe)
4434 		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
4435 
4436 	ieee80211_release_node(ic, ni);
4437 	return 1;
4438 
4439 drop:
4440 	m_freem(m);
4441 	ieee80211_release_node(ic, ni);
4442 	return 1;
4443 }
4444 
4445 void
4446 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
4447     size_t maxlen, struct mbuf_list *ml)
4448 {
4449 	struct ieee80211com *ic = &sc->sc_ic;
4450 	struct ieee80211_rxinfo rxi;
4451 	struct iwx_rx_mpdu_desc *desc;
4452 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4453 	int rssi;
4454 	uint8_t chanidx;
4455 	uint16_t phy_info;
4456 
4457 	desc = (struct iwx_rx_mpdu_desc *)pktdata;
4458 
4459 	if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
4460 	    !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
4461 		m_freem(m);
4462 		return; /* drop */
4463 	}
4464 
4465 	len = le16toh(desc->mpdu_len);
4466 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4467 		/* Allow control frames in monitor mode. */
4468 		if (len < sizeof(struct ieee80211_frame_cts)) {
4469 			ic->ic_stats.is_rx_tooshort++;
4470 			IC2IFP(ic)->if_ierrors++;
4471 			m_freem(m);
4472 			return;
4473 		}
4474 	} else if (len < sizeof(struct ieee80211_frame)) {
4475 		ic->ic_stats.is_rx_tooshort++;
4476 		IC2IFP(ic)->if_ierrors++;
4477 		m_freem(m);
4478 		return;
4479 	}
4480 	if (len > maxlen - sizeof(*desc)) {
4481 		IC2IFP(ic)->if_ierrors++;
4482 		m_freem(m);
4483 		return;
4484 	}
4485 
4486 	m->m_data = pktdata + sizeof(*desc);
4487 	m->m_pkthdr.len = m->m_len = len;
4488 
4489 	/* Account for padding following the frame header. */
4490 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
4491 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4492 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4493 		if (type == IEEE80211_FC0_TYPE_CTL) {
4494 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
4495 			case IEEE80211_FC0_SUBTYPE_CTS:
4496 				hdrlen = sizeof(struct ieee80211_frame_cts);
4497 				break;
4498 			case IEEE80211_FC0_SUBTYPE_ACK:
4499 				hdrlen = sizeof(struct ieee80211_frame_ack);
4500 				break;
4501 			default:
4502 				hdrlen = sizeof(struct ieee80211_frame_min);
4503 				break;
4504 			}
4505 		} else
4506 			hdrlen = ieee80211_get_hdrlen(wh);
4507 
4508 		if ((le16toh(desc->status) &
4509 		    IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4510 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4511 			/* Padding is inserted after the IV. */
4512 			hdrlen += IEEE80211_CCMP_HDRLEN;
4513 		}
4514 
4515 		memmove(m->m_data + 2, m->m_data, hdrlen);
4516 		m_adj(m, 2);
4517 	}
4518 
4519 	memset(&rxi, 0, sizeof(rxi));
4520 
4521 	/*
4522 	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
4523 	 * in place for each subframe. But it leaves the 'A-MSDU present'
4524 	 * bit set in the frame header. We need to clear this bit ourselves.
4525 	 * (XXX This workaround is not required on AX200/AX201 devices that
4526 	 * have been tested by me, but it's unclear when this problem was
4527 	 * fixed in the hardware. It definitely affects the 9k generation.
4528 	 * Leaving this in place for now since some 9k/AX200 hybrids seem
4529 	 * to exist that we may eventually add support for.)
4530 	 *
4531 	 * And we must allow the same CCMP PN for subframes following the
4532 	 * first subframe. Otherwise they would be discarded as replays.
4533 	 */
4534 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
4535 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4536 		uint8_t subframe_idx = (desc->amsdu_info &
4537 		    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4538 		if (subframe_idx > 0)
4539 			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4540 		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
4541 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
4542 			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
4543 			    struct ieee80211_qosframe_addr4 *);
4544 			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4545 		} else if (ieee80211_has_qos(wh) &&
4546 		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
4547 			struct ieee80211_qosframe *qwh = mtod(m,
4548 			    struct ieee80211_qosframe *);
4549 			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4550 		}
4551 	}
4552 
4553 	/*
4554 	 * Verify decryption before duplicate detection. The latter uses
4555 	 * the TID supplied in QoS frame headers and this TID is implicitly
4556 	 * verified as part of the CCMP nonce.
4557 	 */
4558 	if (iwx_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
4559 		m_freem(m);
4560 		return;
4561 	}
4562 
4563 	if (iwx_detect_duplicate(sc, m, desc, &rxi)) {
4564 		m_freem(m);
4565 		return;
4566 	}
4567 
4568 	phy_info = le16toh(desc->phy_info);
4569 	rate_n_flags = le32toh(desc->v1.rate_n_flags);
4570 	chanidx = desc->v1.channel;
4571 	device_timestamp = desc->v1.gp2_on_air_rise;
4572 
4573 	rssi = iwx_rxmq_get_signal_strength(sc, desc);
4574 	rssi = (0 - IWX_MIN_DBM) + rssi;	/* normalize */
4575 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
4576 
4577 	rxi.rxi_rssi = rssi;
4578 	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
4579 	rxi.rxi_chan = chanidx;
4580 
4581 	if (iwx_rx_reorder(sc, m, chanidx, desc,
4582 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4583 	    rate_n_flags, device_timestamp, &rxi, ml))
4584 		return;
4585 
4586 	iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
4587 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4588 	    rate_n_flags, device_timestamp, &rxi, ml);
4589 }
4590 
4591 void
4592 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4593 {
4594 	struct iwx_tfh_tfd *desc = &ring->desc[idx];
4595 	uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
4596 	int i;
4597 
4598 	/* First TB is never cleared - it is bidirectional DMA data. */
4599 	for (i = 1; i < num_tbs; i++) {
4600 		struct iwx_tfh_tb *tb = &desc->tbs[i];
4601 		memset(tb, 0, sizeof(*tb));
4602 	}
4603 	desc->num_tbs = 0;
4604 
4605 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4606 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
4607 	    sizeof(*desc), BUS_DMASYNC_PREWRITE);
4608 }
4609 
4610 void
4611 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
4612 {
4613 	struct ieee80211com *ic = &sc->sc_ic;
4614 
4615 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
4616 	    BUS_DMASYNC_POSTWRITE);
4617 	bus_dmamap_unload(sc->sc_dmat, txd->map);
4618 	m_freem(txd->m);
4619 	txd->m = NULL;
4620 
4621 	KASSERT(txd->in);
4622 	ieee80211_release_node(ic, &txd->in->in_ni);
4623 	txd->in = NULL;
4624 }
4625 
4626 void
4627 iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4628 {
4629  	struct iwx_tx_data *txd;
4630 
4631 	while (ring->tail != idx) {
4632 		txd = &ring->data[ring->tail];
4633 		if (txd->m != NULL) {
4634 			iwx_clear_tx_desc(sc, ring, ring->tail);
4635 			iwx_tx_update_byte_tbl(ring, ring->tail, 0, 0);
4636 			iwx_txd_done(sc, txd);
4637 			ring->queued--;
4638 		}
4639 		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
4640 	}
4641 }
4642 
4643 void
4644 iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4645     struct iwx_rx_data *data)
4646 {
4647 	struct ieee80211com *ic = &sc->sc_ic;
4648 	struct ifnet *ifp = IC2IFP(ic);
4649 	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
4650 	int qid = cmd_hdr->qid, status, txfail;
4651 	struct iwx_tx_ring *ring = &sc->txq[qid];
4652 	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
4653 	uint32_t ssn;
4654 	uint32_t len = iwx_rx_packet_len(pkt);
4655 
4656 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
4657 	    BUS_DMASYNC_POSTREAD);
4658 
4659 	/* Sanity checks. */
4660 	if (sizeof(*tx_resp) > len)
4661 		return;
4662 	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
4663 		return;
4664 	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
4665 	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
4666 		return;
4667 
4668 	sc->sc_tx_timer[qid] = 0;
4669 
4670 	if (tx_resp->frame_count > 1) /* A-MPDU */
4671 		return;
4672 
4673 	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
4674 	txfail = (status != IWX_TX_STATUS_SUCCESS &&
4675 	    status != IWX_TX_STATUS_DIRECT_DONE);
4676 
4677 	if (txfail)
4678 		ifp->if_oerrors++;
4679 
4680 	/*
4681 	 * On hardware supported by iwx(4) the SSN counter is only
4682 	 * 8 bit and corresponds to a Tx ring index rather than a
4683 	 * sequence number. Frames up to this index (non-inclusive)
4684 	 * can now be freed.
4685 	 */
4686 	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
4687 	ssn = le32toh(ssn) & 0xff;
4688 	iwx_txq_advance(sc, ring, ssn);
4689 	iwx_clear_oactive(sc, ring);
4690 }
4691 
4692 void
4693 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
4694 {
4695 	struct ieee80211com *ic = &sc->sc_ic;
4696 	struct ifnet *ifp = IC2IFP(ic);
4697 
4698 	if (ring->queued < IWX_TX_RING_LOMARK) {
4699 		sc->qfullmsk &= ~(1 << ring->qid);
4700 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
4701 			ifq_clr_oactive(&ifp->if_snd);
4702 			/*
4703 			 * Well, we're in interrupt context, but then again
4704 			 * I guess net80211 does all sorts of stunts in
4705 			 * interrupt context, so maybe this is no biggie.
4706 			 */
4707 			(*ifp->if_start)(ifp);
4708 		}
4709 	}
4710 }
4711 
4712 void
4713 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
4714 {
4715 	struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
4716 	struct ieee80211com *ic = &sc->sc_ic;
4717 	struct ieee80211_node *ni;
4718 	struct ieee80211_tx_ba *ba;
4719 	struct iwx_node *in;
4720 	struct iwx_tx_ring *ring;
4721 	uint16_t i, tfd_cnt, ra_tid_cnt, idx;
4722 	int qid;
4723 
4724 	if (ic->ic_state != IEEE80211_S_RUN)
4725 		return;
4726 
4727 	if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
4728 		return;
4729 
4730 	if (ba_res->sta_id != IWX_STATION_ID)
4731 		return;
4732 
4733 	ni = ic->ic_bss;
4734 	in = (void *)ni;
4735 
4736 	tfd_cnt = le16toh(ba_res->tfd_cnt);
4737 	ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
4738 	if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
4739 	    sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
4740 	    sizeof(ba_res->tfd[0]) * tfd_cnt))
4741 		return;
4742 
4743 	for (i = 0; i < tfd_cnt; i++) {
4744 		struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
4745 		uint8_t tid;
4746 
4747 		tid = ba_tfd->tid;
4748 		if (tid >= nitems(sc->aggqid))
4749 			continue;
4750 
4751 		qid = sc->aggqid[tid];
4752 		if (qid != htole16(ba_tfd->q_num))
4753 			continue;
4754 
4755 		ring = &sc->txq[qid];
4756 
4757 		ba = &ni->ni_tx_ba[tid];
4758 		if (ba->ba_state != IEEE80211_BA_AGREED)
4759 			continue;
4760 
4761 		idx = le16toh(ba_tfd->tfd_index);
4762 		if (idx >= IWX_TX_RING_COUNT)
4763 			continue;
4764 		sc->sc_tx_timer[qid] = 0;
4765 		iwx_txq_advance(sc, ring, idx);
4766 		iwx_clear_oactive(sc, ring);
4767 	}
4768 }
4769 
4770 void
4771 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4772     struct iwx_rx_data *data)
4773 {
4774 	struct ieee80211com *ic = &sc->sc_ic;
4775 	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
4776 	uint32_t missed;
4777 
4778 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
4779 	    (ic->ic_state != IEEE80211_S_RUN))
4780 		return;
4781 
4782 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4783 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
4784 
4785 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
4786 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
4787 		if (ic->ic_if.if_flags & IFF_DEBUG)
4788 			printf("%s: receiving no beacons from %s; checking if "
4789 			    "this AP is still responding to probe requests\n",
4790 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
4791 		/*
4792 		 * Rather than go directly to scan state, try to send a
4793 		 * directed probe request first. If that fails then the
4794 		 * state machine will drop us into scanning after timing
4795 		 * out waiting for a probe response.
4796 		 */
4797 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
4798 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
4799 	}
4800 
4801 }
4802 
4803 int
4804 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
4805 {
4806 	struct iwx_binding_cmd cmd;
4807 	struct iwx_phy_ctxt *phyctxt = in->in_phyctxt;
4808 	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
4809 	int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
4810 	uint32_t status;
4811 
4812 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
4813 		panic("binding already added");
4814 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
4815 		panic("binding already removed");
4816 
4817 	if (phyctxt == NULL) /* XXX race with iwx_stop() */
4818 		return EINVAL;
4819 
4820 	memset(&cmd, 0, sizeof(cmd));
4821 
4822 	cmd.id_and_color
4823 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4824 	cmd.action = htole32(action);
4825 	cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4826 
4827 	cmd.macs[0] = htole32(mac_id);
4828 	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
4829 		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
4830 
4831 	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
4832 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4833 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4834 	else
4835 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4836 
4837 	status = 0;
4838 	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
4839 	    &cmd, &status);
4840 	if (err == 0 && status != 0)
4841 		err = EIO;
4842 
4843 	return err;
4844 }
4845 
4846 uint8_t
4847 iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
4848 {
4849 	int center_idx = ic->ic_bss->ni_vht_chan_center_freq_idx0;
4850 	int primary_idx = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan);
4851 	/*
4852 	 * The FW is expected to check the control channel position only
4853 	 * when in HT/VHT and the channel width is not 20MHz. Return
4854 	 * this value as the default one:
4855 	 */
4856 	uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4857 
4858 	switch (primary_idx - center_idx) {
4859 	case -6:
4860 		pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
4861 		break;
4862 	case -2:
4863 		pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4864 		break;
4865 	case 2:
4866 		pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
4867 		break;
4868 	case 6:
4869 		pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
4870 		break;
4871 	default:
4872 		break;
4873 	}
4874 
4875 	return pos;
4876 }
4877 
4878 int
4879 iwx_phy_ctxt_cmd_uhb_v3(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4880     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
4881     uint8_t vht_chan_width)
4882 {
4883 	struct ieee80211com *ic = &sc->sc_ic;
4884 	struct iwx_phy_context_cmd_uhb cmd;
4885 	uint8_t active_cnt, idle_cnt;
4886 	struct ieee80211_channel *chan = ctxt->channel;
4887 
4888 	memset(&cmd, 0, sizeof(cmd));
4889 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
4890 	    ctxt->color));
4891 	cmd.action = htole32(action);
4892 
4893 	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
4894 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4895 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4896 	else
4897 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4898 
4899 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4900 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
4901 	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
4902 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
4903 		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
4904 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
4905 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
4906 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
4907 			/* secondary chan above -> control chan below */
4908 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4909 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
4910 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
4911 			/* secondary chan below -> control chan above */
4912 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
4913 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
4914 		} else {
4915 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
4916 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4917 		}
4918 	} else {
4919 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
4920 		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4921 	}
4922 
4923 	idle_cnt = chains_static;
4924 	active_cnt = chains_dynamic;
4925 	cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
4926 	    IWX_PHY_RX_CHAIN_VALID_POS);
4927 	cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
4928 	cmd.rxchain_info |= htole32(active_cnt <<
4929 	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
4930 
4931 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
4932 }
4933 
4934 int
4935 iwx_phy_ctxt_cmd_v3(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4936     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
4937     uint8_t vht_chan_width)
4938 {
4939 	struct ieee80211com *ic = &sc->sc_ic;
4940 	struct iwx_phy_context_cmd cmd;
4941 	uint8_t active_cnt, idle_cnt;
4942 	struct ieee80211_channel *chan = ctxt->channel;
4943 
4944 	memset(&cmd, 0, sizeof(cmd));
4945 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
4946 	    ctxt->color));
4947 	cmd.action = htole32(action);
4948 
4949 	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
4950 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4951 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4952 	else
4953 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4954 
4955 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4956 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
4957 	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
4958 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
4959 		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
4960 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
4961 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
4962 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
4963 			/* secondary chan above -> control chan below */
4964 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4965 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
4966 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
4967 			/* secondary chan below -> control chan above */
4968 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
4969 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
4970 		} else {
4971 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
4972 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4973 		}
4974 	} else {
4975 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
4976 		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4977 	}
4978 
4979 	idle_cnt = chains_static;
4980 	active_cnt = chains_dynamic;
4981 	cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
4982 	    IWX_PHY_RX_CHAIN_VALID_POS);
4983 	cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
4984 	cmd.rxchain_info |= htole32(active_cnt <<
4985 	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
4986 
4987 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
4988 }
4989 
4990 int
4991 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4992     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4993     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
4994 {
4995 	int cmdver;
4996 
4997 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
4998 	if (cmdver != 3) {
4999 		printf("%s: firmware does not support phy-context-cmd v3\n",
5000 		    DEVNAME(sc));
5001 		return ENOTSUP;
5002 	}
5003 
5004 	/*
5005 	 * Intel increased the size of the fw_channel_info struct and neglected
5006 	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
5007 	 * member in the middle.
5008 	 * To keep things simple we use a separate function to handle the larger
5009 	 * variant of the phy context command.
5010 	 */
5011 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
5012 		return iwx_phy_ctxt_cmd_uhb_v3(sc, ctxt, chains_static,
5013 		    chains_dynamic, action, sco, vht_chan_width);
5014 	}
5015 
5016 	return iwx_phy_ctxt_cmd_v3(sc, ctxt, chains_static, chains_dynamic,
5017 	    action, sco, vht_chan_width);
5018 }
5019 
5020 int
5021 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5022 {
5023 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5024 	struct iwx_tfh_tfd *desc;
5025 	struct iwx_tx_data *txdata;
5026 	struct iwx_device_cmd *cmd;
5027 	struct mbuf *m;
5028 	bus_addr_t paddr;
5029 	uint64_t addr;
5030 	int err = 0, i, paylen, off, s;
5031 	int idx, code, async, group_id;
5032 	size_t hdrlen, datasz;
5033 	uint8_t *data;
5034 	int generation = sc->sc_generation;
5035 
5036 	code = hcmd->id;
5037 	async = hcmd->flags & IWX_CMD_ASYNC;
5038 	idx = ring->cur;
5039 
5040 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
5041 		paylen += hcmd->len[i];
5042 	}
5043 
5044 	/* If this command waits for a response, allocate response buffer. */
5045 	hcmd->resp_pkt = NULL;
5046 	if (hcmd->flags & IWX_CMD_WANT_RESP) {
5047 		uint8_t *resp_buf;
5048 		KASSERT(!async);
5049 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet));
5050 		KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX);
5051 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
5052 			return ENOSPC;
5053 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
5054 		    M_NOWAIT | M_ZERO);
5055 		if (resp_buf == NULL)
5056 			return ENOMEM;
5057 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
5058 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
5059 	} else {
5060 		sc->sc_cmd_resp_pkt[idx] = NULL;
5061 	}
5062 
5063 	s = splnet();
5064 
5065 	desc = &ring->desc[idx];
5066 	txdata = &ring->data[idx];
5067 
5068 	/*
5069 	 * XXX Intel inside (tm)
5070 	 * Firmware API versions >= 50 reject old-style commands in
5071 	 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
5072 	 * that such commands were in the LONG_GROUP instead in order
5073 	 * for firmware to accept them.
5074 	 */
5075 	if (iwx_cmd_groupid(code) == 0) {
5076 		code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
5077 		txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
5078 	} else
5079 		txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
5080 
5081 	group_id = iwx_cmd_groupid(code);
5082 
5083 	hdrlen = sizeof(cmd->hdr_wide);
5084 	datasz = sizeof(cmd->data_wide);
5085 
5086 	if (paylen > datasz) {
5087 		/* Command is too large to fit in pre-allocated space. */
5088 		size_t totlen = hdrlen + paylen;
5089 		if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
5090 			printf("%s: firmware command too long (%zd bytes)\n",
5091 			    DEVNAME(sc), totlen);
5092 			err = EINVAL;
5093 			goto out;
5094 		}
5095 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
5096 		if (m == NULL) {
5097 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
5098 			    DEVNAME(sc), totlen);
5099 			err = ENOMEM;
5100 			goto out;
5101 		}
5102 		cmd = mtod(m, struct iwx_device_cmd *);
5103 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
5104 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5105 		if (err) {
5106 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
5107 			    DEVNAME(sc), totlen);
5108 			m_freem(m);
5109 			goto out;
5110 		}
5111 		txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
5112 		paddr = txdata->map->dm_segs[0].ds_addr;
5113 	} else {
5114 		cmd = &ring->cmd[idx];
5115 		paddr = txdata->cmd_paddr;
5116 	}
5117 
5118 	memset(cmd, 0, sizeof(*cmd));
5119 	cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
5120 	cmd->hdr_wide.group_id = group_id;
5121 	cmd->hdr_wide.qid = ring->qid;
5122 	cmd->hdr_wide.idx = idx;
5123 	cmd->hdr_wide.length = htole16(paylen);
5124 	cmd->hdr_wide.version = iwx_cmd_version(code);
5125 	data = cmd->data_wide;
5126 
5127 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
5128 		if (hcmd->len[i] == 0)
5129 			continue;
5130 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5131 		off += hcmd->len[i];
5132 	}
5133 	KASSERT(off == paylen);
5134 
5135 	desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
5136 	addr = htole64(paddr);
5137 	memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
5138 	if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
5139 		desc->tbs[1].tb_len = htole16(hdrlen + paylen -
5140 		    IWX_FIRST_TB_SIZE);
5141 		addr = htole64(paddr + IWX_FIRST_TB_SIZE);
5142 		memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
5143 		desc->num_tbs = htole16(2);
5144 	} else
5145 		desc->num_tbs = htole16(1);
5146 
5147 	if (paylen > datasz) {
5148 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
5149 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5150 	} else {
5151 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5152 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5153 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5154 	}
5155 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5156 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5157 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5158 	/* Kick command ring. */
5159 	DPRINTF(("%s: sending command 0x%x\n", __func__, code));
5160 	ring->queued++;
5161 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5162 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);
5163 
5164 	if (!async) {
5165 		err = tsleep_nsec(desc, PCATCH, "iwxcmd", SEC_TO_NSEC(1));
5166 		if (err == 0) {
5167 			/* if hardware is no longer up, return error */
5168 			if (generation != sc->sc_generation) {
5169 				err = ENXIO;
5170 				goto out;
5171 			}
5172 
5173 			/* Response buffer will be freed in iwx_free_resp(). */
5174 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5175 			sc->sc_cmd_resp_pkt[idx] = NULL;
5176 		} else if (generation == sc->sc_generation) {
5177 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
5178 			    sc->sc_cmd_resp_len[idx]);
5179 			sc->sc_cmd_resp_pkt[idx] = NULL;
5180 		}
5181 	}
5182  out:
5183 	splx(s);
5184 
5185 	return err;
5186 }
5187 
5188 int
5189 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5190     uint16_t len, const void *data)
5191 {
5192 	struct iwx_host_cmd cmd = {
5193 		.id = id,
5194 		.len = { len, },
5195 		.data = { data, },
5196 		.flags = flags,
5197 	};
5198 
5199 	return iwx_send_cmd(sc, &cmd);
5200 }
5201 
5202 int
5203 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
5204     uint32_t *status)
5205 {
5206 	struct iwx_rx_packet *pkt;
5207 	struct iwx_cmd_response *resp;
5208 	int err, resp_len;
5209 
5210 	KASSERT((cmd->flags & IWX_CMD_WANT_RESP) == 0);
5211 	cmd->flags |= IWX_CMD_WANT_RESP;
5212 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5213 
5214 	err = iwx_send_cmd(sc, cmd);
5215 	if (err)
5216 		return err;
5217 
5218 	pkt = cmd->resp_pkt;
5219 	if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
5220 		return EIO;
5221 
5222 	resp_len = iwx_rx_packet_payload_len(pkt);
5223 	if (resp_len != sizeof(*resp)) {
5224 		iwx_free_resp(sc, cmd);
5225 		return EIO;
5226 	}
5227 
5228 	resp = (void *)pkt->data;
5229 	*status = le32toh(resp->status);
5230 	iwx_free_resp(sc, cmd);
5231 	return err;
5232 }
5233 
5234 int
5235 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5236     const void *data, uint32_t *status)
5237 {
5238 	struct iwx_host_cmd cmd = {
5239 		.id = id,
5240 		.len = { len, },
5241 		.data = { data, },
5242 	};
5243 
5244 	return iwx_send_cmd_status(sc, &cmd, status);
5245 }
5246 
5247 void
5248 iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5249 {
5250 	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP);
5251 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
5252 	hcmd->resp_pkt = NULL;
5253 }
5254 
5255 void
5256 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
5257 {
5258 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5259 	struct iwx_tx_data *data;
5260 
5261 	if (qid != IWX_DQA_CMD_QUEUE) {
5262 		return;	/* Not a command ack. */
5263 	}
5264 
5265 	data = &ring->data[idx];
5266 
5267 	if (data->m != NULL) {
5268 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
5269 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5270 		bus_dmamap_unload(sc->sc_dmat, data->map);
5271 		m_freem(data->m);
5272 		data->m = NULL;
5273 	}
5274 	wakeup(&ring->desc[idx]);
5275 
5276 	DPRINTF(("%s: command 0x%x done\n", __func__, code));
5277 	if (ring->queued == 0) {
5278 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
5279 			DEVNAME(sc), code));
5280 	} else if (ring->queued > 0)
5281 		ring->queued--;
5282 }
5283 
5284 /*
5285  * Fill in various bit for management frames, and leave them
5286  * unfilled for data frames (firmware takes care of that).
5287  * Return the selected TX rate.
5288  */
5289 const struct iwx_rate *
5290 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
5291     struct ieee80211_frame *wh, struct iwx_tx_cmd_gen2 *tx)
5292 {
5293 	struct ieee80211com *ic = &sc->sc_ic;
5294 	struct ieee80211_node *ni = &in->in_ni;
5295 	struct ieee80211_rateset *rs = &ni->ni_rates;
5296 	const struct iwx_rate *rinfo;
5297 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5298 	int min_ridx = iwx_rval2ridx(ieee80211_min_basic_rate(ic));
5299 	int ridx, rate_flags;
5300 	uint32_t flags = 0;
5301 
5302 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
5303 	    type != IEEE80211_FC0_TYPE_DATA) {
5304 		/* for non-data, use the lowest supported rate */
5305 		ridx = min_ridx;
5306 		flags |= IWX_TX_FLAGS_CMD_RATE;
5307 	} else if (ic->ic_fixed_mcs != -1) {
5308 		ridx = sc->sc_fixed_ridx;
5309 		flags |= IWX_TX_FLAGS_CMD_RATE;
5310 	} else if (ic->ic_fixed_rate != -1) {
5311 		ridx = sc->sc_fixed_ridx;
5312 		flags |= IWX_TX_FLAGS_CMD_RATE;
5313 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
5314 		ridx = iwx_mcs2ridx[ni->ni_txmcs];
5315 	} else {
5316 		uint8_t rval;
5317 		rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
5318 		ridx = iwx_rval2ridx(rval);
5319 		if (ridx < min_ridx)
5320 			ridx = min_ridx;
5321 	}
5322 
5323 	if ((ic->ic_flags & IEEE80211_F_RSNON) &&
5324 	    ni->ni_rsn_supp_state == RSNA_SUPP_PTKNEGOTIATING)
5325 		flags |= IWX_TX_FLAGS_HIGH_PRI;
5326 	tx->flags = htole32(flags);
5327 
5328 	rinfo = &iwx_rates[ridx];
5329 	if (iwx_is_mimo_ht_plcp(rinfo->ht_plcp))
5330 		rate_flags = IWX_RATE_MCS_ANT_AB_MSK;
5331 	else
5332 		rate_flags = IWX_RATE_MCS_ANT_A_MSK;
5333 	if (IWX_RIDX_IS_CCK(ridx))
5334 		rate_flags |= IWX_RATE_MCS_CCK_MSK;
5335 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
5336  	    type == IEEE80211_FC0_TYPE_DATA &&
5337 	    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
5338 		uint8_t sco = IEEE80211_HTOP0_SCO_SCN;
5339 		uint8_t vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
5340 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5341 		    IEEE80211_CHAN_80MHZ_ALLOWED(ni->ni_chan) &&
5342 		    ieee80211_node_supports_vht_chan80(ni))
5343 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
5344 		else if (IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
5345 		    ieee80211_node_supports_ht_chan40(ni))
5346 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
5347 		rate_flags |= IWX_RATE_MCS_HT_MSK;
5348 		if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80 &&
5349 		    in->in_phyctxt != NULL &&
5350 		    in->in_phyctxt->vht_chan_width == vht_chan_width) {
5351 			rate_flags |= IWX_RATE_MCS_CHAN_WIDTH_80;
5352 			if (ieee80211_node_supports_vht_sgi80(ni))
5353 				rate_flags |= IWX_RATE_MCS_SGI_MSK;
5354 		} else if ((sco == IEEE80211_HTOP0_SCO_SCA ||
5355 		    sco == IEEE80211_HTOP0_SCO_SCB) &&
5356 		    in->in_phyctxt != NULL && in->in_phyctxt->sco == sco) {
5357 			rate_flags |= IWX_RATE_MCS_CHAN_WIDTH_40;
5358 			if (ieee80211_node_supports_ht_sgi40(ni))
5359 				rate_flags |= IWX_RATE_MCS_SGI_MSK;
5360 		} else if (ieee80211_node_supports_ht_sgi20(ni))
5361 			rate_flags |= IWX_RATE_MCS_SGI_MSK;
5362 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
5363 	} else
5364 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
5365 
5366 	return rinfo;
5367 }
5368 
5369 void
5370 iwx_tx_update_byte_tbl(struct iwx_tx_ring *txq, int idx, uint16_t byte_cnt,
5371     uint16_t num_tbs)
5372 {
5373 	uint8_t filled_tfd_size, num_fetch_chunks;
5374 	uint16_t len = byte_cnt;
5375 	uint16_t bc_ent;
5376 	struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
5377 
5378 	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
5379 			  num_tbs * sizeof(struct iwx_tfh_tb);
5380 	/*
5381 	 * filled_tfd_size contains the number of filled bytes in the TFD.
5382 	 * Dividing it by 64 will give the number of chunks to fetch
5383 	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
5384 	 * If, for example, TFD contains only 3 TBs then 32 bytes
5385 	 * of the TFD are used, and only one chunk of 64 bytes should
5386 	 * be fetched
5387 	 */
5388 	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;
5389 
5390 	/* Before AX210, the HW expects DW */
5391 	len = howmany(len, 4);
5392 	bc_ent = htole16(len | (num_fetch_chunks << 12));
5393 	scd_bc_tbl->tfd_offset[idx] = bc_ent;
5394 }
5395 
5396 int
5397 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
5398 {
5399 	struct ieee80211com *ic = &sc->sc_ic;
5400 	struct iwx_node *in = (void *)ni;
5401 	struct iwx_tx_ring *ring;
5402 	struct iwx_tx_data *data;
5403 	struct iwx_tfh_tfd *desc;
5404 	struct iwx_device_cmd *cmd;
5405 	struct iwx_tx_cmd_gen2 *tx;
5406 	struct ieee80211_frame *wh;
5407 	struct ieee80211_key *k = NULL;
5408 	const struct iwx_rate *rinfo;
5409 	uint64_t paddr;
5410 	u_int hdrlen;
5411 	bus_dma_segment_t *seg;
5412 	uint16_t num_tbs;
5413 	uint8_t type, subtype;
5414 	int i, totlen, err, pad, qid;
5415 
5416 	wh = mtod(m, struct ieee80211_frame *);
5417 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5418 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5419 	if (type == IEEE80211_FC0_TYPE_CTL)
5420 		hdrlen = sizeof(struct ieee80211_frame_min);
5421 	else
5422 		hdrlen = ieee80211_get_hdrlen(wh);
5423 
5424 	qid = sc->first_data_qid;
5425 
5426 	/* Put QoS frames on the data queue which maps to their TID. */
5427 	if (ieee80211_has_qos(wh)) {
5428 		struct ieee80211_tx_ba *ba;
5429 		uint16_t qos = ieee80211_get_qos(wh);
5430 		uint8_t tid = qos & IEEE80211_QOS_TID;
5431 
5432 		ba = &ni->ni_tx_ba[tid];
5433 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5434 		    type == IEEE80211_FC0_TYPE_DATA &&
5435 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
5436 		    sc->aggqid[tid] != 0 &&
5437 		    ba->ba_state == IEEE80211_BA_AGREED) {
5438 			qid = sc->aggqid[tid];
5439 		}
5440 	}
5441 
5442 	ring = &sc->txq[qid];
5443 	desc = &ring->desc[ring->cur];
5444 	memset(desc, 0, sizeof(*desc));
5445 	data = &ring->data[ring->cur];
5446 
5447 	cmd = &ring->cmd[ring->cur];
5448 	cmd->hdr.code = IWX_TX_CMD;
5449 	cmd->hdr.flags = 0;
5450 	cmd->hdr.qid = ring->qid;
5451 	cmd->hdr.idx = ring->cur;
5452 
5453 	tx = (void *)cmd->data;
5454 	memset(tx, 0, sizeof(*tx));
5455 
5456 	rinfo = iwx_tx_fill_cmd(sc, in, wh, tx);
5457 
5458 #if NBPFILTER > 0
5459 	if (sc->sc_drvbpf != NULL) {
5460 		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
5461 		uint16_t chan_flags;
5462 
5463 		tap->wt_flags = 0;
5464 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
5465 		chan_flags = ni->ni_chan->ic_flags;
5466 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
5467 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
5468 			chan_flags &= ~IEEE80211_CHAN_HT;
5469 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
5470 		}
5471 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
5472 			chan_flags &= ~IEEE80211_CHAN_VHT;
5473 		tap->wt_chan_flags = htole16(chan_flags);
5474 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
5475 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5476 		    type == IEEE80211_FC0_TYPE_DATA &&
5477 		    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
5478 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
5479 		} else
5480 			tap->wt_rate = rinfo->rate;
5481 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
5482 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
5483 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
5484 
5485 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
5486 		    m, BPF_DIRECTION_OUT);
5487 	}
5488 #endif
5489 
5490 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
5491                 k = ieee80211_get_txkey(ic, wh, ni);
5492 		if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
5493 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
5494 				return ENOBUFS;
5495 			/* 802.11 header may have moved. */
5496 			wh = mtod(m, struct ieee80211_frame *);
5497 			tx->flags |= htole32(IWX_TX_FLAGS_ENCRYPT_DIS);
5498 		} else {
5499 			k->k_tsc++;
5500 			/* Hardware increments PN internally and adds IV. */
5501 		}
5502 	} else
5503 		tx->flags |= htole32(IWX_TX_FLAGS_ENCRYPT_DIS);
5504 
5505 	totlen = m->m_pkthdr.len;
5506 
5507 	if (hdrlen & 3) {
5508 		/* First segment length must be a multiple of 4. */
5509 		pad = 4 - (hdrlen & 3);
5510 		tx->offload_assist |= htole16(IWX_TX_CMD_OFFLD_PAD);
5511 	} else
5512 		pad = 0;
5513 
5514 	tx->len = htole16(totlen);
5515 
5516 	/* Copy 802.11 header in TX command. */
5517 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
5518 
5519 	/* Trim 802.11 header. */
5520 	m_adj(m, hdrlen);
5521 
5522 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
5523 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5524 	if (err && err != EFBIG) {
5525 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
5526 		m_freem(m);
5527 		return err;
5528 	}
5529 	if (err) {
5530 		/* Too many DMA segments, linearize mbuf. */
5531 		if (m_defrag(m, M_DONTWAIT)) {
5532 			m_freem(m);
5533 			return ENOBUFS;
5534 		}
5535 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
5536 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5537 		if (err) {
5538 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
5539 			    err);
5540 			m_freem(m);
5541 			return err;
5542 		}
5543 	}
5544 	data->m = m;
5545 	data->in = in;
5546 
5547 	/* Fill TX descriptor. */
5548 	num_tbs = 2 + data->map->dm_nsegs;
5549 	desc->num_tbs = htole16(num_tbs);
5550 
5551 	desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
5552 	paddr = htole64(data->cmd_paddr);
5553 	memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
5554 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
5555 		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
5556 	desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
5557 	    sizeof(*tx) + hdrlen + pad - IWX_FIRST_TB_SIZE);
5558 	paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
5559 	memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
5560 
5561 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
5562 		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
5563 
5564 	/* Other DMA segments are for data payload. */
5565 	seg = data->map->dm_segs;
5566 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
5567 		desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
5568 		paddr = htole64(seg->ds_addr);
5569 		memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
5570 		if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
5571 			DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
5572 	}
5573 
5574 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
5575 	    BUS_DMASYNC_PREWRITE);
5576 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5577 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5578 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
5579 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5580 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5581 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5582 
5583 	iwx_tx_update_byte_tbl(ring, ring->cur, totlen, num_tbs);
5584 
5585 	/* Kick TX ring. */
5586 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5587 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);
5588 
5589 	/* Mark TX ring as full if we reach a certain threshold. */
5590 	if (++ring->queued > IWX_TX_RING_HIMARK) {
5591 		sc->qfullmsk |= 1 << ring->qid;
5592 	}
5593 
5594 	if (ic->ic_if.if_flags & IFF_UP)
5595 		sc->sc_tx_timer[ring->qid] = 15;
5596 
5597 	return 0;
5598 }
5599 
5600 int
5601 iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
5602 {
5603 	struct iwx_rx_packet *pkt;
5604 	struct iwx_tx_path_flush_cmd_rsp *resp;
5605 	struct iwx_tx_path_flush_cmd flush_cmd = {
5606 		.sta_id = htole32(sta_id),
5607 		.tid_mask = htole16(tids),
5608 	};
5609 	struct iwx_host_cmd hcmd = {
5610 		.id = IWX_TXPATH_FLUSH,
5611 		.len = { sizeof(flush_cmd), },
5612 		.data = { &flush_cmd, },
5613 		.flags = IWX_CMD_WANT_RESP,
5614 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
5615 	};
5616 	int err, resp_len, i, num_flushed_queues;
5617 
5618 	err = iwx_send_cmd(sc, &hcmd);
5619 	if (err)
5620 		return err;
5621 
5622 	pkt = hcmd.resp_pkt;
5623 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
5624 		err = EIO;
5625 		goto out;
5626 	}
5627 
5628 	resp_len = iwx_rx_packet_payload_len(pkt);
5629 	/* Some firmware versions don't provide a response. */
5630 	if (resp_len == 0)
5631 		goto out;
5632 	else if (resp_len != sizeof(*resp)) {
5633 		err = EIO;
5634 		goto out;
5635 	}
5636 
5637 	resp = (void *)pkt->data;
5638 
5639 	if (le16toh(resp->sta_id) != sta_id) {
5640 		err = EIO;
5641 		goto out;
5642 	}
5643 
5644 	num_flushed_queues = le16toh(resp->num_flushed_queues);
5645 	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
5646 		err = EIO;
5647 		goto out;
5648 	}
5649 
5650 	for (i = 0; i < num_flushed_queues; i++) {
5651 		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
5652 		uint16_t tid = le16toh(queue_info->tid);
5653 		uint16_t read_after = le16toh(queue_info->read_after_flush);
5654 		uint16_t qid = le16toh(queue_info->queue_num);
5655 		struct iwx_tx_ring *txq;
5656 
5657 		if (qid >= nitems(sc->txq))
5658 			continue;
5659 
5660 		txq = &sc->txq[qid];
5661 		if (tid != txq->tid)
5662 			continue;
5663 
5664 		iwx_txq_advance(sc, txq, read_after);
5665 	}
5666 out:
5667 	iwx_free_resp(sc, &hcmd);
5668 	return err;
5669 }
5670 
5671 #define IWX_FLUSH_WAIT_MS	2000
5672 
5673 int
5674 iwx_wait_tx_queues_empty(struct iwx_softc *sc)
5675 {
5676 	int i, err;
5677 
5678 	for (i = 0; i < nitems(sc->txq); i++) {
5679 		struct iwx_tx_ring *ring = &sc->txq[i];
5680 
5681 		if (i == IWX_DQA_CMD_QUEUE)
5682 			continue;
5683 
5684 		while (ring->queued > 0) {
5685 			err = tsleep_nsec(ring, 0, "iwxflush",
5686 			    MSEC_TO_NSEC(IWX_FLUSH_WAIT_MS));
5687 			if (err)
5688 				return err;
5689 		}
5690 	}
5691 
5692 	return 0;
5693 }
5694 
5695 int
5696 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
5697 {
5698 	struct iwx_add_sta_cmd cmd;
5699 	int err;
5700 	uint32_t status;
5701 
5702 	memset(&cmd, 0, sizeof(cmd));
5703 	cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5704 	    in->in_color));
5705 	cmd.sta_id = IWX_STATION_ID;
5706 	cmd.add_modify = IWX_STA_MODE_MODIFY;
5707 	cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
5708 	cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
5709 
5710 	status = IWX_ADD_STA_SUCCESS;
5711 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
5712 	    sizeof(cmd), &cmd, &status);
5713 	if (err) {
5714 		printf("%s: could not update sta (error %d)\n",
5715 		    DEVNAME(sc), err);
5716 		return err;
5717 	}
5718 
5719 	switch (status & IWX_ADD_STA_STATUS_MASK) {
5720 	case IWX_ADD_STA_SUCCESS:
5721 		break;
5722 	default:
5723 		err = EIO;
5724 		printf("%s: Couldn't %s draining for station\n",
5725 		    DEVNAME(sc), drain ? "enable" : "disable");
5726 		break;
5727 	}
5728 
5729 	return err;
5730 }
5731 
5732 int
5733 iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
5734 {
5735 	int err;
5736 
5737 	splassert(IPL_NET);
5738 
5739 	sc->sc_flags |= IWX_FLAG_TXFLUSH;
5740 
5741 	err = iwx_drain_sta(sc, in, 1);
5742 	if (err)
5743 		goto done;
5744 
5745 	err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
5746 	if (err) {
5747 		printf("%s: could not flush Tx path (error %d)\n",
5748 		    DEVNAME(sc), err);
5749 		goto done;
5750 	}
5751 
5752 	err = iwx_wait_tx_queues_empty(sc);
5753 	if (err) {
5754 		printf("%s: Could not empty Tx queues (error %d)\n",
5755 		    DEVNAME(sc), err);
5756 		goto done;
5757 	}
5758 
5759 	err = iwx_drain_sta(sc, in, 0);
5760 done:
5761 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
5762 	return err;
5763 }
5764 
5765 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC    25
5766 
5767 int
5768 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
5769     struct iwx_beacon_filter_cmd *cmd)
5770 {
5771 	return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
5772 	    0, sizeof(struct iwx_beacon_filter_cmd), cmd);
5773 }
5774 
5775 int
5776 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
5777 {
5778 	struct iwx_beacon_filter_cmd cmd = {
5779 		IWX_BF_CMD_CONFIG_DEFAULTS,
5780 		.bf_enable_beacon_filter = htole32(1),
5781 		.ba_enable_beacon_abort = htole32(enable),
5782 	};
5783 
5784 	if (!sc->sc_bf.bf_enabled)
5785 		return 0;
5786 
5787 	sc->sc_bf.ba_enabled = enable;
5788 	return iwx_beacon_filter_send_cmd(sc, &cmd);
5789 }
5790 
5791 void
5792 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
5793     struct iwx_mac_power_cmd *cmd)
5794 {
5795 	struct ieee80211com *ic = &sc->sc_ic;
5796 	struct ieee80211_node *ni = &in->in_ni;
5797 	int dtim_period, dtim_msec, keep_alive;
5798 
5799 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5800 	    in->in_color));
5801 	if (ni->ni_dtimperiod)
5802 		dtim_period = ni->ni_dtimperiod;
5803 	else
5804 		dtim_period = 1;
5805 
5806 	/*
5807 	 * Regardless of power management state the driver must set
5808 	 * keep alive period. FW will use it for sending keep alive NDPs
5809 	 * immediately after association. Check that keep alive period
5810 	 * is at least 3 * DTIM.
5811 	 */
5812 	dtim_msec = dtim_period * ni->ni_intval;
5813 	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
5814 	keep_alive = roundup(keep_alive, 1000) / 1000;
5815 	cmd->keep_alive_seconds = htole16(keep_alive);
5816 
5817 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5818 		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5819 }
5820 
5821 int
5822 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
5823 {
5824 	int err;
5825 	int ba_enable;
5826 	struct iwx_mac_power_cmd cmd;
5827 
5828 	memset(&cmd, 0, sizeof(cmd));
5829 
5830 	iwx_power_build_cmd(sc, in, &cmd);
5831 
5832 	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
5833 	    sizeof(cmd), &cmd);
5834 	if (err != 0)
5835 		return err;
5836 
5837 	ba_enable = !!(cmd.flags &
5838 	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
5839 	return iwx_update_beacon_abort(sc, in, ba_enable);
5840 }
5841 
5842 int
5843 iwx_power_update_device(struct iwx_softc *sc)
5844 {
5845 	struct iwx_device_power_cmd cmd = { };
5846 	struct ieee80211com *ic = &sc->sc_ic;
5847 
5848 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5849 		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5850 
5851 	return iwx_send_cmd_pdu(sc,
5852 	    IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
5853 }
5854 
5855 int
5856 iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
5857 {
5858 	struct iwx_beacon_filter_cmd cmd = {
5859 		IWX_BF_CMD_CONFIG_DEFAULTS,
5860 		.bf_enable_beacon_filter = htole32(1),
5861 		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
5862 	};
5863 	int err;
5864 
5865 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
5866 	if (err == 0)
5867 		sc->sc_bf.bf_enabled = 1;
5868 
5869 	return err;
5870 }
5871 
5872 int
5873 iwx_disable_beacon_filter(struct iwx_softc *sc)
5874 {
5875 	struct iwx_beacon_filter_cmd cmd;
5876 	int err;
5877 
5878 	memset(&cmd, 0, sizeof(cmd));
5879 
5880 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
5881 	if (err == 0)
5882 		sc->sc_bf.bf_enabled = 0;
5883 
5884 	return err;
5885 }
5886 
5887 int
5888 iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
5889 {
5890 	struct iwx_add_sta_cmd add_sta_cmd;
5891 	int err;
5892 	uint32_t status, aggsize;
5893 	const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
5894 		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
5895 	struct ieee80211com *ic = &sc->sc_ic;
5896 
5897 	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
5898 		panic("STA already added");
5899 
5900 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
5901 
5902 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5903 		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
5904 		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
5905 	} else {
5906 		add_sta_cmd.sta_id = IWX_STATION_ID;
5907 		add_sta_cmd.station_type = IWX_STA_LINK;
5908 	}
5909 	add_sta_cmd.mac_id_n_color
5910 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
5911 	if (!update) {
5912 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
5913 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
5914 			    etheranyaddr);
5915 		else
5916 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
5917 			    in->in_macaddr);
5918 	}
5919 	add_sta_cmd.add_modify = update ? 1 : 0;
5920 	add_sta_cmd.station_flags_msk
5921 	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
5922 
5923 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
5924 		add_sta_cmd.station_flags_msk
5925 		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
5926 		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);
5927 
5928 		if (iwx_mimo_enabled(sc)) {
5929 			if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
5930 				uint16_t rx_mcs = (in->in_ni.ni_vht_rxmcs &
5931 				    IEEE80211_VHT_MCS_FOR_SS_MASK(2)) >>
5932 				    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2);
5933 				if (rx_mcs != IEEE80211_VHT_MCS_SS_NOT_SUPP) {
5934 					add_sta_cmd.station_flags |=
5935 					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
5936 				}
5937 			} else {
5938 				if (in->in_ni.ni_rxmcs[1] != 0) {
5939 					add_sta_cmd.station_flags |=
5940 					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
5941 				}
5942 				if (in->in_ni.ni_rxmcs[2] != 0) {
5943 					add_sta_cmd.station_flags |=
5944 					    htole32(IWX_STA_FLG_MIMO_EN_MIMO3);
5945 				}
5946 			}
5947 		}
5948 
5949 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
5950 		    ieee80211_node_supports_ht_chan40(&in->in_ni)) {
5951 			add_sta_cmd.station_flags |= htole32(
5952 			    IWX_STA_FLG_FAT_EN_40MHZ);
5953 		}
5954 
5955 		if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
5956 			if (IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
5957 			    ieee80211_node_supports_vht_chan80(&in->in_ni)) {
5958 				add_sta_cmd.station_flags |= htole32(
5959 				    IWX_STA_FLG_FAT_EN_80MHZ);
5960 			}
5961 			aggsize = (in->in_ni.ni_vhtcaps &
5962 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_MASK) >>
5963 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT;
5964 		} else {
5965 			aggsize = (in->in_ni.ni_ampdu_param &
5966 			    IEEE80211_AMPDU_PARAM_LE);
5967 		}
5968 		if (aggsize > max_aggsize)
5969 			aggsize = max_aggsize;
5970 		add_sta_cmd.station_flags |= htole32((aggsize <<
5971 		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) &
5972 		    IWX_STA_FLG_MAX_AGG_SIZE_MSK);
5973 
5974 		switch (in->in_ni.ni_ampdu_param & IEEE80211_AMPDU_PARAM_SS) {
5975 		case IEEE80211_AMPDU_PARAM_SS_2:
5976 			add_sta_cmd.station_flags
5977 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
5978 			break;
5979 		case IEEE80211_AMPDU_PARAM_SS_4:
5980 			add_sta_cmd.station_flags
5981 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
5982 			break;
5983 		case IEEE80211_AMPDU_PARAM_SS_8:
5984 			add_sta_cmd.station_flags
5985 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
5986 			break;
5987 		case IEEE80211_AMPDU_PARAM_SS_16:
5988 			add_sta_cmd.station_flags
5989 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
5990 			break;
5991 		default:
5992 			break;
5993 		}
5994 	}
5995 
5996 	status = IWX_ADD_STA_SUCCESS;
5997 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
5998 	    &add_sta_cmd, &status);
5999 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
6000 		err = EIO;
6001 
6002 	return err;
6003 }
6004 
6005 int
6006 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
6007 {
6008 	struct ieee80211com *ic = &sc->sc_ic;
6009 	struct iwx_rm_sta_cmd rm_sta_cmd;
6010 	int err;
6011 
6012 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
6013 		panic("sta already removed");
6014 
6015 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
6016 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6017 		rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6018 	else
6019 		rm_sta_cmd.sta_id = IWX_STATION_ID;
6020 
6021 	err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
6022 	    &rm_sta_cmd);
6023 
6024 	return err;
6025 }
6026 
6027 int
6028 iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
6029 {
6030 	struct ieee80211com *ic = &sc->sc_ic;
6031 	struct ieee80211_node *ni = &in->in_ni;
6032 	int err, i;
6033 
6034 	err = iwx_flush_sta(sc, in);
6035 	if (err) {
6036 		printf("%s: could not flush Tx path (error %d)\n",
6037 		    DEVNAME(sc), err);
6038 		return err;
6039 	}
6040 	err = iwx_rm_sta_cmd(sc, in);
6041 	if (err) {
6042 		printf("%s: could not remove STA (error %d)\n",
6043 		    DEVNAME(sc), err);
6044 		return err;
6045 	}
6046 
6047 	in->in_flags = 0;
6048 
6049 	sc->sc_rx_ba_sessions = 0;
6050 	sc->ba_rx.start_tidmask = 0;
6051 	sc->ba_rx.stop_tidmask = 0;
6052 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
6053 	sc->ba_tx.start_tidmask = 0;
6054 	sc->ba_tx.stop_tidmask = 0;
6055 	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
6056 		sc->qenablemsk &= ~(1 << i);
6057 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
6058 		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
6059 		if (ba->ba_state != IEEE80211_BA_AGREED)
6060 			continue;
6061 		ieee80211_delba_request(ic, ni, 0, 1, i);
6062 	}
6063 
6064 	return 0;
6065 }
6066 
6067 uint8_t
6068 iwx_umac_scan_fill_channels(struct iwx_softc *sc,
6069     struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
6070     int n_ssids, int bgscan)
6071 {
6072 	struct ieee80211com *ic = &sc->sc_ic;
6073 	struct ieee80211_channel *c;
6074 	uint8_t nchan;
6075 
6076 	for (nchan = 0, c = &ic->ic_channels[1];
6077 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
6078 	    nchan < chan_nitems &&
6079 	    nchan < sc->sc_capa_n_scan_channels;
6080 	    c++) {
6081 		uint8_t channel_num;
6082 
6083 		if (c->ic_flags == 0)
6084 			continue;
6085 
6086 		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
6087 		if (isset(sc->sc_ucode_api,
6088 		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
6089 			chan->v2.channel_num = channel_num;
6090 			if (IEEE80211_IS_CHAN_2GHZ(c))
6091 				chan->v2.band = IWX_PHY_BAND_24;
6092 			else
6093 				chan->v2.band = IWX_PHY_BAND_5;
6094 			chan->v2.iter_count = 1;
6095 			chan->v2.iter_interval = 0;
6096 		} else {
6097 			chan->v1.channel_num = channel_num;
6098 			chan->v1.iter_count = 1;
6099 			chan->v1.iter_interval = htole16(0);
6100 		}
6101 		if (n_ssids != 0 && !bgscan)
6102 			chan->flags = htole32(1 << 0); /* select SSID 0 */
6103 		chan++;
6104 		nchan++;
6105 	}
6106 
6107 	return nchan;
6108 }
6109 
6110 int
6111 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
6112 {
6113 	struct ieee80211com *ic = &sc->sc_ic;
6114 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
6115 	struct ieee80211_rateset *rs;
6116 	size_t remain = sizeof(preq->buf);
6117 	uint8_t *frm, *pos;
6118 
6119 	memset(preq, 0, sizeof(*preq));
6120 
6121 	if (remain < sizeof(*wh) + 2)
6122 		return ENOBUFS;
6123 
6124 	/*
6125 	 * Build a probe request frame.  Most of the following code is a
6126 	 * copy & paste of what is done in net80211.
6127 	 */
6128 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6129 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6130 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6131 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
6132 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
6133 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
6134 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
6135 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
6136 
6137 	frm = (uint8_t *)(wh + 1);
6138 	*frm++ = IEEE80211_ELEMID_SSID;
6139 	*frm++ = 0;
6140 	/* hardware inserts SSID */
6141 
6142 	/* Tell the firmware where the MAC header is. */
6143 	preq->mac_header.offset = 0;
6144 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
6145 	remain -= frm - (uint8_t *)wh;
6146 
6147 	/* Fill in 2GHz IEs and tell firmware where they are. */
6148 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6149 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6150 		if (remain < 4 + rs->rs_nrates)
6151 			return ENOBUFS;
6152 	} else if (remain < 2 + rs->rs_nrates)
6153 		return ENOBUFS;
6154 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
6155 	pos = frm;
6156 	frm = ieee80211_add_rates(frm, rs);
6157 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6158 		frm = ieee80211_add_xrates(frm, rs);
6159 	remain -= frm - pos;
6160 
6161 	if (isset(sc->sc_enabled_capa,
6162 	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
6163 		if (remain < 3)
6164 			return ENOBUFS;
6165 		*frm++ = IEEE80211_ELEMID_DSPARMS;
6166 		*frm++ = 1;
6167 		*frm++ = 0;
6168 		remain -= 3;
6169 	}
6170 	preq->band_data[0].len = htole16(frm - pos);
6171 
6172 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
6173 		/* Fill in 5GHz IEs. */
6174 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6175 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6176 			if (remain < 4 + rs->rs_nrates)
6177 				return ENOBUFS;
6178 		} else if (remain < 2 + rs->rs_nrates)
6179 			return ENOBUFS;
6180 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
6181 		pos = frm;
6182 		frm = ieee80211_add_rates(frm, rs);
6183 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6184 			frm = ieee80211_add_xrates(frm, rs);
6185 		preq->band_data[1].len = htole16(frm - pos);
6186 		remain -= frm - pos;
6187 		if (ic->ic_flags & IEEE80211_F_VHTON) {
6188 			if (remain < 14)
6189 				return ENOBUFS;
6190 			frm = ieee80211_add_vhtcaps(frm, ic);
6191 			remain -= frm - pos;
6192 		}
6193 	}
6194 
6195 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
6196 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6197 	pos = frm;
6198 	if (ic->ic_flags & IEEE80211_F_HTON) {
6199 		if (remain < 28)
6200 			return ENOBUFS;
6201 		frm = ieee80211_add_htcaps(frm, ic);
6202 		/* XXX add WME info? */
6203 		remain -= frm - pos;
6204 	}
6205 
6206 	preq->common_data.len = htole16(frm - pos);
6207 
6208 	return 0;
6209 }
6210 
6211 int
6212 iwx_config_umac_scan_reduced(struct iwx_softc *sc)
6213 {
6214 	struct iwx_scan_config scan_cfg;
6215 	struct iwx_host_cmd hcmd = {
6216 		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
6217 		.len[0] = sizeof(scan_cfg),
6218 		.data[0] = &scan_cfg,
6219 		.flags = 0,
6220 	};
6221 	int cmdver;
6222 
6223 	if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
6224 		printf("%s: firmware does not support reduced scan config\n",
6225 		    DEVNAME(sc));
6226 		return ENOTSUP;
6227 	}
6228 
6229 	memset(&scan_cfg, 0, sizeof(scan_cfg));
6230 
6231 	/*
6232 	 * SCAN_CFG version >= 5 implies that the broadcast
6233 	 * STA ID field is deprecated.
6234 	 */
6235 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
6236 	if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
6237 		scan_cfg.bcast_sta_id = 0xff;
6238 
6239 	scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
6240 	scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
6241 
6242 	return iwx_send_cmd(sc, &hcmd);
6243 }
6244 
6245 uint16_t
6246 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
6247 {
6248 	struct ieee80211com *ic = &sc->sc_ic;
6249 	uint16_t flags = 0;
6250 
6251 	if (ic->ic_des_esslen == 0)
6252 		flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
6253 
6254 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
6255 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
6256 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
6257 
6258 	return flags;
6259 }
6260 
6261 #define IWX_SCAN_DWELL_ACTIVE		10
6262 #define IWX_SCAN_DWELL_PASSIVE		110
6263 
6264 /* adaptive dwell max budget time [TU] for full scan */
6265 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
6266 /* adaptive dwell max budget time [TU] for directed scan */
6267 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
6268 /* adaptive dwell default high band APs number */
6269 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
6270 /* adaptive dwell default low band APs number */
6271 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
6272 /* adaptive dwell default APs number in social channels (1, 6, 11) */
6273 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
6274 /* adaptive dwell number of APs override for p2p friendly GO channels */
6275 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
6276 /* adaptive dwell number of APs override for social channels */
6277 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
6278 
6279 void
6280 iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
6281     struct iwx_scan_general_params_v10 *general_params, int bgscan)
6282 {
6283 	uint32_t suspend_time, max_out_time;
6284 	uint8_t active_dwell, passive_dwell;
6285 
6286 	active_dwell = IWX_SCAN_DWELL_ACTIVE;
6287 	passive_dwell = IWX_SCAN_DWELL_PASSIVE;
6288 
6289 	general_params->adwell_default_social_chn =
6290 		IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
6291 	general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
6292 	general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
6293 
6294 	if (bgscan)
6295 		general_params->adwell_max_budget =
6296 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
6297 	else
6298 		general_params->adwell_max_budget =
6299 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
6300 
6301 	general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6302 	if (bgscan) {
6303 		max_out_time = htole32(120);
6304 		suspend_time = htole32(120);
6305 	} else {
6306 		max_out_time = htole32(0);
6307 		suspend_time = htole32(0);
6308 	}
6309 	general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
6310 		htole32(max_out_time);
6311 	general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
6312 		htole32(suspend_time);
6313 	general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
6314 		htole32(max_out_time);
6315 	general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
6316 		htole32(suspend_time);
6317 
6318 	general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
6319 	general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
6320 	general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
6321 	general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
6322 }
6323 
6324 void
6325 iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
6326     struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
6327 {
6328 	iwx_scan_umac_dwell_v10(sc, gp, bgscan);
6329 
6330 	gp->flags = htole16(gen_flags);
6331 
6332 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
6333 		gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
6334 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
6335 		gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
6336 
6337 	gp->scan_start_mac_id = 0;
6338 }
6339 
6340 void
6341 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
6342     struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
6343     int n_ssid, int bgscan)
6344 {
6345 	cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
6346 
6347 	cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
6348 	    nitems(cp->channel_config), n_ssid, bgscan);
6349 
6350 	cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
6351 	cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
6352 }
6353 
6354 int
6355 iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
6356 {
6357 	struct ieee80211com *ic = &sc->sc_ic;
6358 	struct iwx_host_cmd hcmd = {
6359 		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
6360 		.len = { 0, },
6361 		.data = { NULL, },
6362 		.flags = 0,
6363 	};
6364 	struct iwx_scan_req_umac_v14 *cmd;
6365 	struct iwx_scan_req_params_v14 *scan_p;
6366 	int err, async = bgscan, n_ssid = 0;
6367 	uint16_t gen_flags;
6368 	uint32_t bitmap_ssid = 0;
6369 
6370 	cmd = malloc(sizeof(*cmd), M_DEVBUF,
6371 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
6372 	if (cmd == NULL)
6373 		return ENOMEM;
6374 
6375 	scan_p = &cmd->scan_params;
6376 
6377 	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6378 	cmd->uid = htole32(0);
6379 
6380 	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
6381 	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
6382 	    gen_flags, bgscan);
6383 
6384 	scan_p->periodic_params.schedule[0].interval = htole16(0);
6385 	scan_p->periodic_params.schedule[0].iter_count = 1;
6386 
6387 	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
6388 	if (err) {
6389 		free(cmd, M_DEVBUF, sizeof(*cmd));
6390 		return err;
6391 	}
6392 
6393 	if (ic->ic_des_esslen != 0) {
6394 		scan_p->probe_params.direct_scan[0].id = IEEE80211_ELEMID_SSID;
6395 		scan_p->probe_params.direct_scan[0].len = ic->ic_des_esslen;
6396 		memcpy(scan_p->probe_params.direct_scan[0].ssid,
6397 		    ic->ic_des_essid, ic->ic_des_esslen);
6398 		bitmap_ssid |= (1 << 0);
6399 		n_ssid = 1;
6400 	}
6401 
6402 	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
6403 	    n_ssid, bgscan);
6404 
6405 	hcmd.len[0] = sizeof(*cmd);
6406 	hcmd.data[0] = (void *)cmd;
6407 	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
6408 
6409 	err = iwx_send_cmd(sc, &hcmd);
6410 	free(cmd, M_DEVBUF, sizeof(*cmd));
6411 	return err;
6412 }
6413 
6414 void
6415 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
6416 {
6417 	struct ieee80211com *ic = &sc->sc_ic;
6418 	struct ifnet *ifp = IC2IFP(ic);
6419 	char alpha2[3];
6420 
6421 	snprintf(alpha2, sizeof(alpha2), "%c%c",
6422 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
6423 
6424 	if (ifp->if_flags & IFF_DEBUG) {
6425 		printf("%s: firmware has detected regulatory domain '%s' "
6426 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
6427 	}
6428 
6429 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
6430 }
6431 
6432 uint8_t
6433 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
6434 {
6435 	int i;
6436 	uint8_t rval;
6437 
6438 	for (i = 0; i < rs->rs_nrates; i++) {
6439 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6440 		if (rval == iwx_rates[ridx].rate)
6441 			return rs->rs_rates[i];
6442 	}
6443 
6444 	return 0;
6445 }
6446 
6447 int
6448 iwx_rval2ridx(int rval)
6449 {
6450 	int ridx;
6451 
6452 	for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
6453 		if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
6454 			continue;
6455 		if (rval == iwx_rates[ridx].rate)
6456 			break;
6457 	}
6458 
6459        return ridx;
6460 }
6461 
6462 void
6463 iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
6464     int *ofdm_rates)
6465 {
6466 	struct ieee80211_node *ni = &in->in_ni;
6467 	struct ieee80211_rateset *rs = &ni->ni_rates;
6468 	int lowest_present_ofdm = -1;
6469 	int lowest_present_cck = -1;
6470 	uint8_t cck = 0;
6471 	uint8_t ofdm = 0;
6472 	int i;
6473 
6474 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
6475 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
6476 		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
6477 			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
6478 				continue;
6479 			cck |= (1 << i);
6480 			if (lowest_present_cck == -1 || lowest_present_cck > i)
6481 				lowest_present_cck = i;
6482 		}
6483 	}
6484 	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
6485 		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
6486 			continue;
6487 		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
6488 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
6489 			lowest_present_ofdm = i;
6490 	}
6491 
6492 	/*
6493 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
6494 	 * variables. This isn't sufficient though, as there might not
6495 	 * be all the right rates in the bitmap. E.g. if the only basic
6496 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
6497 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
6498 	 *
6499 	 *    [...] a STA responding to a received frame shall transmit
6500 	 *    its Control Response frame [...] at the highest rate in the
6501 	 *    BSSBasicRateSet parameter that is less than or equal to the
6502 	 *    rate of the immediately previous frame in the frame exchange
6503 	 *    sequence ([...]) and that is of the same modulation class
6504 	 *    ([...]) as the received frame. If no rate contained in the
6505 	 *    BSSBasicRateSet parameter meets these conditions, then the
6506 	 *    control frame sent in response to a received frame shall be
6507 	 *    transmitted at the highest mandatory rate of the PHY that is
6508 	 *    less than or equal to the rate of the received frame, and
6509 	 *    that is of the same modulation class as the received frame.
6510 	 *
6511 	 * As a consequence, we need to add all mandatory rates that are
6512 	 * lower than all of the basic rates to these bitmaps.
6513 	 */
6514 
6515 	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
6516 		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
6517 	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
6518 		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
6519 	/* 6M already there or needed so always add */
6520 	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;
6521 
6522 	/*
6523 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
6524 	 * Note, however:
6525 	 *  - if no CCK rates are basic, it must be ERP since there must
6526 	 *    be some basic rates at all, so they're OFDM => ERP PHY
6527 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
6528 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
6529 	 *  - if 5.5M is basic, 1M and 2M are mandatory
6530 	 *  - if 2M is basic, 1M is mandatory
6531 	 *  - if 1M is basic, that's the only valid ACK rate.
6532 	 * As a consequence, it's not as complicated as it sounds, just add
6533 	 * any lower rates to the ACK rate bitmap.
6534 	 */
6535 	if (IWX_RATE_11M_INDEX < lowest_present_cck)
6536 		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
6537 	if (IWX_RATE_5M_INDEX < lowest_present_cck)
6538 		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
6539 	if (IWX_RATE_2M_INDEX < lowest_present_cck)
6540 		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
6541 	/* 1M already there or needed so always add */
6542 	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;
6543 
6544 	*cck_rates = cck;
6545 	*ofdm_rates = ofdm;
6546 }
6547 
6548 void
6549 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
6550     struct iwx_mac_ctx_cmd *cmd, uint32_t action)
6551 {
6552 #define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6553 	struct ieee80211com *ic = &sc->sc_ic;
6554 	struct ieee80211_node *ni = ic->ic_bss;
6555 	int cck_ack_rates, ofdm_ack_rates;
6556 	int i;
6557 
6558 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6559 	    in->in_color));
6560 	cmd->action = htole32(action);
6561 
6562 	if (action == IWX_FW_CTXT_ACTION_REMOVE)
6563 		return;
6564 
6565 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6566 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
6567 	else if (ic->ic_opmode == IEEE80211_M_STA)
6568 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
6569 	else
6570 		panic("unsupported operating mode %d", ic->ic_opmode);
6571 	cmd->tsf_id = htole32(IWX_TSF_ID_A);
6572 
6573 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
6574 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6575 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
6576 		return;
6577 	}
6578 
6579 	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
6580 	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
6581 	cmd->cck_rates = htole32(cck_ack_rates);
6582 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
6583 
6584 	cmd->cck_short_preamble
6585 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6586 	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
6587 	cmd->short_slot
6588 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
6589 	      ? IWX_MAC_FLG_SHORT_SLOT : 0);
6590 
6591 	for (i = 0; i < EDCA_NUM_AC; i++) {
6592 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
6593 		int txf = iwx_ac_to_tx_fifo[i];
6594 
6595 		cmd->ac[txf].cw_min = htole16(IWX_EXP2(ac->ac_ecwmin));
6596 		cmd->ac[txf].cw_max = htole16(IWX_EXP2(ac->ac_ecwmax));
6597 		cmd->ac[txf].aifsn = ac->ac_aifsn;
6598 		cmd->ac[txf].fifos_mask = (1 << txf);
6599 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
6600 	}
6601 	if (ni->ni_flags & IEEE80211_NODE_QOS)
6602 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
6603 
6604 	if (ni->ni_flags & IEEE80211_NODE_HT) {
6605 		enum ieee80211_htprot htprot =
6606 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
6607 		switch (htprot) {
6608 		case IEEE80211_HTPROT_NONE:
6609 			break;
6610 		case IEEE80211_HTPROT_NONMEMBER:
6611 		case IEEE80211_HTPROT_NONHT_MIXED:
6612 			cmd->protection_flags |=
6613 			    htole32(IWX_MAC_PROT_FLG_HT_PROT |
6614 			    IWX_MAC_PROT_FLG_FAT_PROT);
6615 			break;
6616 		case IEEE80211_HTPROT_20MHZ:
6617 			if (in->in_phyctxt &&
6618 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
6619 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
6620 				cmd->protection_flags |=
6621 				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
6622 				    IWX_MAC_PROT_FLG_FAT_PROT);
6623 			}
6624 			break;
6625 		default:
6626 			break;
6627 		}
6628 
6629 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
6630 	}
6631 	if (ic->ic_flags & IEEE80211_F_USEPROT)
6632 		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
6633 
6634 	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
6635 #undef IWX_EXP2
6636 }
6637 
6638 void
6639 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
6640     struct iwx_mac_data_sta *sta, int assoc)
6641 {
6642 	struct ieee80211_node *ni = &in->in_ni;
6643 	uint32_t dtim_off;
6644 	uint64_t tsf;
6645 
6646 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
6647 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
6648 	tsf = letoh64(tsf);
6649 
6650 	sta->is_assoc = htole32(assoc);
6651 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
6652 	sta->dtim_tsf = htole64(tsf + dtim_off);
6653 	sta->bi = htole32(ni->ni_intval);
6654 	sta->bi_reciprocal = htole32(iwx_reciprocal(ni->ni_intval));
6655 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
6656 	sta->dtim_reciprocal = htole32(iwx_reciprocal(sta->dtim_interval));
6657 	sta->listen_interval = htole32(10);
6658 	sta->assoc_id = htole32(ni->ni_associd);
6659 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
6660 }
6661 
6662 int
6663 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
6664     int assoc)
6665 {
6666 	struct ieee80211com *ic = &sc->sc_ic;
6667 	struct ieee80211_node *ni = &in->in_ni;
6668 	struct iwx_mac_ctx_cmd cmd;
6669 	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
6670 
6671 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
6672 		panic("MAC already added");
6673 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
6674 		panic("MAC already removed");
6675 
6676 	memset(&cmd, 0, sizeof(cmd));
6677 
6678 	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
6679 
6680 	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
6681 		return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
6682 		    sizeof(cmd), &cmd);
6683 	}
6684 
6685 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6686 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
6687 		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
6688 		    IWX_MAC_FILTER_ACCEPT_GRP |
6689 		    IWX_MAC_FILTER_IN_BEACON |
6690 		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
6691 		    IWX_MAC_FILTER_IN_CRC32);
6692 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
6693 		/*
6694 		 * Allow beacons to pass through as long as we are not
6695 		 * associated or we do not have dtim period information.
6696 		 */
6697 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
6698 	else
6699 		iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
6700 
6701 	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6702 }
6703 
6704 int
6705 iwx_clear_statistics(struct iwx_softc *sc)
6706 {
6707 	struct iwx_statistics_cmd scmd = {
6708 		.flags = htole32(IWX_STATISTICS_FLG_CLEAR)
6709 	};
6710 	struct iwx_host_cmd cmd = {
6711 		.id = IWX_STATISTICS_CMD,
6712 		.len[0] = sizeof(scmd),
6713 		.data[0] = &scmd,
6714 		.flags = IWX_CMD_WANT_RESP,
6715 		.resp_pkt_len = sizeof(struct iwx_notif_statistics),
6716 	};
6717 	int err;
6718 
6719 	err = iwx_send_cmd(sc, &cmd);
6720 	if (err)
6721 		return err;
6722 
6723 	iwx_free_resp(sc, &cmd);
6724 	return 0;
6725 }
6726 
6727 void
6728 iwx_add_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
6729 {
6730 	int s = splnet();
6731 
6732 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
6733 		splx(s);
6734 		return;
6735 	}
6736 
6737 	refcnt_take(&sc->task_refs);
6738 	if (!task_add(taskq, task))
6739 		refcnt_rele_wake(&sc->task_refs);
6740 	splx(s);
6741 }
6742 
6743 void
6744 iwx_del_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
6745 {
6746 	if (task_del(taskq, task))
6747 		refcnt_rele(&sc->task_refs);
6748 }
6749 
6750 int
6751 iwx_scan(struct iwx_softc *sc)
6752 {
6753 	struct ieee80211com *ic = &sc->sc_ic;
6754 	struct ifnet *ifp = IC2IFP(ic);
6755 	int err;
6756 
6757 	if (sc->sc_flags & IWX_FLAG_BGSCAN) {
6758 		err = iwx_scan_abort(sc);
6759 		if (err) {
6760 			printf("%s: could not abort background scan\n",
6761 			    DEVNAME(sc));
6762 			return err;
6763 		}
6764 	}
6765 
6766 	err = iwx_umac_scan_v14(sc, 0);
6767 	if (err) {
6768 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6769 		return err;
6770 	}
6771 
6772 	/*
6773 	 * The current mode might have been fixed during association.
6774 	 * Ensure all channels get scanned.
6775 	 */
6776 	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
6777 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
6778 
6779 	sc->sc_flags |= IWX_FLAG_SCANNING;
6780 	if (ifp->if_flags & IFF_DEBUG)
6781 		printf("%s: %s -> %s\n", ifp->if_xname,
6782 		    ieee80211_state_name[ic->ic_state],
6783 		    ieee80211_state_name[IEEE80211_S_SCAN]);
6784 	if ((sc->sc_flags & IWX_FLAG_BGSCAN) == 0) {
6785 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
6786 		ieee80211_node_cleanup(ic, ic->ic_bss);
6787 	}
6788 	ic->ic_state = IEEE80211_S_SCAN;
6789 	wakeup(&ic->ic_state); /* wake iwx_init() */
6790 
6791 	return 0;
6792 }
6793 
6794 int
6795 iwx_bgscan(struct ieee80211com *ic)
6796 {
6797 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
6798 	int err;
6799 
6800 	if (sc->sc_flags & IWX_FLAG_SCANNING)
6801 		return 0;
6802 
6803 	err = iwx_umac_scan_v14(sc, 1);
6804 	if (err) {
6805 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6806 		return err;
6807 	}
6808 
6809 	sc->sc_flags |= IWX_FLAG_BGSCAN;
6810 	return 0;
6811 }
6812 
6813 void
6814 iwx_bgscan_done(struct ieee80211com *ic,
6815     struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
6816 {
6817 	struct iwx_softc *sc = ic->ic_softc;
6818 
6819 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
6820 	sc->bgscan_unref_arg = arg;
6821 	sc->bgscan_unref_arg_size = arg_size;
6822 	iwx_add_task(sc, sc->sc_nswq, &sc->bgscan_done_task);
6823 }
6824 
6825 void
6826 iwx_bgscan_done_task(void *arg)
6827 {
6828 	struct iwx_softc *sc = arg;
6829 	struct ieee80211com *ic = &sc->sc_ic;
6830 	struct iwx_node *in = (void *)ic->ic_bss;
6831 	struct ieee80211_node *ni = &in->in_ni;
6832 	int tid, err = 0, s = splnet();
6833 
6834 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
6835 	    (ic->ic_flags & IEEE80211_F_BGSCAN) == 0 ||
6836 	    ic->ic_state != IEEE80211_S_RUN) {
6837 		err = ENXIO;
6838 		goto done;
6839 	}
6840 
6841 	err = iwx_flush_sta(sc, in);
6842 	if (err)
6843 		goto done;
6844 
6845 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
6846 		int qid = IWX_FIRST_AGG_TX_QUEUE + tid;
6847 
6848 		if (sc->aggqid[tid] == 0)
6849 			continue;
6850 
6851 		err = iwx_disable_txq(sc, IWX_STATION_ID, qid, tid);
6852 		if (err)
6853 			goto done;
6854 #if 0 /* disabled for now; we are going to DEAUTH soon anyway */
6855 		IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,
6856 		    IEEE80211_ACTION_DELBA,
6857 		    IEEE80211_REASON_AUTH_LEAVE << 16 |
6858 		    IEEE80211_FC1_DIR_TODS << 8 | tid);
6859 #endif
6860 		ieee80211_node_tx_ba_clear(ni, tid);
6861 		sc->aggqid[tid] = 0;
6862 	}
6863 
6864 	/*
6865 	 * Tx queues have been flushed and Tx agg has been stopped.
6866 	 * Allow roaming to proceed.
6867 	 */
6868 	ni->ni_unref_arg = sc->bgscan_unref_arg;
6869 	ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
6870 	sc->bgscan_unref_arg = NULL;
6871 	sc->bgscan_unref_arg_size = 0;
6872 	ieee80211_node_tx_stopped(ic, &in->in_ni);
6873 done:
6874 	if (err) {
6875 		free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
6876 		sc->bgscan_unref_arg = NULL;
6877 		sc->bgscan_unref_arg_size = 0;
6878 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
6879 			task_add(systq, &sc->init_task);
6880 	}
6881 	refcnt_rele_wake(&sc->task_refs);
6882 	splx(s);
6883 }
6884 
6885 int
6886 iwx_umac_scan_abort(struct iwx_softc *sc)
6887 {
6888 	struct iwx_umac_scan_abort cmd = { 0 };
6889 
6890 	return iwx_send_cmd_pdu(sc,
6891 	    IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC),
6892 	    0, sizeof(cmd), &cmd);
6893 }
6894 
6895 int
6896 iwx_scan_abort(struct iwx_softc *sc)
6897 {
6898 	int err;
6899 
6900 	err = iwx_umac_scan_abort(sc);
6901 	if (err == 0)
6902 		sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
6903 	return err;
6904 }
6905 
6906 int
6907 iwx_enable_mgmt_queue(struct iwx_softc *sc)
6908 {
6909 	int err;
6910 
6911 	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
6912 
6913 	/*
6914 	 * Non-QoS frames use the "MGMT" TID and queue.
6915 	 * Other TIDs and data queues are reserved for QoS data frames.
6916 	 */
6917 	err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
6918 	    IWX_MGMT_TID, IWX_TX_RING_COUNT);
6919 	if (err) {
6920 		printf("%s: could not enable Tx queue %d (error %d)\n",
6921 		    DEVNAME(sc), sc->first_data_qid, err);
6922 		return err;
6923 	}
6924 
6925 	return 0;
6926 }
6927 
6928 int
6929 iwx_rs_rval2idx(uint8_t rval)
6930 {
6931 	/* Firmware expects indices which match our 11g rate set. */
6932 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
6933 	int i;
6934 
6935 	for (i = 0; i < rs->rs_nrates; i++) {
6936 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
6937 			return i;
6938 	}
6939 
6940 	return -1;
6941 }
6942 
6943 uint16_t
6944 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
6945 {
6946 	struct ieee80211com *ic = &sc->sc_ic;
6947 	const struct ieee80211_ht_rateset *rs;
6948 	uint16_t htrates = 0;
6949 	int mcs;
6950 
6951 	rs = &ieee80211_std_ratesets_11n[rsidx];
6952 	for (mcs = rs->min_mcs; mcs <= rs->max_mcs; mcs++) {
6953 		if (!isset(ni->ni_rxmcs, mcs) ||
6954 		    !isset(ic->ic_sup_mcs, mcs))
6955 			continue;
6956 		htrates |= (1 << (mcs - rs->min_mcs));
6957 	}
6958 
6959 	return htrates;
6960 }
6961 
6962 uint16_t
6963 iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
6964 {
6965 	uint16_t rx_mcs;
6966 	int max_mcs = -1;
6967 
6968 	rx_mcs = (ni->ni_vht_rxmcs & IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
6969 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);
6970 	switch (rx_mcs) {
6971 	case IEEE80211_VHT_MCS_SS_NOT_SUPP:
6972 		break;
6973 	case IEEE80211_VHT_MCS_0_7:
6974 		max_mcs = 7;
6975 		break;
6976 	case IEEE80211_VHT_MCS_0_8:
6977 		max_mcs = 8;
6978 		break;
6979 	case IEEE80211_VHT_MCS_0_9:
6980 		/* Disable VHT MCS 9 for 20MHz-only stations. */
6981 		if (!ieee80211_node_supports_ht_chan40(ni))
6982 			max_mcs = 8;
6983 		else
6984 			max_mcs = 9;
6985 		break;
6986 	default:
6987 		/* Should not happen; Values above cover the possible range. */
6988 		panic("invalid VHT Rx MCS value %u", rx_mcs);
6989 	}
6990 
6991 	return ((1 << (max_mcs + 1)) - 1);
6992 }
6993 
6994 int
6995 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
6996 {
6997 	struct ieee80211_node *ni = &in->in_ni;
6998 	struct ieee80211_rateset *rs = &ni->ni_rates;
6999 	struct iwx_tlc_config_cmd cfg_cmd;
7000 	uint32_t cmd_id;
7001 	int i;
7002 	size_t cmd_size = sizeof(cfg_cmd);
7003 
7004 	memset(&cfg_cmd, 0, sizeof(cfg_cmd));
7005 
7006 	for (i = 0; i < rs->rs_nrates; i++) {
7007 		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
7008 		int idx = iwx_rs_rval2idx(rval);
7009 		if (idx == -1)
7010 			return EINVAL;
7011 		cfg_cmd.non_ht_rates |= (1 << idx);
7012 	}
7013 
7014 	if (ni->ni_flags & IEEE80211_NODE_VHT) {
7015 		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
7016 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_HT_BW_NONE_160] =
7017 		    htole16(iwx_rs_vht_rates(sc, ni, 1));
7018 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_HT_BW_NONE_160] =
7019 		    htole16(iwx_rs_vht_rates(sc, ni, 2));
7020 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
7021 		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
7022 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_HT_BW_NONE_160] =
7023 		    htole16(iwx_rs_ht_rates(sc, ni,
7024 		    IEEE80211_HT_RATESET_SISO));
7025 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_HT_BW_NONE_160] =
7026 		    htole16(iwx_rs_ht_rates(sc, ni,
7027 		    IEEE80211_HT_RATESET_MIMO2));
7028 	} else
7029 		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
7030 
7031 	cfg_cmd.sta_id = IWX_STATION_ID;
7032 	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
7033 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
7034 	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
7035 	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
7036 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
7037 	else
7038 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
7039 	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
7040 	if (ni->ni_flags & IEEE80211_NODE_VHT)
7041 		cfg_cmd.max_mpdu_len = htole16(3895);
7042 	else
7043 		cfg_cmd.max_mpdu_len = htole16(3839);
7044 	if (ni->ni_flags & IEEE80211_NODE_HT) {
7045 		if (ieee80211_node_supports_ht_sgi20(ni)) {
7046 			cfg_cmd.sgi_ch_width_supp |= (1 <<
7047 			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
7048 		}
7049 		if (ieee80211_node_supports_ht_sgi40(ni)) {
7050 			cfg_cmd.sgi_ch_width_supp |= (1 <<
7051 			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
7052 		}
7053 	}
7054 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
7055 	    ieee80211_node_supports_vht_sgi80(ni))
7056 		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
7057 
7058 	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
7059 	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
7060 }
7061 
7062 void
7063 iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
7064 {
7065 	struct ieee80211com *ic = &sc->sc_ic;
7066 	struct ieee80211_node *ni = ic->ic_bss;
7067 	struct ieee80211_rateset *rs = &ni->ni_rates;
7068 	uint32_t rate_n_flags;
7069 	int i;
7070 
7071 	if (notif->sta_id != IWX_STATION_ID ||
7072 	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
7073 		return;
7074 
7075 	rate_n_flags = le32toh(notif->rate);
7076 	if (rate_n_flags & IWX_RATE_MCS_VHT_MSK) {
7077 		ni->ni_txmcs = (rate_n_flags & IWX_RATE_VHT_MCS_RATE_CODE_MSK);
7078 		ni->ni_vht_ss = ((rate_n_flags & IWX_RATE_VHT_MCS_NSS_MSK) >>
7079 		    IWX_RATE_VHT_MCS_NSS_POS) + 1;
7080 	} else if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
7081 		ni->ni_txmcs = (rate_n_flags &
7082 		    (IWX_RATE_HT_MCS_RATE_CODE_MSK |
7083 		    IWX_RATE_HT_MCS_NSS_MSK));
7084 	} else {
7085 		uint8_t plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
7086 		uint8_t rval = 0;
7087 		for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
7088 			if (iwx_rates[i].plcp == plcp) {
7089 				rval = iwx_rates[i].rate;
7090 				break;
7091 			}
7092 		}
7093 		if (rval) {
7094 			uint8_t rv;
7095 			for (i = 0; i < rs->rs_nrates; i++) {
7096 				rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
7097 				if (rv == rval) {
7098 					ni->ni_txrate = i;
7099 					break;
7100 				}
7101 			}
7102 		}
7103 	}
7104 }
7105 
7106 int
7107 iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
7108     struct ieee80211_channel *chan, uint8_t chains_static,
7109     uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
7110     uint8_t vht_chan_width)
7111 {
7112 	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
7113 	int err;
7114 
7115 	if (isset(sc->sc_enabled_capa,
7116 	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
7117 	    (phyctxt->channel->ic_flags & band_flags) !=
7118 	    (chan->ic_flags & band_flags)) {
7119 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
7120 		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
7121 		    vht_chan_width);
7122 		if (err) {
7123 			printf("%s: could not remove PHY context "
7124 			    "(error %d)\n", DEVNAME(sc), err);
7125 			return err;
7126 		}
7127 		phyctxt->channel = chan;
7128 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
7129 		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
7130 		    vht_chan_width);
7131 		if (err) {
7132 			printf("%s: could not add PHY context "
7133 			    "(error %d)\n", DEVNAME(sc), err);
7134 			return err;
7135 		}
7136 	} else {
7137 		phyctxt->channel = chan;
7138 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
7139 		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
7140 		    vht_chan_width);
7141 		if (err) {
7142 			printf("%s: could not update PHY context (error %d)\n",
7143 			    DEVNAME(sc), err);
7144 			return err;
7145 		}
7146 	}
7147 
7148 	phyctxt->sco = sco;
7149 	phyctxt->vht_chan_width = vht_chan_width;
7150 	return 0;
7151 }
7152 
7153 int
7154 iwx_auth(struct iwx_softc *sc)
7155 {
7156 	struct ieee80211com *ic = &sc->sc_ic;
7157 	struct iwx_node *in = (void *)ic->ic_bss;
7158 	uint32_t duration;
7159 	int generation = sc->sc_generation, err;
7160 
7161 	splassert(IPL_NET);
7162 
7163 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7164 		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
7165 		    ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
7166 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
7167 		if (err)
7168 			return err;
7169 	} else {
7170 		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
7171 		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
7172 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
7173 		if (err)
7174 			return err;
7175 	}
7176 	in->in_phyctxt = &sc->sc_phyctxt[0];
7177 	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
7178 
7179 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
7180 	if (err) {
7181 		printf("%s: could not add MAC context (error %d)\n",
7182 		    DEVNAME(sc), err);
7183 		return err;
7184  	}
7185 	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;
7186 
7187 	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
7188 	if (err) {
7189 		printf("%s: could not add binding (error %d)\n",
7190 		    DEVNAME(sc), err);
7191 		goto rm_mac_ctxt;
7192 	}
7193 	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;
7194 
7195 	err = iwx_add_sta_cmd(sc, in, 0);
7196 	if (err) {
7197 		printf("%s: could not add sta (error %d)\n",
7198 		    DEVNAME(sc), err);
7199 		goto rm_binding;
7200 	}
7201 	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;
7202 
7203 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7204 		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
7205 		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
7206 		    IWX_TX_RING_COUNT);
7207 		if (err)
7208 			goto rm_sta;
7209 		return 0;
7210 	}
7211 
7212 	err = iwx_enable_mgmt_queue(sc);
7213 	if (err)
7214 		goto rm_sta;
7215 
7216 	err = iwx_clear_statistics(sc);
7217 	if (err)
7218 		goto rm_sta;
7219 
7220 	/*
7221 	 * Prevent the FW from wandering off channel during association
7222 	 * by "protecting" the session with a time event.
7223 	 */
7224 	if (in->in_ni.ni_intval)
7225 		duration = in->in_ni.ni_intval * 2;
7226 	else
7227 		duration = IEEE80211_DUR_TU;
7228 	return iwx_schedule_session_protection(sc, in, duration);
7229 rm_sta:
7230 	if (generation == sc->sc_generation) {
7231 		iwx_rm_sta_cmd(sc, in);
7232 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
7233 	}
7234 rm_binding:
7235 	if (generation == sc->sc_generation) {
7236 		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
7237 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
7238 	}
7239 rm_mac_ctxt:
7240 	if (generation == sc->sc_generation) {
7241 		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
7242 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
7243 	}
7244 	return err;
7245 }
7246 
7247 int
7248 iwx_deauth(struct iwx_softc *sc)
7249 {
7250 	struct ieee80211com *ic = &sc->sc_ic;
7251 	struct iwx_node *in = (void *)ic->ic_bss;
7252 	int err;
7253 
7254 	splassert(IPL_NET);
7255 
7256 	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
7257 		err = iwx_rm_sta(sc, in);
7258 		if (err)
7259 			return err;
7260 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
7261 	}
7262 
7263 	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
7264 		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
7265 		if (err) {
7266 			printf("%s: could not remove binding (error %d)\n",
7267 			    DEVNAME(sc), err);
7268 			return err;
7269 		}
7270 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
7271 	}
7272 
7273 	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
7274 		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
7275 		if (err) {
7276 			printf("%s: could not remove MAC context (error %d)\n",
7277 			    DEVNAME(sc), err);
7278 			return err;
7279 		}
7280 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
7281 	}
7282 
7283 	/* Move unused PHY context to a default channel. */
7284 	err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
7285 	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
7286 	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
7287 	if (err)
7288 		return err;
7289 
7290 	return 0;
7291 }
7292 
7293 int
7294 iwx_run(struct iwx_softc *sc)
7295 {
7296 	struct ieee80211com *ic = &sc->sc_ic;
7297 	struct iwx_node *in = (void *)ic->ic_bss;
7298 	struct ieee80211_node *ni = &in->in_ni;
7299 	int err;
7300 
7301 	splassert(IPL_NET);
7302 
7303 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7304 		/* Add a MAC context and a sniffing STA. */
7305 		err = iwx_auth(sc);
7306 		if (err)
7307 			return err;
7308 	}
7309 
7310 	/* Configure Rx chains for MIMO and configure 40 MHz channel. */
7311 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7312 		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
7313 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
7314 		    in->in_phyctxt->channel, chains, chains,
7315 		    0, IEEE80211_HTOP0_SCO_SCN,
7316 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
7317 		if (err) {
7318 			printf("%s: failed to update PHY\n", DEVNAME(sc));
7319 			return err;
7320 		}
7321 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
7322 		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
7323 		uint8_t sco, vht_chan_width;
7324 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
7325 		    ieee80211_node_supports_ht_chan40(ni))
7326 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
7327 		else
7328 			sco = IEEE80211_HTOP0_SCO_SCN;
7329 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
7330 		    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
7331 		    ieee80211_node_supports_vht_chan80(ni))
7332 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
7333 		else
7334 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
7335 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
7336 		    in->in_phyctxt->channel, chains, chains,
7337 		    0, sco, vht_chan_width);
7338 		if (err) {
7339 			printf("%s: failed to update PHY\n", DEVNAME(sc));
7340 			return err;
7341 		}
7342 	}
7343 
7344 	/* Update STA again to apply HT and VHT settings. */
7345 	err = iwx_add_sta_cmd(sc, in, 1);
7346 	if (err) {
7347 		printf("%s: could not update STA (error %d)\n",
7348 		    DEVNAME(sc), err);
7349 		return err;
7350 	}
7351 
7352 	/* We have now been assigned an associd by the AP. */
7353 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
7354 	if (err) {
7355 		printf("%s: failed to update MAC\n", DEVNAME(sc));
7356 		return err;
7357 	}
7358 
7359 	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
7360 	if (err) {
7361 		printf("%s: could not set sf full on (error %d)\n",
7362 		    DEVNAME(sc), err);
7363 		return err;
7364 	}
7365 
7366 	err = iwx_allow_mcast(sc);
7367 	if (err) {
7368 		printf("%s: could not allow mcast (error %d)\n",
7369 		    DEVNAME(sc), err);
7370 		return err;
7371 	}
7372 
7373 	err = iwx_power_update_device(sc);
7374 	if (err) {
7375 		printf("%s: could not send power command (error %d)\n",
7376 		    DEVNAME(sc), err);
7377 		return err;
7378 	}
7379 #ifdef notyet
7380 	/*
7381 	 * Disabled for now. Default beacon filter settings
7382 	 * prevent net80211 from getting ERP and HT protection
7383 	 * updates from beacons.
7384 	 */
7385 	err = iwx_enable_beacon_filter(sc, in);
7386 	if (err) {
7387 		printf("%s: could not enable beacon filter\n",
7388 		    DEVNAME(sc));
7389 		return err;
7390 	}
7391 #endif
7392 	err = iwx_power_mac_update_mode(sc, in);
7393 	if (err) {
7394 		printf("%s: could not update MAC power (error %d)\n",
7395 		    DEVNAME(sc), err);
7396 		return err;
7397 	}
7398 
7399 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7400 		return 0;
7401 
7402 	/* Start at lowest available bit-rate. Firmware will raise. */
7403 	in->in_ni.ni_txrate = 0;
7404 	in->in_ni.ni_txmcs = 0;
7405 
7406 	err = iwx_rs_init(sc, in);
7407 	if (err) {
7408 		printf("%s: could not init rate scaling (error %d)\n",
7409 		    DEVNAME(sc), err);
7410 		return err;
7411 	}
7412 
7413 	return 0;
7414 }
7415 
7416 int
7417 iwx_run_stop(struct iwx_softc *sc)
7418 {
7419 	struct ieee80211com *ic = &sc->sc_ic;
7420 	struct iwx_node *in = (void *)ic->ic_bss;
7421 	struct ieee80211_node *ni = &in->in_ni;
7422 	int err, i;
7423 
7424 	splassert(IPL_NET);
7425 
7426 	err = iwx_flush_sta(sc, in);
7427 	if (err) {
7428 		printf("%s: could not flush Tx path (error %d)\n",
7429 		    DEVNAME(sc), err);
7430 		return err;
7431 	}
7432 
7433 	/*
7434 	 * Stop Rx BA sessions now. We cannot rely on the BA task
7435 	 * for this when moving out of RUN state since it runs in a
7436 	 * separate thread.
7437 	 * Note that in->in_ni (struct ieee80211_node) already represents
7438 	 * our new access point in case we are roaming between APs.
7439 	 * This means we cannot rely on struct ieee802111_node to tell
7440 	 * us which BA sessions exist.
7441 	 */
7442 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
7443 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
7444 		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
7445 			continue;
7446 		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
7447 	}
7448 
7449 	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
7450 	if (err)
7451 		return err;
7452 
7453 	err = iwx_disable_beacon_filter(sc);
7454 	if (err) {
7455 		printf("%s: could not disable beacon filter (error %d)\n",
7456 		    DEVNAME(sc), err);
7457 		return err;
7458 	}
7459 
7460 	/* Mark station as disassociated. */
7461 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
7462 	if (err) {
7463 		printf("%s: failed to update MAC\n", DEVNAME(sc));
7464 		return err;
7465 	}
7466 
7467 	/* Reset Tx chains in case MIMO or 40 MHz channels were enabled. */
7468 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
7469 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
7470 		    in->in_phyctxt->channel, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
7471 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
7472 		if (err) {
7473 			printf("%s: failed to update PHY\n", DEVNAME(sc));
7474 			return err;
7475 		}
7476 	}
7477 
7478 	return 0;
7479 }
7480 
7481 struct ieee80211_node *
7482 iwx_node_alloc(struct ieee80211com *ic)
7483 {
7484 	return malloc(sizeof (struct iwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
7485 }
7486 
7487 int
7488 iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
7489     struct ieee80211_key *k)
7490 {
7491 	struct iwx_softc *sc = ic->ic_softc;
7492 	struct iwx_node *in = (void *)ni;
7493 	struct iwx_setkey_task_arg *a;
7494 	int err;
7495 
7496 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
7497 		/* Fallback to software crypto for other ciphers. */
7498 		err = ieee80211_set_key(ic, ni, k);
7499 		if (!err && (k->k_flags & IEEE80211_KEY_GROUP))
7500 			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
7501 		return err;
7502 	}
7503 
7504 	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
7505 		return ENOSPC;
7506 
7507 	a = &sc->setkey_arg[sc->setkey_cur];
7508 	a->sta_id = IWX_STATION_ID;
7509 	a->ni = ni;
7510 	a->k = k;
7511 	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
7512 	sc->setkey_nkeys++;
7513 	iwx_add_task(sc, systq, &sc->setkey_task);
7514 	return EBUSY;
7515 }
7516 
7517 int
7518 iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
7519     struct ieee80211_key *k)
7520 {
7521 	struct ieee80211com *ic = &sc->sc_ic;
7522 	struct iwx_node *in = (void *)ni;
7523 	struct iwx_add_sta_key_cmd cmd;
7524 	uint32_t status;
7525 	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
7526 	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
7527 	int err;
7528 
7529 	/*
7530 	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
7531 	 * Currently we only implement station mode where 'ni' is always
7532 	 * ic->ic_bss so there is no need to validate arguments beyond this:
7533 	 */
7534 	KASSERT(ni == ic->ic_bss);
7535 
7536 	memset(&cmd, 0, sizeof(cmd));
7537 
7538 	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
7539 	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
7540 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
7541 	    IWX_STA_KEY_FLG_KEYID_MSK));
7542 	if (k->k_flags & IEEE80211_KEY_GROUP) {
7543 		cmd.common.key_offset = 1;
7544 		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
7545 	} else
7546 		cmd.common.key_offset = 0;
7547 
7548 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
7549 	cmd.common.sta_id = sta_id;
7550 
7551 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
7552 
7553 	status = IWX_ADD_STA_SUCCESS;
7554 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
7555 	    &status);
7556 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
7557 		return ECANCELED;
7558 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
7559 		err = EIO;
7560 	if (err) {
7561 		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
7562 		    IEEE80211_REASON_AUTH_LEAVE);
7563 		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
7564 		return err;
7565 	}
7566 
7567 	if (k->k_flags & IEEE80211_KEY_GROUP)
7568 		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
7569 	else
7570 		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;
7571 
7572 	if ((in->in_flags & want_keymask) == want_keymask) {
7573 		DPRINTF(("marking port %s valid\n",
7574 		    ether_sprintf(ni->ni_macaddr)));
7575 		ni->ni_port_valid = 1;
7576 		ieee80211_set_link_state(ic, LINK_STATE_UP);
7577 	}
7578 
7579 	return 0;
7580 }
7581 
7582 void
7583 iwx_setkey_task(void *arg)
7584 {
7585 	struct iwx_softc *sc = arg;
7586 	struct iwx_setkey_task_arg *a;
7587 	int err = 0, s = splnet();
7588 
7589 	while (sc->setkey_nkeys > 0) {
7590 		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
7591 			break;
7592 		a = &sc->setkey_arg[sc->setkey_tail];
7593 		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
7594 		a->sta_id = 0;
7595 		a->ni = NULL;
7596 		a->k = NULL;
7597 		sc->setkey_tail = (sc->setkey_tail + 1) %
7598 		    nitems(sc->setkey_arg);
7599 		sc->setkey_nkeys--;
7600 	}
7601 
7602 	refcnt_rele_wake(&sc->task_refs);
7603 	splx(s);
7604 }
7605 
7606 void
7607 iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
7608     struct ieee80211_key *k)
7609 {
7610 	struct iwx_softc *sc = ic->ic_softc;
7611 	struct iwx_add_sta_key_cmd cmd;
7612 
7613 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
7614 		/* Fallback to software crypto for other ciphers. */
7615                 ieee80211_delete_key(ic, ni, k);
7616 		return;
7617 	}
7618 
7619 	memset(&cmd, 0, sizeof(cmd));
7620 
7621 	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
7622 	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
7623 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
7624 	    IWX_STA_KEY_FLG_KEYID_MSK));
7625 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
7626 	if (k->k_flags & IEEE80211_KEY_GROUP)
7627 		cmd.common.key_offset = 1;
7628 	else
7629 		cmd.common.key_offset = 0;
7630 	cmd.common.sta_id = IWX_STATION_ID;
7631 
7632 	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
7633 }
7634 
7635 int
7636 iwx_media_change(struct ifnet *ifp)
7637 {
7638 	struct iwx_softc *sc = ifp->if_softc;
7639 	struct ieee80211com *ic = &sc->sc_ic;
7640 	uint8_t rate, ridx;
7641 	int err;
7642 
7643 	err = ieee80211_media_change(ifp);
7644 	if (err != ENETRESET)
7645 		return err;
7646 
7647 	if (ic->ic_fixed_mcs != -1)
7648 		sc->sc_fixed_ridx = iwx_mcs2ridx[ic->ic_fixed_mcs];
7649 	else if (ic->ic_fixed_rate != -1) {
7650 		rate = ic->ic_sup_rates[ic->ic_curmode].
7651 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
7652 		/* Map 802.11 rate to HW rate index. */
7653 		for (ridx = 0; ridx <= IWX_RIDX_MAX; ridx++)
7654 			if (iwx_rates[ridx].rate == rate)
7655 				break;
7656 		sc->sc_fixed_ridx = ridx;
7657 	}
7658 
7659 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
7660 	    (IFF_UP | IFF_RUNNING)) {
7661 		iwx_stop(ifp);
7662 		err = iwx_init(ifp);
7663 	}
7664 	return err;
7665 }
7666 
7667 void
7668 iwx_newstate_task(void *psc)
7669 {
7670 	struct iwx_softc *sc = (struct iwx_softc *)psc;
7671 	struct ieee80211com *ic = &sc->sc_ic;
7672 	enum ieee80211_state nstate = sc->ns_nstate;
7673 	enum ieee80211_state ostate = ic->ic_state;
7674 	int arg = sc->ns_arg;
7675 	int err = 0, s = splnet();
7676 
7677 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
7678 		/* iwx_stop() is waiting for us. */
7679 		refcnt_rele_wake(&sc->task_refs);
7680 		splx(s);
7681 		return;
7682 	}
7683 
7684 	if (ostate == IEEE80211_S_SCAN) {
7685 		if (nstate == ostate) {
7686 			if (sc->sc_flags & IWX_FLAG_SCANNING) {
7687 				refcnt_rele_wake(&sc->task_refs);
7688 				splx(s);
7689 				return;
7690 			}
7691 			/* Firmware is no longer scanning. Do another scan. */
7692 			goto next_scan;
7693 		}
7694 	}
7695 
7696 	if (nstate <= ostate) {
7697 		switch (ostate) {
7698 		case IEEE80211_S_RUN:
7699 			err = iwx_run_stop(sc);
7700 			if (err)
7701 				goto out;
7702 			/* FALLTHROUGH */
7703 		case IEEE80211_S_ASSOC:
7704 		case IEEE80211_S_AUTH:
7705 			if (nstate <= IEEE80211_S_AUTH) {
7706 				err = iwx_deauth(sc);
7707 				if (err)
7708 					goto out;
7709 			}
7710 			/* FALLTHROUGH */
7711 		case IEEE80211_S_SCAN:
7712 		case IEEE80211_S_INIT:
7713 			break;
7714 		}
7715 
7716 		/* Die now if iwx_stop() was called while we were sleeping. */
7717 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
7718 			refcnt_rele_wake(&sc->task_refs);
7719 			splx(s);
7720 			return;
7721 		}
7722 	}
7723 
7724 	switch (nstate) {
7725 	case IEEE80211_S_INIT:
7726 		break;
7727 
7728 	case IEEE80211_S_SCAN:
7729 next_scan:
7730 		err = iwx_scan(sc);
7731 		if (err)
7732 			break;
7733 		refcnt_rele_wake(&sc->task_refs);
7734 		splx(s);
7735 		return;
7736 
7737 	case IEEE80211_S_AUTH:
7738 		err = iwx_auth(sc);
7739 		break;
7740 
7741 	case IEEE80211_S_ASSOC:
7742 		break;
7743 
7744 	case IEEE80211_S_RUN:
7745 		err = iwx_run(sc);
7746 		break;
7747 	}
7748 
7749 out:
7750 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
7751 		if (err)
7752 			task_add(systq, &sc->init_task);
7753 		else
7754 			sc->sc_newstate(ic, nstate, arg);
7755 	}
7756 	refcnt_rele_wake(&sc->task_refs);
7757 	splx(s);
7758 }
7759 
7760 int
7761 iwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
7762 {
7763 	struct ifnet *ifp = IC2IFP(ic);
7764 	struct iwx_softc *sc = ifp->if_softc;
7765 
7766 	/*
7767 	 * Prevent attempts to transition towards the same state, unless
7768 	 * we are scanning in which case a SCAN -> SCAN transition
7769 	 * triggers another scan iteration. And AUTH -> AUTH is needed
7770 	 * to support band-steering.
7771 	 */
7772 	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
7773 	    nstate != IEEE80211_S_AUTH)
7774 		return 0;
7775 
7776 	if (ic->ic_state == IEEE80211_S_RUN) {
7777 		iwx_del_task(sc, systq, &sc->ba_task);
7778 		iwx_del_task(sc, systq, &sc->setkey_task);
7779 		memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
7780 		sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
7781 		iwx_del_task(sc, systq, &sc->mac_ctxt_task);
7782 		iwx_del_task(sc, systq, &sc->phy_ctxt_task);
7783 		iwx_del_task(sc, systq, &sc->bgscan_done_task);
7784 	}
7785 
7786 	sc->ns_nstate = nstate;
7787 	sc->ns_arg = arg;
7788 
7789 	iwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);
7790 
7791 	return 0;
7792 }
7793 
7794 void
7795 iwx_endscan(struct iwx_softc *sc)
7796 {
7797 	struct ieee80211com *ic = &sc->sc_ic;
7798 
7799 	if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
7800 		return;
7801 
7802 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
7803 	ieee80211_end_scan(&ic->ic_if);
7804 }
7805 
7806 /*
7807  * Aging and idle timeouts for the different possible scenarios
7808  * in default configuration
7809  */
7810 static const uint32_t
7811 iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
7812 	{
7813 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
7814 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
7815 	},
7816 	{
7817 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
7818 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
7819 	},
7820 	{
7821 		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
7822 		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
7823 	},
7824 	{
7825 		htole32(IWX_SF_BA_AGING_TIMER_DEF),
7826 		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
7827 	},
7828 	{
7829 		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
7830 		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
7831 	},
7832 };
7833 
7834 /*
7835  * Aging and idle timeouts for the different possible scenarios
7836  * in single BSS MAC configuration.
7837  */
7838 static const uint32_t
7839 iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
7840 	{
7841 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
7842 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
7843 	},
7844 	{
7845 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
7846 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
7847 	},
7848 	{
7849 		htole32(IWX_SF_MCAST_AGING_TIMER),
7850 		htole32(IWX_SF_MCAST_IDLE_TIMER)
7851 	},
7852 	{
7853 		htole32(IWX_SF_BA_AGING_TIMER),
7854 		htole32(IWX_SF_BA_IDLE_TIMER)
7855 	},
7856 	{
7857 		htole32(IWX_SF_TX_RE_AGING_TIMER),
7858 		htole32(IWX_SF_TX_RE_IDLE_TIMER)
7859 	},
7860 };
7861 
7862 void
7863 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
7864     struct ieee80211_node *ni)
7865 {
7866 	int i, j, watermark;
7867 
7868 	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
7869 
7870 	/*
7871 	 * If we are in association flow - check antenna configuration
7872 	 * capabilities of the AP station, and choose the watermark accordingly.
7873 	 */
7874 	if (ni) {
7875 		if (ni->ni_flags & IEEE80211_NODE_HT) {
7876 			if (ni->ni_rxmcs[1] != 0)
7877 				watermark = IWX_SF_W_MARK_MIMO2;
7878 			else
7879 				watermark = IWX_SF_W_MARK_SISO;
7880 		} else {
7881 			watermark = IWX_SF_W_MARK_LEGACY;
7882 		}
7883 	/* default watermark value for unassociated mode. */
7884 	} else {
7885 		watermark = IWX_SF_W_MARK_MIMO2;
7886 	}
7887 	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
7888 
7889 	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
7890 		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
7891 			sf_cmd->long_delay_timeouts[i][j] =
7892 					htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
7893 		}
7894 	}
7895 
7896 	if (ni) {
7897 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
7898 		       sizeof(iwx_sf_full_timeout));
7899 	} else {
7900 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
7901 		       sizeof(iwx_sf_full_timeout_def));
7902 	}
7903 
7904 }
7905 
7906 int
7907 iwx_sf_config(struct iwx_softc *sc, int new_state)
7908 {
7909 	struct ieee80211com *ic = &sc->sc_ic;
7910 	struct iwx_sf_cfg_cmd sf_cmd = {
7911 		.state = htole32(new_state),
7912 	};
7913 	int err = 0;
7914 
7915 	switch (new_state) {
7916 	case IWX_SF_UNINIT:
7917 	case IWX_SF_INIT_OFF:
7918 		iwx_fill_sf_command(sc, &sf_cmd, NULL);
7919 		break;
7920 	case IWX_SF_FULL_ON:
7921 		iwx_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
7922 		break;
7923 	default:
7924 		return EINVAL;
7925 	}
7926 
7927 	err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
7928 				   sizeof(sf_cmd), &sf_cmd);
7929 	return err;
7930 }
7931 
7932 int
7933 iwx_send_bt_init_conf(struct iwx_softc *sc)
7934 {
7935 	struct iwx_bt_coex_cmd bt_cmd;
7936 
7937 	bt_cmd.mode = htole32(IWX_BT_COEX_WIFI);
7938 	bt_cmd.enabled_modules = 0;
7939 
7940 	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
7941 	    &bt_cmd);
7942 }
7943 
7944 int
7945 iwx_send_soc_conf(struct iwx_softc *sc)
7946 {
7947 	struct iwx_soc_configuration_cmd cmd;
7948 	int err;
7949 	uint32_t cmd_id, flags = 0;
7950 
7951 	memset(&cmd, 0, sizeof(cmd));
7952 
7953 	/*
7954 	 * In VER_1 of this command, the discrete value is considered
7955 	 * an integer; In VER_2, it's a bitmask.  Since we have only 2
7956 	 * values in VER_1, this is backwards-compatible with VER_2,
7957 	 * as long as we don't set any other flag bits.
7958 	 */
7959 	if (!sc->sc_integrated) { /* VER_1 */
7960 		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
7961 	} else { /* VER_2 */
7962 		uint8_t scan_cmd_ver;
7963 		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
7964 			flags |= (sc->sc_ltr_delay &
7965 			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
7966 		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
7967 		    IWX_SCAN_REQ_UMAC);
7968 		if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
7969 		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
7970 			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
7971 	}
7972 	cmd.flags = htole32(flags);
7973 
7974 	cmd.latency = htole32(sc->sc_xtal_latency);
7975 
7976 	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
7977 	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
7978 	if (err)
7979 		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
7980 	return err;
7981 }
7982 
7983 int
7984 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
7985 {
7986 	struct iwx_mcc_update_cmd mcc_cmd;
7987 	struct iwx_host_cmd hcmd = {
7988 		.id = IWX_MCC_UPDATE_CMD,
7989 		.flags = IWX_CMD_WANT_RESP,
7990 		.data = { &mcc_cmd },
7991 	};
7992 	struct iwx_rx_packet *pkt;
7993 	struct iwx_mcc_update_resp *resp;
7994 	size_t resp_len;
7995 	int err;
7996 
7997 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
7998 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
7999 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
8000 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
8001 		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
8002 	else
8003 		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
8004 
8005 	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
8006 	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
8007 
8008 	err = iwx_send_cmd(sc, &hcmd);
8009 	if (err)
8010 		return err;
8011 
8012 	pkt = hcmd.resp_pkt;
8013 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
8014 		err = EIO;
8015 		goto out;
8016 	}
8017 
8018 	resp_len = iwx_rx_packet_payload_len(pkt);
8019 	if (resp_len < sizeof(*resp)) {
8020 		err = EIO;
8021 		goto out;
8022 	}
8023 
8024 	resp = (void *)pkt->data;
8025 	if (resp_len != sizeof(*resp) +
8026 	    resp->n_channels * sizeof(resp->channels[0])) {
8027 		err = EIO;
8028 		goto out;
8029 	}
8030 
8031 	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%d n_channels=%u\n",
8032 	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
8033 
8034 	/* Update channel map for net80211 and our scan configuration. */
8035 	iwx_init_channel_map(sc, NULL, resp->channels, resp->n_channels);
8036 
8037 out:
8038 	iwx_free_resp(sc, &hcmd);
8039 
8040 	return err;
8041 }
8042 
8043 int
8044 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
8045 {
8046 	struct iwx_temp_report_ths_cmd cmd;
8047 	int err;
8048 
8049 	/*
8050 	 * In order to give responsibility for critical-temperature-kill
8051 	 * and TX backoff to FW we need to send an empty temperature
8052 	 * reporting command at init time.
8053 	 */
8054 	memset(&cmd, 0, sizeof(cmd));
8055 
8056 	err = iwx_send_cmd_pdu(sc,
8057 	    IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
8058 	    0, sizeof(cmd), &cmd);
8059 	if (err)
8060 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
8061 		    DEVNAME(sc), err);
8062 
8063 	return err;
8064 }
8065 
8066 int
8067 iwx_init_hw(struct iwx_softc *sc)
8068 {
8069 	struct ieee80211com *ic = &sc->sc_ic;
8070 	int err, i;
8071 
8072 	err = iwx_run_init_mvm_ucode(sc, 0);
8073 	if (err)
8074 		return err;
8075 
8076 	if (!iwx_nic_lock(sc))
8077 		return EBUSY;
8078 
8079 	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
8080 	if (err) {
8081 		printf("%s: could not init tx ant config (error %d)\n",
8082 		    DEVNAME(sc), err);
8083 		goto err;
8084 	}
8085 
8086 	if (sc->sc_tx_with_siso_diversity) {
8087 		err = iwx_send_phy_cfg_cmd(sc);
8088 		if (err) {
8089 			printf("%s: could not send phy config (error %d)\n",
8090 			    DEVNAME(sc), err);
8091 			goto err;
8092 		}
8093 	}
8094 
8095 	err = iwx_send_bt_init_conf(sc);
8096 	if (err) {
8097 		printf("%s: could not init bt coex (error %d)\n",
8098 		    DEVNAME(sc), err);
8099 		return err;
8100 	}
8101 
8102 	err = iwx_send_soc_conf(sc);
8103 	if (err)
8104 		return err;
8105 
8106 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
8107 		err = iwx_send_dqa_cmd(sc);
8108 		if (err)
8109 			return err;
8110 	}
8111 
8112 	for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
8113 		/*
8114 		 * The channel used here isn't relevant as it's
8115 		 * going to be overwritten in the other flows.
8116 		 * For now use the first channel we have.
8117 		 */
8118 		sc->sc_phyctxt[i].id = i;
8119 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
8120 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
8121 		    IWX_FW_CTXT_ACTION_ADD, 0, IEEE80211_HTOP0_SCO_SCN,
8122 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8123 		if (err) {
8124 			printf("%s: could not add phy context %d (error %d)\n",
8125 			    DEVNAME(sc), i, err);
8126 			goto err;
8127 		}
8128 	}
8129 
8130 	err = iwx_config_ltr(sc);
8131 	if (err) {
8132 		printf("%s: PCIe LTR configuration failed (error %d)\n",
8133 		    DEVNAME(sc), err);
8134 	}
8135 
8136 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
8137 		err = iwx_send_temp_report_ths_cmd(sc);
8138 		if (err)
8139 			goto err;
8140 	}
8141 
8142 	err = iwx_power_update_device(sc);
8143 	if (err) {
8144 		printf("%s: could not send power command (error %d)\n",
8145 		    DEVNAME(sc), err);
8146 		goto err;
8147 	}
8148 
8149 	if (sc->sc_nvm.lar_enabled) {
8150 		err = iwx_send_update_mcc_cmd(sc, "ZZ");
8151 		if (err) {
8152 			printf("%s: could not init LAR (error %d)\n",
8153 			    DEVNAME(sc), err);
8154 			goto err;
8155 		}
8156 	}
8157 
8158 	err = iwx_config_umac_scan_reduced(sc);
8159 	if (err) {
8160 		printf("%s: could not configure scan (error %d)\n",
8161 		    DEVNAME(sc), err);
8162 		goto err;
8163 	}
8164 
8165 	err = iwx_disable_beacon_filter(sc);
8166 	if (err) {
8167 		printf("%s: could not disable beacon filter (error %d)\n",
8168 		    DEVNAME(sc), err);
8169 		goto err;
8170 	}
8171 
8172 err:
8173 	iwx_nic_unlock(sc);
8174 	return err;
8175 }
8176 
8177 /* Allow multicast from our BSSID. */
8178 int
8179 iwx_allow_mcast(struct iwx_softc *sc)
8180 {
8181 	struct ieee80211com *ic = &sc->sc_ic;
8182 	struct iwx_node *in = (void *)ic->ic_bss;
8183 	struct iwx_mcast_filter_cmd *cmd;
8184 	size_t size;
8185 	int err;
8186 
8187 	size = roundup(sizeof(*cmd), 4);
8188 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
8189 	if (cmd == NULL)
8190 		return ENOMEM;
8191 	cmd->filter_own = 1;
8192 	cmd->port_id = 0;
8193 	cmd->count = 0;
8194 	cmd->pass_all = 1;
8195 	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
8196 
8197 	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
8198 	    0, size, cmd);
8199 	free(cmd, M_DEVBUF, size);
8200 	return err;
8201 }
8202 
8203 int
8204 iwx_init(struct ifnet *ifp)
8205 {
8206 	struct iwx_softc *sc = ifp->if_softc;
8207 	struct ieee80211com *ic = &sc->sc_ic;
8208 	int err, generation;
8209 
8210 	rw_assert_wrlock(&sc->ioctl_rwl);
8211 
8212 	generation = ++sc->sc_generation;
8213 
8214 	err = iwx_preinit(sc);
8215 	if (err)
8216 		return err;
8217 
8218 	err = iwx_start_hw(sc);
8219 	if (err) {
8220 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
8221 		return err;
8222 	}
8223 
8224 	err = iwx_init_hw(sc);
8225 	if (err) {
8226 		if (generation == sc->sc_generation)
8227 			iwx_stop_device(sc);
8228 		return err;
8229 	}
8230 
8231 	if (sc->sc_nvm.sku_cap_11n_enable)
8232 		iwx_setup_ht_rates(sc);
8233 	if (sc->sc_nvm.sku_cap_11ac_enable)
8234 		iwx_setup_vht_rates(sc);
8235 
8236 	KASSERT(sc->task_refs.r_refs == 0);
8237 	refcnt_init(&sc->task_refs);
8238 	ifq_clr_oactive(&ifp->if_snd);
8239 	ifp->if_flags |= IFF_RUNNING;
8240 
8241 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8242 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
8243 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
8244 		return 0;
8245 	}
8246 
8247 	ieee80211_begin_scan(ifp);
8248 
8249 	/*
8250 	 * ieee80211_begin_scan() ends up scheduling iwx_newstate_task().
8251 	 * Wait until the transition to SCAN state has completed.
8252 	 */
8253 	do {
8254 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwxinit",
8255 		    SEC_TO_NSEC(1));
8256 		if (generation != sc->sc_generation)
8257 			return ENXIO;
8258 		if (err) {
8259 			iwx_stop(ifp);
8260 			return err;
8261 		}
8262 	} while (ic->ic_state != IEEE80211_S_SCAN);
8263 
8264 	return 0;
8265 }
8266 
8267 void
8268 iwx_start(struct ifnet *ifp)
8269 {
8270 	struct iwx_softc *sc = ifp->if_softc;
8271 	struct ieee80211com *ic = &sc->sc_ic;
8272 	struct ieee80211_node *ni;
8273 	struct ether_header *eh;
8274 	struct mbuf *m;
8275 
8276 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
8277 		return;
8278 
8279 	for (;;) {
8280 		/* why isn't this done per-queue? */
8281 		if (sc->qfullmsk != 0) {
8282 			ifq_set_oactive(&ifp->if_snd);
8283 			break;
8284 		}
8285 
8286 		/* Don't queue additional frames while flushing Tx queues. */
8287 		if (sc->sc_flags & IWX_FLAG_TXFLUSH)
8288 			break;
8289 
8290 		/* need to send management frames even if we're not RUNning */
8291 		m = mq_dequeue(&ic->ic_mgtq);
8292 		if (m) {
8293 			ni = m->m_pkthdr.ph_cookie;
8294 			goto sendit;
8295 		}
8296 
8297 		if (ic->ic_state != IEEE80211_S_RUN ||
8298 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
8299 			break;
8300 
8301 		m = ifq_dequeue(&ifp->if_snd);
8302 		if (!m)
8303 			break;
8304 		if (m->m_len < sizeof (*eh) &&
8305 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
8306 			ifp->if_oerrors++;
8307 			continue;
8308 		}
8309 #if NBPFILTER > 0
8310 		if (ifp->if_bpf != NULL)
8311 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
8312 #endif
8313 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
8314 			ifp->if_oerrors++;
8315 			continue;
8316 		}
8317 
8318  sendit:
8319 #if NBPFILTER > 0
8320 		if (ic->ic_rawbpf != NULL)
8321 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
8322 #endif
8323 		if (iwx_tx(sc, m, ni) != 0) {
8324 			ieee80211_release_node(ic, ni);
8325 			ifp->if_oerrors++;
8326 			continue;
8327 		}
8328 
8329 		if (ifp->if_flags & IFF_UP)
8330 			ifp->if_timer = 1;
8331 	}
8332 
8333 	return;
8334 }
8335 
8336 void
8337 iwx_stop(struct ifnet *ifp)
8338 {
8339 	struct iwx_softc *sc = ifp->if_softc;
8340 	struct ieee80211com *ic = &sc->sc_ic;
8341 	struct iwx_node *in = (void *)ic->ic_bss;
8342 	int i, s = splnet();
8343 
8344 	rw_assert_wrlock(&sc->ioctl_rwl);
8345 
8346 	sc->sc_flags |= IWX_FLAG_SHUTDOWN; /* Disallow new tasks. */
8347 
8348 	/* Cancel scheduled tasks and let any stale tasks finish up. */
8349 	task_del(systq, &sc->init_task);
8350 	iwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
8351 	iwx_del_task(sc, systq, &sc->ba_task);
8352 	iwx_del_task(sc, systq, &sc->setkey_task);
8353 	memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
8354 	sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
8355 	iwx_del_task(sc, systq, &sc->mac_ctxt_task);
8356 	iwx_del_task(sc, systq, &sc->phy_ctxt_task);
8357 	iwx_del_task(sc, systq, &sc->bgscan_done_task);
8358 	KASSERT(sc->task_refs.r_refs >= 1);
8359 	refcnt_finalize(&sc->task_refs, "iwxstop");
8360 
8361 	iwx_stop_device(sc);
8362 
8363 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8364 	sc->bgscan_unref_arg = NULL;
8365 	sc->bgscan_unref_arg_size = 0;
8366 
8367 	/* Reset soft state. */
8368 
8369 	sc->sc_generation++;
8370 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
8371 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
8372 		sc->sc_cmd_resp_pkt[i] = NULL;
8373 		sc->sc_cmd_resp_len[i] = 0;
8374 	}
8375 	ifp->if_flags &= ~IFF_RUNNING;
8376 	ifq_clr_oactive(&ifp->if_snd);
8377 
8378 	in->in_phyctxt = NULL;
8379 	in->in_flags = 0;
8380 	IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
8381 
8382 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8383 	sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8384 	sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8385 	sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8386 	sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8387 	sc->sc_flags &= ~IWX_FLAG_HW_ERR;
8388 	sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
8389 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
8390 
8391 	sc->sc_rx_ba_sessions = 0;
8392 	sc->ba_rx.start_tidmask = 0;
8393 	sc->ba_rx.stop_tidmask = 0;
8394 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
8395 	sc->ba_tx.start_tidmask = 0;
8396 	sc->ba_tx.stop_tidmask = 0;
8397 
8398 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
8399 	sc->ns_nstate = IEEE80211_S_INIT;
8400 
8401 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
8402 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
8403 		iwx_clear_reorder_buffer(sc, rxba);
8404 	}
8405 	memset(sc->sc_tx_timer, 0, sizeof(sc->sc_tx_timer));
8406 	ifp->if_timer = 0;
8407 
8408 	splx(s);
8409 }
8410 
8411 void
8412 iwx_watchdog(struct ifnet *ifp)
8413 {
8414 	struct iwx_softc *sc = ifp->if_softc;
8415 	int i;
8416 
8417 	ifp->if_timer = 0;
8418 
8419 	/*
8420 	 * We maintain a separate timer for each Tx queue because
8421 	 * Tx aggregation queues can get "stuck" while other queues
8422 	 * keep working. The Linux driver uses a similar workaround.
8423 	 */
8424 	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
8425 		if (sc->sc_tx_timer[i] > 0) {
8426 			if (--sc->sc_tx_timer[i] == 0) {
8427 				printf("%s: device timeout\n", DEVNAME(sc));
8428 				if (ifp->if_flags & IFF_DEBUG) {
8429 					iwx_nic_error(sc);
8430 					iwx_dump_driver_status(sc);
8431 				}
8432 				if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
8433 					task_add(systq, &sc->init_task);
8434 				ifp->if_oerrors++;
8435 				return;
8436 			}
8437 			ifp->if_timer = 1;
8438 		}
8439 	}
8440 
8441 	ieee80211_watchdog(ifp);
8442 }
8443 
8444 int
8445 iwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
8446 {
8447 	struct iwx_softc *sc = ifp->if_softc;
8448 	int s, err = 0, generation = sc->sc_generation;
8449 
8450 	/*
8451 	 * Prevent processes from entering this function while another
8452 	 * process is tsleep'ing in it.
8453 	 */
8454 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
8455 	if (err == 0 && generation != sc->sc_generation) {
8456 		rw_exit(&sc->ioctl_rwl);
8457 		return ENXIO;
8458 	}
8459 	if (err)
8460 		return err;
8461 	s = splnet();
8462 
8463 	switch (cmd) {
8464 	case SIOCSIFADDR:
8465 		ifp->if_flags |= IFF_UP;
8466 		/* FALLTHROUGH */
8467 	case SIOCSIFFLAGS:
8468 		if (ifp->if_flags & IFF_UP) {
8469 			if (!(ifp->if_flags & IFF_RUNNING)) {
8470 				/* Force reload of firmware image from disk. */
8471 				sc->sc_fw.fw_status = IWX_FW_STATUS_NONE;
8472 				err = iwx_init(ifp);
8473 			}
8474 		} else {
8475 			if (ifp->if_flags & IFF_RUNNING)
8476 				iwx_stop(ifp);
8477 		}
8478 		break;
8479 
8480 	default:
8481 		err = ieee80211_ioctl(ifp, cmd, data);
8482 	}
8483 
8484 	if (err == ENETRESET) {
8485 		err = 0;
8486 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
8487 		    (IFF_UP | IFF_RUNNING)) {
8488 			iwx_stop(ifp);
8489 			err = iwx_init(ifp);
8490 		}
8491 	}
8492 
8493 	splx(s);
8494 	rw_exit(&sc->ioctl_rwl);
8495 
8496 	return err;
8497 }
8498 
8499 /*
8500  * Note: This structure is read from the device with IO accesses,
8501  * and the reading already does the endian conversion. As it is
8502  * read with uint32_t-sized accesses, any members with a different size
8503  * need to be ordered correctly though!
8504  */
8505 struct iwx_error_event_table {
8506 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8507 	uint32_t error_id;		/* type of error */
8508 	uint32_t trm_hw_status0;	/* TRM HW status */
8509 	uint32_t trm_hw_status1;	/* TRM HW status */
8510 	uint32_t blink2;		/* branch link */
8511 	uint32_t ilink1;		/* interrupt link */
8512 	uint32_t ilink2;		/* interrupt link */
8513 	uint32_t data1;		/* error-specific data */
8514 	uint32_t data2;		/* error-specific data */
8515 	uint32_t data3;		/* error-specific data */
8516 	uint32_t bcon_time;		/* beacon timer */
8517 	uint32_t tsf_low;		/* network timestamp function timer */
8518 	uint32_t tsf_hi;		/* network timestamp function timer */
8519 	uint32_t gp1;		/* GP1 timer register */
8520 	uint32_t gp2;		/* GP2 timer register */
8521 	uint32_t fw_rev_type;	/* firmware revision type */
8522 	uint32_t major;		/* uCode version major */
8523 	uint32_t minor;		/* uCode version minor */
8524 	uint32_t hw_ver;		/* HW Silicon version */
8525 	uint32_t brd_ver;		/* HW board version */
8526 	uint32_t log_pc;		/* log program counter */
8527 	uint32_t frame_ptr;		/* frame pointer */
8528 	uint32_t stack_ptr;		/* stack pointer */
8529 	uint32_t hcmd;		/* last host command header */
8530 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
8531 				 * rxtx_flag */
8532 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
8533 				 * host_flag */
8534 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
8535 				 * enc_flag */
8536 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
8537 				 * time_flag */
8538 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
8539 				 * wico interrupt */
8540 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
8541 	uint32_t wait_event;		/* wait event() caller address */
8542 	uint32_t l2p_control;	/* L2pControlField */
8543 	uint32_t l2p_duration;	/* L2pDurationField */
8544 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
8545 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
8546 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
8547 				 * (LMPM_PMG_SEL) */
8548 	uint32_t u_timestamp;	/* indicate when the date and time of the
8549 				 * compilation */
8550 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
8551 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8552 
8553 /*
8554  * UMAC error struct - relevant starting from family 8000 chip.
8555  * Note: This structure is read from the device with IO accesses,
8556  * and the reading already does the endian conversion. As it is
8557  * read with u32-sized accesses, any members with a different size
8558  * need to be ordered correctly though!
8559  */
8560 struct iwx_umac_error_event_table {
8561 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8562 	uint32_t error_id;	/* type of error */
8563 	uint32_t blink1;	/* branch link */
8564 	uint32_t blink2;	/* branch link */
8565 	uint32_t ilink1;	/* interrupt link */
8566 	uint32_t ilink2;	/* interrupt link */
8567 	uint32_t data1;		/* error-specific data */
8568 	uint32_t data2;		/* error-specific data */
8569 	uint32_t data3;		/* error-specific data */
8570 	uint32_t umac_major;
8571 	uint32_t umac_minor;
8572 	uint32_t frame_pointer;	/* core register 27*/
8573 	uint32_t stack_pointer;	/* core register 28 */
8574 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
8575 	uint32_t nic_isr_pref;	/* ISR status register */
8576 } __packed;
8577 
8578 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
8579 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
8580 
8581 void
8582 iwx_nic_umac_error(struct iwx_softc *sc)
8583 {
8584 	struct iwx_umac_error_event_table table;
8585 	uint32_t base;
8586 
8587 	base = sc->sc_uc.uc_umac_error_event_table;
8588 
8589 	if (base < 0x800000) {
8590 		printf("%s: Invalid error log pointer 0x%08x\n",
8591 		    DEVNAME(sc), base);
8592 		return;
8593 	}
8594 
8595 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8596 		printf("%s: reading errlog failed\n", DEVNAME(sc));
8597 		return;
8598 	}
8599 
8600 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8601 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
8602 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8603 			sc->sc_flags, table.valid);
8604 	}
8605 
8606 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
8607 		iwx_desc_lookup(table.error_id));
8608 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
8609 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
8610 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
8611 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
8612 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
8613 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
8614 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
8615 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
8616 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
8617 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
8618 	    table.frame_pointer);
8619 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
8620 	    table.stack_pointer);
8621 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
8622 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
8623 	    table.nic_isr_pref);
8624 }
8625 
8626 #define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
8627 static struct {
8628 	const char *name;
8629 	uint8_t num;
8630 } advanced_lookup[] = {
8631 	{ "NMI_INTERRUPT_WDG", 0x34 },
8632 	{ "SYSASSERT", 0x35 },
8633 	{ "UCODE_VERSION_MISMATCH", 0x37 },
8634 	{ "BAD_COMMAND", 0x38 },
8635 	{ "BAD_COMMAND", 0x39 },
8636 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
8637 	{ "FATAL_ERROR", 0x3D },
8638 	{ "NMI_TRM_HW_ERR", 0x46 },
8639 	{ "NMI_INTERRUPT_TRM", 0x4C },
8640 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
8641 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
8642 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
8643 	{ "NMI_INTERRUPT_HOST", 0x66 },
8644 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
8645 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
8646 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
8647 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
8648 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
8649 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
8650 	{ "ADVANCED_SYSASSERT", 0 },
8651 };
8652 
8653 const char *
8654 iwx_desc_lookup(uint32_t num)
8655 {
8656 	int i;
8657 
8658 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
8659 		if (advanced_lookup[i].num ==
8660 		    (num & ~IWX_FW_SYSASSERT_CPU_MASK))
8661 			return advanced_lookup[i].name;
8662 
8663 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
8664 	return advanced_lookup[i].name;
8665 }
8666 
8667 /*
8668  * Support for dumping the error log seemed like a good idea ...
8669  * but it's mostly hex junk and the only sensible thing is the
8670  * hw/ucode revision (which we know anyway).  Since it's here,
8671  * I'll just leave it in, just in case e.g. the Intel guys want to
8672  * help us decipher some "ADVANCED_SYSASSERT" later.
8673  */
8674 void
8675 iwx_nic_error(struct iwx_softc *sc)
8676 {
8677 	struct iwx_error_event_table table;
8678 	uint32_t base;
8679 
8680 	printf("%s: dumping device error log\n", DEVNAME(sc));
8681 	base = sc->sc_uc.uc_lmac_error_event_table[0];
8682 	if (base < 0x800000) {
8683 		printf("%s: Invalid error log pointer 0x%08x\n",
8684 		    DEVNAME(sc), base);
8685 		return;
8686 	}
8687 
8688 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8689 		printf("%s: reading errlog failed\n", DEVNAME(sc));
8690 		return;
8691 	}
8692 
8693 	if (!table.valid) {
8694 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
8695 		return;
8696 	}
8697 
8698 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8699 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
8700 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8701 		    sc->sc_flags, table.valid);
8702 	}
8703 
8704 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
8705 	    iwx_desc_lookup(table.error_id));
8706 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
8707 	    table.trm_hw_status0);
8708 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
8709 	    table.trm_hw_status1);
8710 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
8711 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
8712 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
8713 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
8714 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
8715 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
8716 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
8717 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
8718 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
8719 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
8720 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
8721 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
8722 	    table.fw_rev_type);
8723 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
8724 	    table.major);
8725 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
8726 	    table.minor);
8727 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
8728 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
8729 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
8730 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
8731 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
8732 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
8733 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
8734 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
8735 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
8736 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
8737 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
8738 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
8739 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
8740 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
8741 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
8742 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
8743 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
8744 
8745 	if (sc->sc_uc.uc_umac_error_event_table)
8746 		iwx_nic_umac_error(sc);
8747 }
8748 
8749 void
8750 iwx_dump_driver_status(struct iwx_softc *sc)
8751 {
8752 	int i;
8753 
8754 	printf("driver status:\n");
8755 	for (i = 0; i < nitems(sc->txq); i++) {
8756 		struct iwx_tx_ring *ring = &sc->txq[i];
8757 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
8758 		    "queued=%-3d\n",
8759 		    i, ring->qid, ring->cur, ring->queued);
8760 	}
8761 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
8762 	printf("  802.11 state %s\n",
8763 	    ieee80211_state_name[sc->sc_ic.ic_state]);
8764 }
8765 
8766 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
8767 do {									\
8768 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
8769 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
8770 	_var_ = (void *)((_pkt_)+1);					\
8771 } while (/*CONSTCOND*/0)
8772 
8773 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
8774 do {									\
8775 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
8776 	    sizeof(len), BUS_DMASYNC_POSTREAD);				\
8777 	_ptr_ = (void *)((_pkt_)+1);					\
8778 } while (/*CONSTCOND*/0)
8779 
8780 int
8781 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
8782 {
8783 	int qid, idx, code;
8784 
8785 	qid = pkt->hdr.qid & ~0x80;
8786 	idx = pkt->hdr.idx;
8787 	code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8788 
8789 	return (!(qid == 0 && idx == 0 && code == 0) &&
8790 	    pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
8791 }
8792 
8793 void
8794 iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf_list *ml)
8795 {
8796 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8797 	struct iwx_rx_packet *pkt, *nextpkt;
8798 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
8799 	struct mbuf *m0, *m;
8800 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
8801 	int qid, idx, code, handled = 1;
8802 
8803 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
8804 	    BUS_DMASYNC_POSTREAD);
8805 
8806 	m0 = data->m;
8807 	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
8808 		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
8809 		qid = pkt->hdr.qid;
8810 		idx = pkt->hdr.idx;
8811 
8812 		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8813 
8814 		if (!iwx_rx_pkt_valid(pkt))
8815 			break;
8816 
8817 		/*
8818 		 * XXX Intel inside (tm)
8819 		 * Any commands in the LONG_GROUP could actually be in the
8820 		 * LEGACY group. Firmware API versions >= 50 reject commands
8821 		 * in group 0, forcing us to use this hack.
8822 		 */
8823 		if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
8824 			struct iwx_tx_ring *ring = &sc->txq[qid];
8825 			struct iwx_tx_data *txdata = &ring->data[idx];
8826 			if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
8827 				code = iwx_cmd_opcode(code);
8828 		}
8829 
8830 		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
8831 		if (len < minsz || len > (IWX_RBUF_SIZE - offset))
8832 			break;
8833 
8834 		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
8835 			/* Take mbuf m0 off the RX ring. */
8836 			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
8837 				ifp->if_ierrors++;
8838 				break;
8839 			}
8840 			KASSERT(data->m != m0);
8841 		}
8842 
8843 		switch (code) {
8844 		case IWX_REPLY_RX_PHY_CMD:
8845 			iwx_rx_rx_phy_cmd(sc, pkt, data);
8846 			break;
8847 
8848 		case IWX_REPLY_RX_MPDU_CMD: {
8849 			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
8850 			nextoff = offset +
8851 			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
8852 			nextpkt = (struct iwx_rx_packet *)
8853 			    (m0->m_data + nextoff);
8854 			if (nextoff + minsz >= IWX_RBUF_SIZE ||
8855 			    !iwx_rx_pkt_valid(nextpkt)) {
8856 				/* No need to copy last frame in buffer. */
8857 				if (offset > 0)
8858 					m_adj(m0, offset);
8859 				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen, ml);
8860 				m0 = NULL; /* stack owns m0 now; abort loop */
8861 			} else {
8862 				/*
8863 				 * Create an mbuf which points to the current
8864 				 * packet. Always copy from offset zero to
8865 				 * preserve m_pkthdr.
8866 				 */
8867 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
8868 				if (m == NULL) {
8869 					ifp->if_ierrors++;
8870 					m_freem(m0);
8871 					m0 = NULL;
8872 					break;
8873 				}
8874 				m_adj(m, offset);
8875 				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen, ml);
8876 			}
8877  			break;
8878 		}
8879 
8880 		case IWX_BAR_FRAME_RELEASE:
8881 			iwx_rx_bar_frame_release(sc, pkt, ml);
8882 			break;
8883 
8884 		case IWX_TX_CMD:
8885 			iwx_rx_tx_cmd(sc, pkt, data);
8886 			break;
8887 
8888 		case IWX_BA_NOTIF:
8889 			iwx_rx_compressed_ba(sc, pkt);
8890 			break;
8891 
8892 		case IWX_MISSED_BEACONS_NOTIFICATION:
8893 			iwx_rx_bmiss(sc, pkt, data);
8894 			break;
8895 
8896 		case IWX_MFUART_LOAD_NOTIFICATION:
8897 			break;
8898 
8899 		case IWX_ALIVE: {
8900 			struct iwx_alive_resp_v4 *resp4;
8901 			struct iwx_alive_resp_v5 *resp5;
8902 
8903 			DPRINTF(("%s: firmware alive\n", __func__));
8904 			sc->sc_uc.uc_ok = 0;
8905 
8906 			/*
8907 			 * For v5 and above, we can check the version, for older
8908 			 * versions we need to check the size.
8909 			 */
8910 			 if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
8911 			    IWX_ALIVE) == 5) {
8912 				SYNC_RESP_STRUCT(resp5, pkt);
8913 				if (iwx_rx_packet_payload_len(pkt) !=
8914 				    sizeof(*resp5)) {
8915 					sc->sc_uc.uc_intr = 1;
8916 					wakeup(&sc->sc_uc);
8917 					break;
8918 				}
8919 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
8920 				    resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
8921 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
8922 				    resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
8923 				sc->sc_uc.uc_log_event_table = le32toh(
8924 				    resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
8925 				sc->sc_uc.uc_umac_error_event_table = le32toh(
8926 				    resp5->umac_data.dbg_ptrs.error_info_addr);
8927 				if (resp5->status == IWX_ALIVE_STATUS_OK)
8928 					sc->sc_uc.uc_ok = 1;
8929 			} else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
8930 				SYNC_RESP_STRUCT(resp4, pkt);
8931 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
8932 				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
8933 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
8934 				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
8935 				sc->sc_uc.uc_log_event_table = le32toh(
8936 				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
8937 				sc->sc_uc.uc_umac_error_event_table = le32toh(
8938 				    resp4->umac_data.dbg_ptrs.error_info_addr);
8939 				if (resp4->status == IWX_ALIVE_STATUS_OK)
8940 					sc->sc_uc.uc_ok = 1;
8941 			}
8942 
8943 			sc->sc_uc.uc_intr = 1;
8944 			wakeup(&sc->sc_uc);
8945 			break;
8946 		}
8947 
8948 		case IWX_STATISTICS_NOTIFICATION: {
8949 			struct iwx_notif_statistics *stats;
8950 			SYNC_RESP_STRUCT(stats, pkt);
8951 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
8952 			sc->sc_noise = iwx_get_noise(&stats->rx.general);
8953 			break;
8954 		}
8955 
8956 		case IWX_DTS_MEASUREMENT_NOTIFICATION:
8957 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
8958 				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
8959 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
8960 				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
8961 			break;
8962 
8963 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
8964 		    IWX_CT_KILL_NOTIFICATION): {
8965 			struct iwx_ct_kill_notif *notif;
8966 			SYNC_RESP_STRUCT(notif, pkt);
8967 			printf("%s: device at critical temperature (%u degC), "
8968 			    "stopping device\n",
8969 			    DEVNAME(sc), le16toh(notif->temperature));
8970 			sc->sc_flags |= IWX_FLAG_HW_ERR;
8971 			task_add(systq, &sc->init_task);
8972 			break;
8973 		}
8974 
8975 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
8976 		    IWX_SESSION_PROTECTION_CMD):
8977 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
8978 		    IWX_NVM_GET_INFO):
8979 		case IWX_ADD_STA_KEY:
8980 		case IWX_PHY_CONFIGURATION_CMD:
8981 		case IWX_TX_ANT_CONFIGURATION_CMD:
8982 		case IWX_ADD_STA:
8983 		case IWX_MAC_CONTEXT_CMD:
8984 		case IWX_REPLY_SF_CFG_CMD:
8985 		case IWX_POWER_TABLE_CMD:
8986 		case IWX_LTR_CONFIG:
8987 		case IWX_PHY_CONTEXT_CMD:
8988 		case IWX_BINDING_CONTEXT_CMD:
8989 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
8990 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
8991 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
8992 		case IWX_REPLY_BEACON_FILTERING_CMD:
8993 		case IWX_MAC_PM_POWER_TABLE:
8994 		case IWX_TIME_QUOTA_CMD:
8995 		case IWX_REMOVE_STA:
8996 		case IWX_TXPATH_FLUSH:
8997 		case IWX_BT_CONFIG:
8998 		case IWX_MCC_UPDATE_CMD:
8999 		case IWX_TIME_EVENT_CMD:
9000 		case IWX_STATISTICS_CMD:
9001 		case IWX_SCD_QUEUE_CFG: {
9002 			size_t pkt_len;
9003 
9004 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
9005 				break;
9006 
9007 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
9008 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
9009 
9010 			pkt_len = sizeof(pkt->len_n_flags) +
9011 			    iwx_rx_packet_len(pkt);
9012 
9013 			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
9014 			    pkt_len < sizeof(*pkt) ||
9015 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
9016 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
9017 				    sc->sc_cmd_resp_len[idx]);
9018 				sc->sc_cmd_resp_pkt[idx] = NULL;
9019 				break;
9020 			}
9021 
9022 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
9023 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
9024 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
9025 			break;
9026 		}
9027 
9028 		case IWX_INIT_COMPLETE_NOTIF:
9029 			sc->sc_init_complete |= IWX_INIT_COMPLETE;
9030 			wakeup(&sc->sc_init_complete);
9031 			break;
9032 
9033 		case IWX_SCAN_COMPLETE_UMAC: {
9034 			struct iwx_umac_scan_complete *notif;
9035 			SYNC_RESP_STRUCT(notif, pkt);
9036 			iwx_endscan(sc);
9037 			break;
9038 		}
9039 
9040 		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
9041 			struct iwx_umac_scan_iter_complete_notif *notif;
9042 			SYNC_RESP_STRUCT(notif, pkt);
9043 			iwx_endscan(sc);
9044 			break;
9045 		}
9046 
9047 		case IWX_MCC_CHUB_UPDATE_CMD: {
9048 			struct iwx_mcc_chub_notif *notif;
9049 			SYNC_RESP_STRUCT(notif, pkt);
9050 			iwx_mcc_update(sc, notif);
9051 			break;
9052 		}
9053 
9054 		case IWX_REPLY_ERROR: {
9055 			struct iwx_error_resp *resp;
9056 			SYNC_RESP_STRUCT(resp, pkt);
9057 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
9058 				DEVNAME(sc), le32toh(resp->error_type),
9059 				resp->cmd_id);
9060 			break;
9061 		}
9062 
9063 		case IWX_TIME_EVENT_NOTIFICATION: {
9064 			struct iwx_time_event_notif *notif;
9065 			uint32_t action;
9066 			SYNC_RESP_STRUCT(notif, pkt);
9067 
9068 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
9069 				break;
9070 			action = le32toh(notif->action);
9071 			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
9072 				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
9073 			break;
9074 		}
9075 
9076 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
9077 		    IWX_SESSION_PROTECTION_NOTIF):
9078 			break;
9079 
9080 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
9081 		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
9082 		    break;
9083 
9084 		/*
9085 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
9086 		 * messages. Just ignore them for now.
9087 		 */
9088 		case IWX_DEBUG_LOG_MSG:
9089 			break;
9090 
9091 		case IWX_MCAST_FILTER_CMD:
9092 			break;
9093 
9094 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
9095 			break;
9096 
9097 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
9098 			break;
9099 
9100 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
9101 			break;
9102 
9103 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
9104 		    IWX_NVM_ACCESS_COMPLETE):
9105 			break;
9106 
9107 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
9108 			break; /* happens in monitor mode; ignore for now */
9109 
9110 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
9111 			break;
9112 
9113 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
9114 		    IWX_TLC_MNG_UPDATE_NOTIF): {
9115 			struct iwx_tlc_update_notif *notif;
9116 			SYNC_RESP_STRUCT(notif, pkt);
9117 			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
9118 				iwx_rs_update(sc, notif);
9119 			break;
9120 		}
9121 
9122 		default:
9123 			handled = 0;
9124 			printf("%s: unhandled firmware response 0x%x/0x%x "
9125 			    "rx ring %d[%d]\n",
9126 			    DEVNAME(sc), code, pkt->len_n_flags,
9127 			    (qid & ~0x80), idx);
9128 			break;
9129 		}
9130 
9131 		/*
9132 		 * uCode sets bit 0x80 when it originates the notification,
9133 		 * i.e. when the notification is not a direct response to a
9134 		 * command sent by the driver.
9135 		 * For example, uCode issues IWX_REPLY_RX when it sends a
9136 		 * received frame to the driver.
9137 		 */
9138 		if (handled && !(qid & (1 << 7))) {
9139 			iwx_cmd_done(sc, qid, idx, code);
9140 		}
9141 
9142 		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
9143 	}
9144 
9145 	if (m0 && m0 != data->m)
9146 		m_freem(m0);
9147 }
9148 
9149 void
9150 iwx_notif_intr(struct iwx_softc *sc)
9151 {
9152 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
9153 	uint16_t hw;
9154 
9155 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
9156 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
9157 
9158 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
9159 	hw &= (IWX_RX_MQ_RING_COUNT - 1);
9160 	while (sc->rxq.cur != hw) {
9161 		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
9162 		iwx_rx_pkt(sc, data, &ml);
9163 		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
9164 	}
9165 	if_input(&sc->sc_ic.ic_if, &ml);
9166 
9167 	/*
9168 	 * Tell the firmware what we have processed.
9169 	 * Seems like the hardware gets upset unless we align the write by 8??
9170 	 */
9171 	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
9172 	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
9173 }
9174 
9175 int
9176 iwx_intr(void *arg)
9177 {
9178 	struct iwx_softc *sc = arg;
9179 	struct ieee80211com *ic = &sc->sc_ic;
9180 	struct ifnet *ifp = IC2IFP(ic);
9181 	int handled = 0;
9182 	int r1, r2, rv = 0;
9183 
9184 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
9185 
9186 	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
9187 		uint32_t *ict = sc->ict_dma.vaddr;
9188 		int tmp;
9189 
9190 		tmp = htole32(ict[sc->ict_cur]);
9191 		if (!tmp)
9192 			goto out_ena;
9193 
9194 		/*
9195 		 * ok, there was something.  keep plowing until we have all.
9196 		 */
9197 		r1 = r2 = 0;
9198 		while (tmp) {
9199 			r1 |= tmp;
9200 			ict[sc->ict_cur] = 0;
9201 			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
9202 			tmp = htole32(ict[sc->ict_cur]);
9203 		}
9204 
9205 		/* this is where the fun begins.  don't ask */
9206 		if (r1 == 0xffffffff)
9207 			r1 = 0;
9208 
9209 		/* i am not expected to understand this */
9210 		if (r1 & 0xc0000)
9211 			r1 |= 0x8000;
9212 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
9213 	} else {
9214 		r1 = IWX_READ(sc, IWX_CSR_INT);
9215 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
9216 			goto out;
9217 		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
9218 	}
9219 	if (r1 == 0 && r2 == 0) {
9220 		goto out_ena;
9221 	}
9222 
9223 	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);
9224 
9225 	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
9226 		int i;
9227 
9228 		/* Firmware has now configured the RFH. */
9229 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
9230 			iwx_update_rx_desc(sc, &sc->rxq, i);
9231 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
9232 	}
9233 
9234 	handled |= (r1 & (IWX_CSR_INT_BIT_ALIVE /*| IWX_CSR_INT_BIT_SCD*/));
9235 
9236 	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
9237 		handled |= IWX_CSR_INT_BIT_RF_KILL;
9238 		iwx_check_rfkill(sc);
9239 		task_add(systq, &sc->init_task);
9240 		rv = 1;
9241 		goto out_ena;
9242 	}
9243 
9244 	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
9245 		if (ifp->if_flags & IFF_DEBUG) {
9246 			iwx_nic_error(sc);
9247 			iwx_dump_driver_status(sc);
9248 		}
9249 		printf("%s: fatal firmware error\n", DEVNAME(sc));
9250 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
9251 			task_add(systq, &sc->init_task);
9252 		rv = 1;
9253 		goto out;
9254 
9255 	}
9256 
9257 	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
9258 		handled |= IWX_CSR_INT_BIT_HW_ERR;
9259 		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
9260 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
9261 			sc->sc_flags |= IWX_FLAG_HW_ERR;
9262 			task_add(systq, &sc->init_task);
9263 		}
9264 		rv = 1;
9265 		goto out;
9266 	}
9267 
9268 	/* firmware chunk loaded */
9269 	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
9270 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);
9271 		handled |= IWX_CSR_INT_BIT_FH_TX;
9272 
9273 		sc->sc_fw_chunk_done = 1;
9274 		wakeup(&sc->sc_fw);
9275 	}
9276 
9277 	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
9278 	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
9279 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
9280 			handled |= (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX);
9281 			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
9282 		}
9283 		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
9284 			handled |= IWX_CSR_INT_BIT_RX_PERIODIC;
9285 			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
9286 		}
9287 
9288 		/* Disable periodic interrupt; we use it as just a one-shot. */
9289 		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);
9290 
9291 		/*
9292 		 * Enable periodic interrupt in 8 msec only if we received
9293 		 * real RX interrupt (instead of just periodic int), to catch
9294 		 * any dangling Rx interrupt.  If it was just the periodic
9295 		 * interrupt, there was no dangling Rx activity, and no need
9296 		 * to extend the periodic interrupt; one-shot is enough.
9297 		 */
9298 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
9299 			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
9300 			    IWX_CSR_INT_PERIODIC_ENA);
9301 
9302 		iwx_notif_intr(sc);
9303 	}
9304 
9305 	rv = 1;
9306 
9307  out_ena:
9308 	iwx_restore_interrupts(sc);
9309  out:
9310 	return rv;
9311 }
9312 
9313 int
9314 iwx_intr_msix(void *arg)
9315 {
9316 	struct iwx_softc *sc = arg;
9317 	struct ieee80211com *ic = &sc->sc_ic;
9318 	struct ifnet *ifp = IC2IFP(ic);
9319 	uint32_t inta_fh, inta_hw;
9320 	int vector = 0;
9321 
9322 	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
9323 	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
9324 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
9325 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
9326 	inta_fh &= sc->sc_fh_mask;
9327 	inta_hw &= sc->sc_hw_mask;
9328 
9329 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
9330 	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
9331 		iwx_notif_intr(sc);
9332 	}
9333 
9334 	/* firmware chunk loaded */
9335 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
9336 		sc->sc_fw_chunk_done = 1;
9337 		wakeup(&sc->sc_fw);
9338 	}
9339 
9340 	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
9341 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
9342 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
9343 		if (ifp->if_flags & IFF_DEBUG) {
9344 			iwx_nic_error(sc);
9345 			iwx_dump_driver_status(sc);
9346 		}
9347 		printf("%s: fatal firmware error\n", DEVNAME(sc));
9348 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
9349 			task_add(systq, &sc->init_task);
9350 		return 1;
9351 	}
9352 
9353 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
9354 		iwx_check_rfkill(sc);
9355 		task_add(systq, &sc->init_task);
9356 	}
9357 
9358 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
9359 		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
9360 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
9361 			sc->sc_flags |= IWX_FLAG_HW_ERR;
9362 			task_add(systq, &sc->init_task);
9363 		}
9364 		return 1;
9365 	}
9366 
9367 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
9368 		int i;
9369 
9370 		/* Firmware has now configured the RFH. */
9371 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
9372 			iwx_update_rx_desc(sc, &sc->rxq, i);
9373 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
9374 	}
9375 
9376 	/*
9377 	 * Before sending the interrupt the HW disables it to prevent
9378 	 * a nested interrupt. This is done by writing 1 to the corresponding
9379 	 * bit in the mask register. After handling the interrupt, it should be
9380 	 * re-enabled by clearing this bit. This register is defined as
9381 	 * write 1 clear (W1C) register, meaning that it's being clear
9382 	 * by writing 1 to the bit.
9383 	 */
9384 	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
9385 	return 1;
9386 }
9387 
9388 typedef void *iwx_match_t;
9389 
9390 static const struct pci_matchid iwx_devices[] = {
9391 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_1 },
9392 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_2 },
9393 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_3 },
9394 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_4,},
9395 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_5,},
9396 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_6,},
9397 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_7,},
9398 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_8,},
9399 };
9400 
9401 
9402 int
9403 iwx_match(struct device *parent, iwx_match_t match __unused, void *aux)
9404 {
9405 	struct pci_attach_args *pa = aux;
9406 	return pci_matchbyid(pa, iwx_devices, nitems(iwx_devices));
9407 }
9408 
9409 /*
9410  * The device info table below contains device-specific config overrides.
9411  * The most important parameter derived from this table is the name of the
9412  * firmware image to load.
9413  *
9414  * The Linux iwlwifi driver uses an "old" and a "new" device info table.
9415  * The "old" table matches devices based on PCI vendor/product IDs only.
9416  * The "new" table extends this with various device parameters derived
9417  * from MAC type, RF type, and PCI subdevice ID.
9418  *
9419  * In iwlwifi "old" and "new" tables share the same array, where "old"
9420  * entries contain dummy values for data defined only for "new" entries.
9421  * As of 2022, Linux developers are still in the process of moving entries
9422  * from "old" to "new" style and it looks like this effort has stalled in
9423  * in some work-in-progress state for quite a while. Linux commits moving
9424  * entries from "old" to "new" have at times been reverted due to regressions.
9425  * Part of this complexity comes from iwlwifi supporting both iwm(4) and iwx(4)
9426  * devices in the same driver.
9427  *
9428  * We try to avoid this mess while still recognizing supported iwx(4) devices
9429  * correctly. Our table below contains only "new" entries declared in iwlwifi
9430  * with the _IWL_DEV_INFO() macro (with a leading underscore).
9431  * Other devices are matched based on PCI vendor/product ID as usual.
9432  */
9433 
9434 struct iwx_dev_info {
9435 	uint16_t device;
9436 	uint16_t subdevice;
9437 	uint16_t mac_type;
9438 	uint16_t rf_type;
9439 	uint8_t mac_step;
9440 	uint8_t rf_id;
9441 	uint8_t no_160;
9442 	uint8_t cores;
9443 	uint8_t cdb;
9444 	uint8_t jacket;
9445 	const struct iwx_device_cfg *cfg;
9446 };
9447 
9448 #define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
9449 		      _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
9450 	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg),  \
9451 	  .mac_type = _mac_type, .rf_type = _rf_type,	   \
9452 	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id,		   \
9453 	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }
9454 
9455 /*
9456  * When adding entries to this table keep in mind that entries must
9457  * be listed in the same order as in the Linux driver. Code walks this
9458  * table backwards and uses the first matching entry it finds.
9459  * Device firmware must be available in fw_update(8).
9460  */
9461 static const struct iwx_dev_info iwx_dev_info_table[] = {
9462 	/* Qu with Jf, C step */
9463 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9464 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9465 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9466 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9467 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
9468 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9469 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9470 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
9471 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9472 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
9473 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9474 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9475 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9476 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9477 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
9478 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9479 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9480 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9481 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9482 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
9483 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9484 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9485 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9486 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9487 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
9488 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9489 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9490 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9491 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9492 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
9493 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
9494 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9495 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9496 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9497 		      IWX_CFG_ANY,
9498 		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
9499 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
9500 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9501 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9502 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9503 		      IWX_CFG_ANY,
9504 		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */
9505 
9506 	/* QuZ with Jf */
9507 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9508 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9509 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9510 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9511 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
9512 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9513 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9514 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9515 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9516 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
9517 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9518 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9519 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9520 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9521 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
9522 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9523 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9524 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
9525 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9526 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
9527 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
9528 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9529 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9530 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9531 		      IWX_CFG_ANY,
9532 		      iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
9533 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
9534 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9535 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
9536 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
9537 		      IWX_CFG_ANY,
9538 		      iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */
9539 
9540 	/* Qu with Hr, B step */
9541 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9542 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
9543 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9544 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9545 		      iwx_qu_b0_hr1_b0), /* AX101 */
9546 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9547 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
9548 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9549 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9550 		      iwx_qu_b0_hr_b0), /* AX203 */
9551 
9552 	/* Qu with Hr, C step */
9553 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9554 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9555 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9556 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9557 		      iwx_qu_c0_hr1_b0), /* AX101 */
9558 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9559 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9560 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9561 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9562 		      iwx_qu_c0_hr_b0), /* AX203 */
9563 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9564 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
9565 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9566 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9567 		      iwx_qu_c0_hr_b0), /* AX201 */
9568 
9569 	/* QuZ with Hr */
9570 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9571 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
9572 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
9573 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9574 		      iwx_quz_a0_hr1_b0), /* AX101 */
9575 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
9576 		      IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
9577 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
9578 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
9579 		      iwx_cfg_quz_a0_hr_b0), /* AX203 */
9580 };
9581 
9582 int
9583 iwx_preinit(struct iwx_softc *sc)
9584 {
9585 	struct ieee80211com *ic = &sc->sc_ic;
9586 	struct ifnet *ifp = IC2IFP(ic);
9587 	int err;
9588 
9589 	err = iwx_prepare_card_hw(sc);
9590 	if (err) {
9591 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9592 		return err;
9593 	}
9594 
9595 	if (sc->attached) {
9596 		/* Update MAC in case the upper layers changed it. */
9597 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
9598 		    ((struct arpcom *)ifp)->ac_enaddr);
9599 		return 0;
9600 	}
9601 
9602 	err = iwx_start_hw(sc);
9603 	if (err) {
9604 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9605 		return err;
9606 	}
9607 
9608 	err = iwx_run_init_mvm_ucode(sc, 1);
9609 	iwx_stop_device(sc);
9610 	if (err)
9611 		return err;
9612 
9613 	/* Print version info and MAC address on first successful fw load. */
9614 	sc->attached = 1;
9615 	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
9616 	    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
9617 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
9618 
9619 	if (sc->sc_nvm.sku_cap_11n_enable)
9620 		iwx_setup_ht_rates(sc);
9621 	if (sc->sc_nvm.sku_cap_11ac_enable)
9622 		iwx_setup_vht_rates(sc);
9623 
9624 	/* not all hardware can do 5GHz band */
9625 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
9626 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
9627 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
9628 
9629 	/* Configure channel information obtained from firmware. */
9630 	ieee80211_channel_init(ifp);
9631 
9632 	/* Configure MAC address. */
9633 	err = if_setlladdr(ifp, ic->ic_myaddr);
9634 	if (err)
9635 		printf("%s: could not set MAC address (error %d)\n",
9636 		    DEVNAME(sc), err);
9637 
9638 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
9639 
9640 	return 0;
9641 }
9642 
9643 void
9644 iwx_attach_hook(struct device *self)
9645 {
9646 	struct iwx_softc *sc = (void *)self;
9647 
9648 	KASSERT(!cold);
9649 
9650 	iwx_preinit(sc);
9651 }
9652 
9653 const struct iwx_device_cfg *
9654 iwx_find_device_cfg(struct iwx_softc *sc)
9655 {
9656 	pcireg_t sreg;
9657 	pci_product_id_t sdev_id;
9658 	uint16_t mac_type, rf_type;
9659 	uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
9660 	int i;
9661 
9662 	sreg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_SUBSYS_ID_REG);
9663 	sdev_id = PCI_PRODUCT(sreg);
9664 	mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
9665 	mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
9666 	rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
9667 	cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
9668 	jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);
9669 
9670 	rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
9671 	no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
9672 	cores = IWX_SUBDEVICE_CORES(sdev_id);
9673 
9674 	for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
9675 		 const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];
9676 
9677 		if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
9678 		    dev_info->device != sc->sc_pid)
9679 			continue;
9680 
9681 		if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
9682 		    dev_info->subdevice != sdev_id)
9683 			continue;
9684 
9685 		if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
9686 		    dev_info->mac_type != mac_type)
9687 			continue;
9688 
9689 		if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
9690 		    dev_info->mac_step != mac_step)
9691 			continue;
9692 
9693 		if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
9694 		    dev_info->rf_type != rf_type)
9695 			continue;
9696 
9697 		if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
9698 		    dev_info->cdb != cdb)
9699 			continue;
9700 
9701 		if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
9702 		    dev_info->jacket != jacket)
9703 			continue;
9704 
9705 		if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
9706 		    dev_info->rf_id != rf_id)
9707 			continue;
9708 
9709 		if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
9710 		    dev_info->no_160 != no_160)
9711 			continue;
9712 
9713 		if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
9714 		    dev_info->cores != cores)
9715 			continue;
9716 
9717 		return dev_info->cfg;
9718 	}
9719 
9720 	return NULL;
9721 }
9722 
9723 
9724 void
9725 iwx_attach(struct device *parent, struct device *self, void *aux)
9726 {
9727 	struct iwx_softc *sc = (void *)self;
9728 	struct pci_attach_args *pa = aux;
9729 	pci_intr_handle_t ih;
9730 	pcireg_t reg, memtype;
9731 	struct ieee80211com *ic = &sc->sc_ic;
9732 	struct ifnet *ifp = &ic->ic_if;
9733 	const char *intrstr;
9734 	const struct iwx_device_cfg *cfg;
9735 	int err;
9736 	int txq_i, i, j;
9737 
9738 	sc->sc_pid = pa->pa_id;
9739 	sc->sc_pct = pa->pa_pc;
9740 	sc->sc_pcitag = pa->pa_tag;
9741 	sc->sc_dmat = pa->pa_dmat;
9742 
9743 	rw_init(&sc->ioctl_rwl, "iwxioctl");
9744 
9745 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
9746 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
9747 	if (err == 0) {
9748 		printf("%s: PCIe capability structure not found!\n",
9749 		    DEVNAME(sc));
9750 		return;
9751 	}
9752 
9753 	/*
9754 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
9755 	 * PCI Tx retries from interfering with C3 CPU state.
9756 	 */
9757 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
9758 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
9759 
9760 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
9761 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
9762 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
9763 	if (err) {
9764 		printf("%s: can't map mem space\n", DEVNAME(sc));
9765 		return;
9766 	}
9767 
9768 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
9769 		sc->sc_msix = 1;
9770 	} else if (pci_intr_map_msi(pa, &ih)) {
9771 		if (pci_intr_map(pa, &ih)) {
9772 			printf("%s: can't map interrupt\n", DEVNAME(sc));
9773 			return;
9774 		}
9775 		/* Hardware bug workaround. */
9776 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
9777 		    PCI_COMMAND_STATUS_REG);
9778 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
9779 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
9780 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
9781 		    PCI_COMMAND_STATUS_REG, reg);
9782 	}
9783 
9784 	intrstr = pci_intr_string(sc->sc_pct, ih);
9785 	if (sc->sc_msix)
9786 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
9787 		    iwx_intr_msix, sc, DEVNAME(sc));
9788 	else
9789 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
9790 		    iwx_intr, sc, DEVNAME(sc));
9791 
9792 	if (sc->sc_ih == NULL) {
9793 		printf("\n");
9794 		printf("%s: can't establish interrupt", DEVNAME(sc));
9795 		if (intrstr != NULL)
9796 			printf(" at %s", intrstr);
9797 		printf("\n");
9798 		return;
9799 	}
9800 	printf(", %s\n", intrstr);
9801 
9802 	/* Clear pending interrupts. */
9803 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
9804 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
9805 	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
9806 
9807 	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
9808 	sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
9809 
9810 	/*
9811 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
9812 	 * changed, and now the revision step also includes bit 0-1 (no more
9813 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
9814 	 * in the old format.
9815 	 */
9816 	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
9817 			(IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
9818 
9819 	switch (PCI_PRODUCT(pa->pa_id)) {
9820 	case PCI_PRODUCT_INTEL_WL_22500_1:
9821 		sc->sc_fwname = IWX_CC_A_FW;
9822 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
9823 		sc->sc_integrated = 0;
9824 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
9825 		sc->sc_low_latency_xtal = 0;
9826 		sc->sc_xtal_latency = 0;
9827 		sc->sc_tx_with_siso_diversity = 0;
9828 		sc->sc_uhb_supported = 0;
9829 		break;
9830 	case PCI_PRODUCT_INTEL_WL_22500_2:
9831 	case PCI_PRODUCT_INTEL_WL_22500_5:
9832 		/* These devices should be QuZ only. */
9833 		if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
9834 			printf("%s: unsupported AX201 adapter\n", DEVNAME(sc));
9835 			return;
9836 		}
9837 		sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
9838 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
9839 		sc->sc_integrated = 1;
9840 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
9841 		sc->sc_low_latency_xtal = 0;
9842 		sc->sc_xtal_latency = 500;
9843 		sc->sc_tx_with_siso_diversity = 0;
9844 		sc->sc_uhb_supported = 0;
9845 		break;
9846 	case PCI_PRODUCT_INTEL_WL_22500_3:
9847 		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
9848 			sc->sc_fwname = IWX_QU_C_HR_B_FW;
9849 		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
9850 			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
9851 		else
9852 			sc->sc_fwname = IWX_QU_B_HR_B_FW;
9853 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
9854 		sc->sc_integrated = 1;
9855 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
9856 		sc->sc_low_latency_xtal = 0;
9857 		sc->sc_xtal_latency = 500;
9858 		sc->sc_tx_with_siso_diversity = 0;
9859 		sc->sc_uhb_supported = 0;
9860 		break;
9861 	case PCI_PRODUCT_INTEL_WL_22500_4:
9862 	case PCI_PRODUCT_INTEL_WL_22500_7:
9863 	case PCI_PRODUCT_INTEL_WL_22500_8:
9864 		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
9865 			sc->sc_fwname = IWX_QU_C_HR_B_FW;
9866 		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
9867 			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
9868 		else
9869 			sc->sc_fwname = IWX_QU_B_HR_B_FW;
9870 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
9871 		sc->sc_integrated = 1;
9872 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
9873 		sc->sc_low_latency_xtal = 0;
9874 		sc->sc_xtal_latency = 1820;
9875 		sc->sc_tx_with_siso_diversity = 0;
9876 		sc->sc_uhb_supported = 0;
9877 		break;
9878 	case PCI_PRODUCT_INTEL_WL_22500_6:
9879 		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
9880 			sc->sc_fwname = IWX_QU_C_HR_B_FW;
9881 		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
9882 			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
9883 		else
9884 			sc->sc_fwname = IWX_QU_B_HR_B_FW;
9885 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
9886 		sc->sc_integrated = 1;
9887 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
9888 		sc->sc_low_latency_xtal = 1;
9889 		sc->sc_xtal_latency = 12000;
9890 		sc->sc_tx_with_siso_diversity = 0;
9891 		sc->sc_uhb_supported = 0;
9892 		break;
9893 	default:
9894 		printf("%s: unknown adapter type\n", DEVNAME(sc));
9895 		return;
9896 	}
9897 
9898 	cfg = iwx_find_device_cfg(sc);
9899 	if (cfg) {
9900 		sc->sc_fwname = cfg->fw_name;
9901 		sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
9902 		sc->sc_uhb_supported = cfg->uhb_supported;
9903 	}
9904 
9905 	/* Allocate DMA memory for loading firmware. */
9906 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
9907 	    sizeof(struct iwx_context_info), 0);
9908 	if (err) {
9909 		printf("%s: could not allocate memory for loading firmware\n",
9910 		    DEVNAME(sc));
9911 		return;
9912 	}
9913 
9914 	/* Allocate interrupt cause table (ICT).*/
9915 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
9916 	    IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
9917 	if (err) {
9918 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
9919 		goto fail1;
9920 	}
9921 
9922 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
9923 		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
9924 		if (err) {
9925 			printf("%s: could not allocate TX ring %d\n",
9926 			    DEVNAME(sc), txq_i);
9927 			goto fail4;
9928 		}
9929 	}
9930 
9931 	err = iwx_alloc_rx_ring(sc, &sc->rxq);
9932 	if (err) {
9933 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
9934 		goto fail4;
9935 	}
9936 
9937 	sc->sc_nswq = taskq_create("iwxns", 1, IPL_NET, 0);
9938 	if (sc->sc_nswq == NULL)
9939 		goto fail4;
9940 
9941 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
9942 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
9943 	ic->ic_state = IEEE80211_S_INIT;
9944 
9945 	/* Set device capabilities. */
9946 	ic->ic_caps =
9947 	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
9948 	    IEEE80211_C_ADDBA_OFFLOAD | /* device sends ADDBA/DELBA frames */
9949 	    IEEE80211_C_WEP |		/* WEP */
9950 	    IEEE80211_C_RSN |		/* WPA/RSN */
9951 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
9952 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
9953 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
9954 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
9955 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
9956 
9957 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
9958 	ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
9959 	ic->ic_htcaps |=
9960 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
9961 	ic->ic_htxcaps = 0;
9962 	ic->ic_txbfcaps = 0;
9963 	ic->ic_aselcaps = 0;
9964 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
9965 
9966 	ic->ic_vhtcaps = IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895 |
9967 	    (IEEE80211_VHTCAP_MAX_AMPDU_LEN_64K <<
9968 	    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT) |
9969 	    (IEEE80211_VHTCAP_CHAN_WIDTH_80 <<
9970 	     IEEE80211_VHTCAP_CHAN_WIDTH_SHIFT) | IEEE80211_VHTCAP_SGI80 |
9971 	    IEEE80211_VHTCAP_RX_ANT_PATTERN | IEEE80211_VHTCAP_TX_ANT_PATTERN;
9972 
9973 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
9974 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
9975 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
9976 
9977 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
9978 		sc->sc_phyctxt[i].id = i;
9979 		sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
9980 		sc->sc_phyctxt[i].vht_chan_width =
9981 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT;
9982 	}
9983 
9984 	/* IBSS channel undefined for now. */
9985 	ic->ic_ibss_chan = &ic->ic_channels[1];
9986 
9987 	ic->ic_max_rssi = IWX_MAX_DBM - IWX_MIN_DBM;
9988 
9989 	ifp->if_softc = sc;
9990 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
9991 	ifp->if_ioctl = iwx_ioctl;
9992 	ifp->if_start = iwx_start;
9993 	ifp->if_watchdog = iwx_watchdog;
9994 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
9995 
9996 	if_attach(ifp);
9997 	ieee80211_ifattach(ifp);
9998 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
9999 
10000 #if NBPFILTER > 0
10001 	iwx_radiotap_attach(sc);
10002 #endif
10003 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10004 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
10005 		rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
10006 		rxba->sc = sc;
10007 		timeout_set(&rxba->session_timer, iwx_rx_ba_session_expired,
10008 		    rxba);
10009 		timeout_set(&rxba->reorder_buf.reorder_timer,
10010 		    iwx_reorder_timer_expired, &rxba->reorder_buf);
10011 		for (j = 0; j < nitems(rxba->entries); j++)
10012 			ml_init(&rxba->entries[j].frames);
10013 	}
10014 	task_set(&sc->init_task, iwx_init_task, sc);
10015 	task_set(&sc->newstate_task, iwx_newstate_task, sc);
10016 	task_set(&sc->ba_task, iwx_ba_task, sc);
10017 	task_set(&sc->setkey_task, iwx_setkey_task, sc);
10018 	task_set(&sc->mac_ctxt_task, iwx_mac_ctxt_task, sc);
10019 	task_set(&sc->phy_ctxt_task, iwx_phy_ctxt_task, sc);
10020 	task_set(&sc->bgscan_done_task, iwx_bgscan_done_task, sc);
10021 
10022 	ic->ic_node_alloc = iwx_node_alloc;
10023 	ic->ic_bgscan_start = iwx_bgscan;
10024 	ic->ic_bgscan_done = iwx_bgscan_done;
10025 	ic->ic_set_key = iwx_set_key;
10026 	ic->ic_delete_key = iwx_delete_key;
10027 
10028 	/* Override 802.11 state transition machine. */
10029 	sc->sc_newstate = ic->ic_newstate;
10030 	ic->ic_newstate = iwx_newstate;
10031 	ic->ic_updateprot = iwx_updateprot;
10032 	ic->ic_updateslot = iwx_updateslot;
10033 	ic->ic_updateedca = iwx_updateedca;
10034 	ic->ic_updatedtim = iwx_updatedtim;
10035 	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
10036 	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
10037 	ic->ic_ampdu_tx_start = iwx_ampdu_tx_start;
10038 	ic->ic_ampdu_tx_stop = NULL;
10039 	/*
10040 	 * We cannot read the MAC address without loading the
10041 	 * firmware from disk. Postpone until mountroot is done.
10042 	 */
10043 	config_mountroot(self, iwx_attach_hook);
10044 
10045 	return;
10046 
10047 fail4:	while (--txq_i >= 0)
10048 		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
10049 	iwx_free_rx_ring(sc, &sc->rxq);
10050 	if (sc->ict_dma.vaddr != NULL)
10051 		iwx_dma_contig_free(&sc->ict_dma);
10052 
10053 fail1:	iwx_dma_contig_free(&sc->ctxt_info_dma);
10054 	return;
10055 }
10056 
10057 #if NBPFILTER > 0
10058 void
10059 iwx_radiotap_attach(struct iwx_softc *sc)
10060 {
10061 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
10062 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
10063 
10064 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
10065 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
10066 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);
10067 
10068 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
10069 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
10070 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
10071 }
10072 #endif
10073 
10074 void
10075 iwx_init_task(void *arg1)
10076 {
10077 	struct iwx_softc *sc = arg1;
10078 	struct ifnet *ifp = &sc->sc_ic.ic_if;
10079 	int s = splnet();
10080 	int generation = sc->sc_generation;
10081 	int fatal = (sc->sc_flags & (IWX_FLAG_HW_ERR | IWX_FLAG_RFKILL));
10082 
10083 	rw_enter_write(&sc->ioctl_rwl);
10084 	if (generation != sc->sc_generation) {
10085 		rw_exit(&sc->ioctl_rwl);
10086 		splx(s);
10087 		return;
10088 	}
10089 
10090 	if (ifp->if_flags & IFF_RUNNING)
10091 		iwx_stop(ifp);
10092 	else
10093 		sc->sc_flags &= ~IWX_FLAG_HW_ERR;
10094 
10095 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
10096 		iwx_init(ifp);
10097 
10098 	rw_exit(&sc->ioctl_rwl);
10099 	splx(s);
10100 }
10101 
10102 void
10103 iwx_resume(struct iwx_softc *sc)
10104 {
10105 	pcireg_t reg;
10106 
10107 	/*
10108 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
10109 	 * PCI Tx retries from interfering with C3 CPU state.
10110 	 */
10111 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
10112 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
10113 
10114 	if (!sc->sc_msix) {
10115 		/* Hardware bug workaround. */
10116 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
10117 		    PCI_COMMAND_STATUS_REG);
10118 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
10119 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
10120 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
10121 		    PCI_COMMAND_STATUS_REG, reg);
10122 	}
10123 
10124 	iwx_disable_interrupts(sc);
10125 }
10126 
10127 int
10128 iwx_wakeup(struct iwx_softc *sc)
10129 {
10130 	struct ieee80211com *ic = &sc->sc_ic;
10131 	struct ifnet *ifp = &sc->sc_ic.ic_if;
10132 	int err;
10133 
10134 	err = iwx_start_hw(sc);
10135 	if (err)
10136 		return err;
10137 
10138 	err = iwx_init_hw(sc);
10139 	if (err)
10140 		return err;
10141 
10142 	refcnt_init(&sc->task_refs);
10143 	ifq_clr_oactive(&ifp->if_snd);
10144 	ifp->if_flags |= IFF_RUNNING;
10145 
10146 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
10147 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
10148 	else
10149 		ieee80211_begin_scan(ifp);
10150 
10151 	return 0;
10152 }
10153 
10154 int
10155 iwx_activate(struct device *self, int act)
10156 {
10157 	struct iwx_softc *sc = (struct iwx_softc *)self;
10158 	struct ifnet *ifp = &sc->sc_ic.ic_if;
10159 	int err = 0;
10160 
10161 	switch (act) {
10162 	case DVACT_QUIESCE:
10163 		if (ifp->if_flags & IFF_RUNNING) {
10164 			rw_enter_write(&sc->ioctl_rwl);
10165 			iwx_stop(ifp);
10166 			rw_exit(&sc->ioctl_rwl);
10167 		}
10168 		break;
10169 	case DVACT_RESUME:
10170 		iwx_resume(sc);
10171 		break;
10172 	case DVACT_WAKEUP:
10173 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
10174 			err = iwx_wakeup(sc);
10175 			if (err)
10176 				printf("%s: could not initialize hardware\n",
10177 				    DEVNAME(sc));
10178 		}
10179 		break;
10180 	}
10181 
10182 	return 0;
10183 }
10184 
10185 struct cfdriver iwx_cd = {
10186 	NULL, "iwx", DV_IFNET
10187 };
10188 
10189 const struct cfattach iwx_ca = {
10190 	sizeof(struct iwx_softc), iwx_match, iwx_attach,
10191 	NULL, iwx_activate
10192 };
10193