xref: /openbsd-src/sys/dev/pci/if_iwx.c (revision f1dd7b858388b4a23f4f67a4957ec5ff656ebbe8)
1 /*	$OpenBSD: if_iwx.c,v 1.56 2021/05/16 15:10:20 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5  *   Author: Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2014 Fixup Software Ltd.
7  * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ******************************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2017 Intel Deutschland GmbH
34  * Copyright(c) 2018 - 2019 Intel Corporation
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * BSD LICENSE
46  *
47  * Copyright(c) 2017 Intel Deutschland GmbH
48  * Copyright(c) 2018 - 2019 Intel Corporation
49  * All rights reserved.
50  *
51  * Redistribution and use in source and binary forms, with or without
52  * modification, are permitted provided that the following conditions
53  * are met:
54  *
55  *  * Redistributions of source code must retain the above copyright
56  *    notice, this list of conditions and the following disclaimer.
57  *  * Redistributions in binary form must reproduce the above copyright
58  *    notice, this list of conditions and the following disclaimer in
59  *    the documentation and/or other materials provided with the
60  *    distribution.
61  *  * Neither the name Intel Corporation nor the names of its
62  *    contributors may be used to endorse or promote products derived
63  *    from this software without specific prior written permission.
64  *
65  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
66  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
67  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
68  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
69  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
70  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
71  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
72  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
73  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
74  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
75  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
76  *
77  *****************************************************************************
78  */
79 
80 /*-
81  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
82  *
83  * Permission to use, copy, modify, and distribute this software for any
84  * purpose with or without fee is hereby granted, provided that the above
85  * copyright notice and this permission notice appear in all copies.
86  *
87  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
88  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
89  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
90  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
91  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
92  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
93  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
94  */
95 
96 #include "bpfilter.h"
97 
98 #include <sys/param.h>
99 #include <sys/conf.h>
100 #include <sys/kernel.h>
101 #include <sys/malloc.h>
102 #include <sys/mbuf.h>
103 #include <sys/mutex.h>
104 #include <sys/proc.h>
105 #include <sys/rwlock.h>
106 #include <sys/socket.h>
107 #include <sys/sockio.h>
108 #include <sys/systm.h>
109 #include <sys/endian.h>
110 
111 #include <sys/refcnt.h>
112 #include <sys/task.h>
113 #include <machine/bus.h>
114 #include <machine/intr.h>
115 
116 #include <dev/pci/pcireg.h>
117 #include <dev/pci/pcivar.h>
118 #include <dev/pci/pcidevs.h>
119 
120 #if NBPFILTER > 0
121 #include <net/bpf.h>
122 #endif
123 #include <net/if.h>
124 #include <net/if_dl.h>
125 #include <net/if_media.h>
126 
127 #include <netinet/in.h>
128 #include <netinet/if_ether.h>
129 
130 #include <net80211/ieee80211_var.h>
131 #include <net80211/ieee80211_radiotap.h>
132 #include <net80211/ieee80211_priv.h> /* for SEQ_LT */
133 #undef DPRINTF /* defined in ieee80211_priv.h */
134 
135 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
136 
137 #define IC2IFP(_ic_) (&(_ic_)->ic_if)
138 
139 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
140 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
141 
142 #ifdef IWX_DEBUG
143 #define DPRINTF(x)	do { if (iwx_debug > 0) printf x; } while (0)
144 #define DPRINTFN(n, x)	do { if (iwx_debug >= (n)) printf x; } while (0)
145 int iwx_debug = 1;
146 #else
147 #define DPRINTF(x)	do { ; } while (0)
148 #define DPRINTFN(n, x)	do { ; } while (0)
149 #endif
150 
151 #include <dev/pci/if_iwxreg.h>
152 #include <dev/pci/if_iwxvar.h>
153 
154 const uint8_t iwx_nvm_channels_8000[] = {
155 	/* 2.4 GHz */
156 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
157 	/* 5 GHz */
158 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
159 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
160 	149, 153, 157, 161, 165, 169, 173, 177, 181
161 };
162 
163 static const uint8_t iwx_nvm_channels_uhb[] = {
164 	/* 2.4 GHz */
165 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
166 	/* 5 GHz */
167 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
168 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
169 	149, 153, 157, 161, 165, 169, 173, 177, 181,
170 	/* 6-7 GHz */
171 	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
172 	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
173 	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
174 	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
175 };
176 
177 #define IWX_NUM_2GHZ_CHANNELS	14
178 
179 const struct iwx_rate {
180 	uint16_t rate;
181 	uint8_t plcp;
182 	uint8_t ht_plcp;
183 } iwx_rates[] = {
184 		/* Legacy */		/* HT */
185 	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
186 	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
187 	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
188 	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
189 	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
190 	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
191 	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
192 	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
193 	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
194 	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
195 	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
196 	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
197 	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
198 	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
199 	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
200 	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
201 	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
202 	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
203 	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
204 	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
205 	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
206 };
207 #define IWX_RIDX_CCK	0
208 #define IWX_RIDX_OFDM	4
209 #define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
210 #define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
211 #define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
212 #define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
213 
214 /* Convert an MCS index into an iwx_rates[] index. */
215 const int iwx_mcs2ridx[] = {
216 	IWX_RATE_MCS_0_INDEX,
217 	IWX_RATE_MCS_1_INDEX,
218 	IWX_RATE_MCS_2_INDEX,
219 	IWX_RATE_MCS_3_INDEX,
220 	IWX_RATE_MCS_4_INDEX,
221 	IWX_RATE_MCS_5_INDEX,
222 	IWX_RATE_MCS_6_INDEX,
223 	IWX_RATE_MCS_7_INDEX,
224 	IWX_RATE_MCS_8_INDEX,
225 	IWX_RATE_MCS_9_INDEX,
226 	IWX_RATE_MCS_10_INDEX,
227 	IWX_RATE_MCS_11_INDEX,
228 	IWX_RATE_MCS_12_INDEX,
229 	IWX_RATE_MCS_13_INDEX,
230 	IWX_RATE_MCS_14_INDEX,
231 	IWX_RATE_MCS_15_INDEX,
232 };
233 
234 uint8_t	iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
235 int	iwx_is_mimo_ht_plcp(uint8_t);
236 int	iwx_is_mimo_mcs(int);
237 int	iwx_store_cscheme(struct iwx_softc *, uint8_t *, size_t);
238 int	iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
239 int	iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
240 int	iwx_apply_debug_destination(struct iwx_softc *);
241 int	iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
242 void	iwx_ctxt_info_free_fw_img(struct iwx_softc *);
243 void	iwx_ctxt_info_free_paging(struct iwx_softc *);
244 int	iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
245 	    struct iwx_context_info_dram *);
246 int	iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
247 	    uint8_t *, size_t);
248 int	iwx_set_default_calib(struct iwx_softc *, const void *);
249 void	iwx_fw_info_free(struct iwx_fw_info *);
250 int	iwx_read_firmware(struct iwx_softc *);
251 uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
252 void	iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
253 int	iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
254 int	iwx_write_mem(struct iwx_softc *, uint32_t, const void *, int);
255 int	iwx_write_mem32(struct iwx_softc *, uint32_t, uint32_t);
256 int	iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
257 int	iwx_nic_lock(struct iwx_softc *);
258 void	iwx_nic_assert_locked(struct iwx_softc *);
259 void	iwx_nic_unlock(struct iwx_softc *);
260 void	iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
261 	    uint32_t);
262 void	iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
263 void	iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
264 int	iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *, bus_size_t,
265 	    bus_size_t);
266 void	iwx_dma_contig_free(struct iwx_dma_info *);
267 int	iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
268 void	iwx_disable_rx_dma(struct iwx_softc *);
269 void	iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
270 void	iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
271 int	iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
272 void	iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
273 void	iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
274 void	iwx_enable_rfkill_int(struct iwx_softc *);
275 int	iwx_check_rfkill(struct iwx_softc *);
276 void	iwx_enable_interrupts(struct iwx_softc *);
277 void	iwx_enable_fwload_interrupt(struct iwx_softc *);
278 void	iwx_restore_interrupts(struct iwx_softc *);
279 void	iwx_disable_interrupts(struct iwx_softc *);
280 void	iwx_ict_reset(struct iwx_softc *);
281 int	iwx_set_hw_ready(struct iwx_softc *);
282 int	iwx_prepare_card_hw(struct iwx_softc *);
283 void	iwx_force_power_gating(struct iwx_softc *);
284 void	iwx_apm_config(struct iwx_softc *);
285 int	iwx_apm_init(struct iwx_softc *);
286 void	iwx_apm_stop(struct iwx_softc *);
287 int	iwx_allow_mcast(struct iwx_softc *);
288 void	iwx_init_msix_hw(struct iwx_softc *);
289 void	iwx_conf_msix_hw(struct iwx_softc *, int);
290 int	iwx_start_hw(struct iwx_softc *);
291 void	iwx_stop_device(struct iwx_softc *);
292 void	iwx_nic_config(struct iwx_softc *);
293 int	iwx_nic_rx_init(struct iwx_softc *);
294 int	iwx_nic_init(struct iwx_softc *);
295 int	iwx_enable_txq(struct iwx_softc *, int, int, int, int);
296 void	iwx_post_alive(struct iwx_softc *);
297 void	iwx_protect_session(struct iwx_softc *, struct iwx_node *, uint32_t,
298 	    uint32_t);
299 void	iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
300 void	iwx_init_channel_map(struct iwx_softc *, uint16_t *, uint32_t *, int);
301 void	iwx_setup_ht_rates(struct iwx_softc *);
302 int	iwx_mimo_enabled(struct iwx_softc *);
303 void	iwx_mac_ctxt_task(void *);
304 void	iwx_updateprot(struct ieee80211com *);
305 void	iwx_updateslot(struct ieee80211com *);
306 void	iwx_updateedca(struct ieee80211com *);
307 void	iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
308 	    uint16_t);
309 void	iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
310 int	iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
311 	    uint8_t);
312 void	iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
313 	    uint8_t);
314 void	iwx_rx_ba_session_expired(void *);
315 void	iwx_reorder_timer_expired(void *);
316 void	iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
317 	    uint16_t, uint16_t, int, int);
318 #ifdef notyet
319 int	iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
320 	    uint8_t);
321 void	iwx_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
322 	    uint8_t);
323 #endif
324 void	iwx_ba_task(void *);
325 
326 int	iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
327 int	iwx_is_valid_mac_addr(const uint8_t *);
328 int	iwx_nvm_get(struct iwx_softc *);
329 int	iwx_load_firmware(struct iwx_softc *);
330 int	iwx_start_fw(struct iwx_softc *);
331 int	iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
332 int	iwx_send_phy_cfg_cmd(struct iwx_softc *);
333 int	iwx_load_ucode_wait_alive(struct iwx_softc *);
334 int	iwx_send_dqa_cmd(struct iwx_softc *);
335 int	iwx_run_init_mvm_ucode(struct iwx_softc *, int);
336 int	iwx_config_ltr(struct iwx_softc *);
337 void	iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int);
338 int	iwx_rx_addbuf(struct iwx_softc *, int, int);
339 int	iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
340 void	iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
341 	    struct iwx_rx_data *);
342 int	iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
343 int	iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t,
344 	    struct ieee80211_rxinfo *);
345 int	iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
346 	    struct ieee80211_node *, struct ieee80211_rxinfo *);
347 void	iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t, int, int,
348 	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
349 void	iwx_rx_tx_cmd_single(struct iwx_softc *, struct iwx_rx_packet *,
350 	    struct iwx_node *);
351 void	iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
352 	    struct iwx_rx_data *);
353 void	iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
354 	    struct iwx_rx_data *);
355 int	iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
356 int	iwx_phy_ctxt_cmd_uhb(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
357 	    uint8_t, uint32_t, uint32_t);
358 int	iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
359 	    uint8_t, uint32_t, uint32_t);
360 int	iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
361 int	iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
362 	    const void *);
363 int	iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
364 	    uint32_t *);
365 int	iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
366 	    const void *, uint32_t *);
367 void	iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
368 void	iwx_cmd_done(struct iwx_softc *, int, int, int);
369 const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
370 	    struct ieee80211_frame *, struct iwx_tx_cmd_gen2 *);
371 void	iwx_tx_update_byte_tbl(struct iwx_tx_ring *, int, uint16_t, uint16_t);
372 int	iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *, int);
373 int	iwx_flush_tx_path(struct iwx_softc *);
374 int	iwx_beacon_filter_send_cmd(struct iwx_softc *,
375 	    struct iwx_beacon_filter_cmd *);
376 int	iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
377 void	iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
378 	    struct iwx_mac_power_cmd *);
379 int	iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
380 int	iwx_power_update_device(struct iwx_softc *);
381 int	iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
382 int	iwx_disable_beacon_filter(struct iwx_softc *);
383 int	iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
384 int	iwx_add_aux_sta(struct iwx_softc *);
385 int	iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
386 int	iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
387 int	iwx_config_umac_scan(struct iwx_softc *);
388 int	iwx_umac_scan(struct iwx_softc *, int);
389 void	iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
390 uint8_t	iwx_ridx2rate(struct ieee80211_rateset *, int);
391 int	iwx_rval2ridx(int);
392 void	iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *, int *);
393 void	iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
394 	    struct iwx_mac_ctx_cmd *, uint32_t);
395 void	iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
396 	    struct iwx_mac_data_sta *, int);
397 int	iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t, int);
398 int	iwx_clear_statistics(struct iwx_softc *);
399 int	iwx_update_quotas(struct iwx_softc *, struct iwx_node *, int);
400 void	iwx_add_task(struct iwx_softc *, struct taskq *, struct task *);
401 void	iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
402 int	iwx_scan(struct iwx_softc *);
403 int	iwx_bgscan(struct ieee80211com *);
404 int	iwx_umac_scan_abort(struct iwx_softc *);
405 int	iwx_scan_abort(struct iwx_softc *);
406 int	iwx_rs_rval2idx(uint8_t);
407 uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *, int);
408 int	iwx_rs_init(struct iwx_softc *, struct iwx_node *);
409 int	iwx_enable_data_tx_queues(struct iwx_softc *);
410 int	iwx_auth(struct iwx_softc *);
411 int	iwx_deauth(struct iwx_softc *);
412 int	iwx_assoc(struct iwx_softc *);
413 int	iwx_disassoc(struct iwx_softc *);
414 int	iwx_run(struct iwx_softc *);
415 int	iwx_run_stop(struct iwx_softc *);
416 struct ieee80211_node *iwx_node_alloc(struct ieee80211com *);
417 int	iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
418 	    struct ieee80211_key *);
419 void	iwx_setkey_task(void *);
420 void	iwx_delete_key(struct ieee80211com *,
421 	    struct ieee80211_node *, struct ieee80211_key *);
422 int	iwx_media_change(struct ifnet *);
423 void	iwx_newstate_task(void *);
424 int	iwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
425 void	iwx_endscan(struct iwx_softc *);
426 void	iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
427 	    struct ieee80211_node *);
428 int	iwx_sf_config(struct iwx_softc *, int);
429 int	iwx_send_bt_init_conf(struct iwx_softc *);
430 int	iwx_send_soc_conf(struct iwx_softc *);
431 int	iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
432 int	iwx_send_temp_report_ths_cmd(struct iwx_softc *);
433 int	iwx_init_hw(struct iwx_softc *);
434 int	iwx_init(struct ifnet *);
435 void	iwx_start(struct ifnet *);
436 void	iwx_stop(struct ifnet *);
437 void	iwx_watchdog(struct ifnet *);
438 int	iwx_ioctl(struct ifnet *, u_long, caddr_t);
439 const char *iwx_desc_lookup(uint32_t);
440 void	iwx_nic_error(struct iwx_softc *);
441 void	iwx_nic_umac_error(struct iwx_softc *);
442 int	iwx_detect_duplicate(struct iwx_softc *, struct mbuf *,
443 	    struct iwx_rx_mpdu_desc *, struct ieee80211_rxinfo *);
444 int	iwx_is_sn_less(uint16_t, uint16_t, uint16_t);
445 void	iwx_release_frames(struct iwx_softc *, struct ieee80211_node *,
446 	    struct iwx_rxba_data *, struct iwx_reorder_buffer *, uint16_t,
447 	    struct mbuf_list *);
448 int	iwx_oldsn_workaround(struct iwx_softc *, struct ieee80211_node *,
449 	    int, struct iwx_reorder_buffer *, uint32_t, uint32_t);
450 int	iwx_rx_reorder(struct iwx_softc *, struct mbuf *, int,
451 	    struct iwx_rx_mpdu_desc *, int, int, uint32_t,
452 	    struct ieee80211_rxinfo *, struct mbuf_list *);
453 void	iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t,
454 	    struct mbuf_list *);
455 int	iwx_rx_pkt_valid(struct iwx_rx_packet *);
456 void	iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
457 	    struct mbuf_list *);
458 void	iwx_notif_intr(struct iwx_softc *);
459 int	iwx_intr(void *);
460 int	iwx_intr_msix(void *);
461 int	iwx_match(struct device *, void *, void *);
462 int	iwx_preinit(struct iwx_softc *);
463 void	iwx_attach_hook(struct device *);
464 void	iwx_attach(struct device *, struct device *, void *);
465 void	iwx_init_task(void *);
466 int	iwx_activate(struct device *, int);
467 int	iwx_resume(struct iwx_softc *);
468 
469 #if NBPFILTER > 0
470 void	iwx_radiotap_attach(struct iwx_softc *);
471 #endif
472 
473 uint8_t
474 iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
475 {
476 	const struct iwx_fw_cmd_version *entry;
477 	int i;
478 
479 	for (i = 0; i < sc->n_cmd_versions; i++) {
480 		entry = &sc->cmd_versions[i];
481 		if (entry->group == grp && entry->cmd == cmd)
482 			return entry->cmd_ver;
483 	}
484 
485 	return IWX_FW_CMD_VER_UNKNOWN;
486 }
487 
488 int
489 iwx_is_mimo_ht_plcp(uint8_t ht_plcp)
490 {
491 	return (ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP &&
492 	    (ht_plcp & IWX_RATE_HT_MCS_NSS_MSK));
493 }
494 
495 int
496 iwx_is_mimo_mcs(int mcs)
497 {
498 	int ridx = iwx_mcs2ridx[mcs];
499 	return iwx_is_mimo_ht_plcp(iwx_rates[ridx].ht_plcp);
500 
501 }
502 
503 int
504 iwx_store_cscheme(struct iwx_softc *sc, uint8_t *data, size_t dlen)
505 {
506 	struct iwx_fw_cscheme_list *l = (void *)data;
507 
508 	if (dlen < sizeof(*l) ||
509 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
510 		return EINVAL;
511 
512 	/* we don't actually store anything for now, always use s/w crypto */
513 
514 	return 0;
515 }
516 
517 int
518 iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
519     const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
520 {
521 	int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 0);
522 	if (err) {
523 		printf("%s: could not allocate context info DMA memory\n",
524 		    DEVNAME(sc));
525 		return err;
526 	}
527 
528 	memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
529 
530 	return 0;
531 }
532 
533 void iwx_ctxt_info_free_paging(struct iwx_softc *sc)
534 {
535 	struct iwx_self_init_dram *dram = &sc->init_dram;
536 	int i;
537 
538 	if (!dram->paging)
539 		return;
540 
541 	/* free paging*/
542 	for (i = 0; i < dram->paging_cnt; i++)
543 		iwx_dma_contig_free(dram->paging);
544 
545 	free(dram->paging, M_DEVBUF, dram->paging_cnt * sizeof(*dram->paging));
546 	dram->paging_cnt = 0;
547 	dram->paging = NULL;
548 }
549 
550 int
551 iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
552 {
553 	int i = 0;
554 
555 	while (start < fws->fw_count &&
556 	       fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
557 	       fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
558 		start++;
559 		i++;
560 	}
561 
562 	return i;
563 }
564 
565 int
566 iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
567     struct iwx_context_info_dram *ctxt_dram)
568 {
569 	struct iwx_self_init_dram *dram = &sc->init_dram;
570 	int i, ret, fw_cnt = 0;
571 
572 	KASSERT(dram->paging == NULL);
573 
574 	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
575 	/* add 1 due to separator */
576 	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
577 	/* add 2 due to separators */
578 	dram->paging_cnt = iwx_get_num_sections(fws,
579 	    dram->lmac_cnt + dram->umac_cnt + 2);
580 
581 	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
582 	    sizeof(*dram->fw), M_DEVBUF,  M_ZERO | M_NOWAIT);
583 	if (!dram->fw) {
584 		printf("%s: could not allocate memory for firmware sections\n",
585 		    DEVNAME(sc));
586 		return ENOMEM;
587 	}
588 
589 	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
590 	    M_DEVBUF, M_ZERO | M_NOWAIT);
591 	if (!dram->paging) {
592 		printf("%s: could not allocate memory for firmware paging\n",
593 		    DEVNAME(sc));
594 		return ENOMEM;
595 	}
596 
597 	/* initialize lmac sections */
598 	for (i = 0; i < dram->lmac_cnt; i++) {
599 		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
600 						   &dram->fw[fw_cnt]);
601 		if (ret)
602 			return ret;
603 		ctxt_dram->lmac_img[i] =
604 			htole64(dram->fw[fw_cnt].paddr);
605 		DPRINTF(("%s: firmware LMAC section %d at 0x%llx size %lld\n", __func__, i,
606 		    (unsigned long long)dram->fw[fw_cnt].paddr,
607 		    (unsigned long long)dram->fw[fw_cnt].size));
608 		fw_cnt++;
609 	}
610 
611 	/* initialize umac sections */
612 	for (i = 0; i < dram->umac_cnt; i++) {
613 		/* access FW with +1 to make up for lmac separator */
614 		ret = iwx_ctxt_info_alloc_dma(sc,
615 		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
616 		if (ret)
617 			return ret;
618 		ctxt_dram->umac_img[i] =
619 			htole64(dram->fw[fw_cnt].paddr);
620 		DPRINTF(("%s: firmware UMAC section %d at 0x%llx size %lld\n", __func__, i,
621 			(unsigned long long)dram->fw[fw_cnt].paddr,
622 			(unsigned long long)dram->fw[fw_cnt].size));
623 		fw_cnt++;
624 	}
625 
626 	/*
627 	 * Initialize paging.
628 	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
629 	 * stored separately.
630 	 * This is since the timing of its release is different -
631 	 * while fw memory can be released on alive, the paging memory can be
632 	 * freed only when the device goes down.
633 	 * Given that, the logic here in accessing the fw image is a bit
634 	 * different - fw_cnt isn't changing so loop counter is added to it.
635 	 */
636 	for (i = 0; i < dram->paging_cnt; i++) {
637 		/* access FW with +2 to make up for lmac & umac separators */
638 		int fw_idx = fw_cnt + i + 2;
639 
640 		ret = iwx_ctxt_info_alloc_dma(sc,
641 		    &fws->fw_sect[fw_idx], &dram->paging[i]);
642 		if (ret)
643 			return ret;
644 
645 		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
646 		DPRINTF(("%s: firmware paging section %d at 0x%llx size %lld\n", __func__, i,
647 		    (unsigned long long)dram->paging[i].paddr,
648 		    (unsigned long long)dram->paging[i].size));
649 	}
650 
651 	return 0;
652 }
653 
654 int
655 iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
656     uint8_t min_power)
657 {
658 	struct iwx_dma_info *fw_mon = &sc->fw_mon;
659 	uint32_t size = 0;
660 	uint8_t power;
661 	int err;
662 
663 	if (fw_mon->size)
664 		return 0;
665 
666 	for (power = max_power; power >= min_power; power--) {
667 		size = (1 << power);
668 
669 		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
670 		if (err)
671 			continue;
672 
673 		DPRINTF(("%s: allocated 0x%08x bytes for firmware monitor.\n",
674 			 DEVNAME(sc), size));
675 		break;
676 	}
677 
678 	if (err) {
679 		fw_mon->size = 0;
680 		return err;
681 	}
682 
683 	if (power != max_power)
684 		DPRINTF(("%s: Sorry - debug buffer is only %luK while you requested %luK\n",
685 			DEVNAME(sc), (unsigned long)(1 << (power - 10)),
686 			(unsigned long)(1 << (max_power - 10))));
687 
688 	return 0;
689 }
690 
691 int
692 iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
693 {
694 	if (!max_power) {
695 		/* default max_power is maximum */
696 		max_power = 26;
697 	} else {
698 		max_power += 11;
699 	}
700 
701 	if (max_power > 26) {
702 		 DPRINTF(("%s: External buffer size for monitor is too big %d, "
703 		     "check the FW TLV\n", DEVNAME(sc), max_power));
704 		return 0;
705 	}
706 
707 	if (sc->fw_mon.size)
708 		return 0;
709 
710 	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
711 }
712 
713 int
714 iwx_apply_debug_destination(struct iwx_softc *sc)
715 {
716 	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
717 	int i, err;
718 	uint8_t mon_mode, size_power, base_shift, end_shift;
719 	uint32_t base_reg, end_reg;
720 
721 	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
722 	mon_mode = dest_v1->monitor_mode;
723 	size_power = dest_v1->size_power;
724 	base_reg = le32toh(dest_v1->base_reg);
725 	end_reg = le32toh(dest_v1->end_reg);
726 	base_shift = dest_v1->base_shift;
727 	end_shift = dest_v1->end_shift;
728 
729 	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));
730 
731 	if (mon_mode == EXTERNAL_MODE) {
732 		err = iwx_alloc_fw_monitor(sc, size_power);
733 		if (err)
734 			return err;
735 	}
736 
737 	if (!iwx_nic_lock(sc))
738 		return EBUSY;
739 
740 	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
741 		uint32_t addr, val;
742 		uint8_t op;
743 
744 		addr = le32toh(dest_v1->reg_ops[i].addr);
745 		val = le32toh(dest_v1->reg_ops[i].val);
746 		op = dest_v1->reg_ops[i].op;
747 
748 		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
749 		switch (op) {
750 		case CSR_ASSIGN:
751 			IWX_WRITE(sc, addr, val);
752 			break;
753 		case CSR_SETBIT:
754 			IWX_SETBITS(sc, addr, (1 << val));
755 			break;
756 		case CSR_CLEARBIT:
757 			IWX_CLRBITS(sc, addr, (1 << val));
758 			break;
759 		case PRPH_ASSIGN:
760 			iwx_write_prph(sc, addr, val);
761 			break;
762 		case PRPH_SETBIT:
763 			iwx_set_bits_prph(sc, addr, (1 << val));
764 			break;
765 		case PRPH_CLEARBIT:
766 			iwx_clear_bits_prph(sc, addr, (1 << val));
767 			break;
768 		case PRPH_BLOCKBIT:
769 			if (iwx_read_prph(sc, addr) & (1 << val))
770 				goto monitor;
771 			break;
772 		default:
773 			DPRINTF(("%s: FW debug - unknown OP %d\n",
774 			    DEVNAME(sc), op));
775 			break;
776 		}
777 	}
778 
779 monitor:
780 	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
781 		iwx_write_prph(sc, le32toh(base_reg),
782 		    sc->fw_mon.paddr >> base_shift);
783 		iwx_write_prph(sc, end_reg,
784 		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
785 		    >> end_shift);
786 	}
787 
788 	iwx_nic_unlock(sc);
789 	return 0;
790 }
791 
792 int
793 iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
794 {
795 	struct iwx_context_info *ctxt_info;
796 	struct iwx_context_info_rbd_cfg *rx_cfg;
797 	uint32_t control_flags = 0, rb_size;
798 	uint64_t paddr;
799 	int err;
800 
801 	ctxt_info = sc->ctxt_info_dma.vaddr;
802 
803 	ctxt_info->version.version = 0;
804 	ctxt_info->version.mac_id =
805 		htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
806 	/* size is in DWs */
807 	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);
808 
809 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22560)
810 		rb_size = IWX_CTXT_INFO_RB_SIZE_2K;
811 	else
812 		rb_size = IWX_CTXT_INFO_RB_SIZE_4K;
813 
814 	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF);
815 	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
816 			(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
817 			 IWX_CTXT_INFO_RB_CB_SIZE_POS) |
818 			(rb_size << IWX_CTXT_INFO_RB_SIZE_POS);
819 	ctxt_info->control.control_flags = htole32(control_flags);
820 
821 	/* initialize RX default queue */
822 	rx_cfg = &ctxt_info->rbd_cfg;
823 	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
824 	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
825 	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);
826 
827 	/* initialize TX command queue */
828 	ctxt_info->hcmd_cfg.cmd_queue_addr =
829 		htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
830 	ctxt_info->hcmd_cfg.cmd_queue_size =
831 		IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
832 
833 	/* allocate ucode sections in dram and set addresses */
834 	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
835 	if (err) {
836 		iwx_ctxt_info_free_fw_img(sc);
837 		return err;
838 	}
839 
840 	/* Configure debug, if exists */
841 	if (sc->sc_fw.dbg_dest_tlv_v1) {
842 		err = iwx_apply_debug_destination(sc);
843 		if (err) {
844 			iwx_ctxt_info_free_fw_img(sc);
845 			return err;
846 		}
847 	}
848 
849 	/*
850 	 * Write the context info DMA base address. The device expects a
851 	 * 64-bit address but a simple bus_space_write_8 to this register
852 	 * won't work on some devices, such as the AX201.
853 	 */
854 	paddr = sc->ctxt_info_dma.paddr;
855 	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
856 	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);
857 
858 	/* kick FW self load */
859 	if (!iwx_nic_lock(sc))
860 		return EBUSY;
861 	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
862 	iwx_nic_unlock(sc);
863 
864 	/* Context info will be released upon alive or failure to get one */
865 
866 	return 0;
867 }
868 
869 void
870 iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
871 {
872 	struct iwx_self_init_dram *dram = &sc->init_dram;
873 	int i;
874 
875 	if (!dram->fw)
876 		return;
877 
878 	for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
879 		iwx_dma_contig_free(&dram->fw[i]);
880 
881 	free(dram->fw, M_DEVBUF,
882 	    (dram->lmac_cnt + dram->umac_cnt) * sizeof(*dram->fw));
883 	dram->lmac_cnt = 0;
884 	dram->umac_cnt = 0;
885 	dram->fw = NULL;
886 }
887 
888 int
889 iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
890     uint8_t *data, size_t dlen)
891 {
892 	struct iwx_fw_sects *fws;
893 	struct iwx_fw_onesect *fwone;
894 
895 	if (type >= IWX_UCODE_TYPE_MAX)
896 		return EINVAL;
897 	if (dlen < sizeof(uint32_t))
898 		return EINVAL;
899 
900 	fws = &sc->sc_fw.fw_sects[type];
901 	DPRINTF(("%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count));
902 	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
903 		return EINVAL;
904 
905 	fwone = &fws->fw_sect[fws->fw_count];
906 
907 	/* first 32bit are device load offset */
908 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
909 
910 	/* rest is data */
911 	fwone->fws_data = data + sizeof(uint32_t);
912 	fwone->fws_len = dlen - sizeof(uint32_t);
913 
914 	fws->fw_count++;
915 	fws->fw_totlen += fwone->fws_len;
916 
917 	return 0;
918 }
919 
920 #define IWX_DEFAULT_SCAN_CHANNELS	40
921 /* Newer firmware might support more channels. Raise this value if needed. */
922 #define IWX_MAX_SCAN_CHANNELS		52 /* as of 8265-34 firmware image */
923 
924 struct iwx_tlv_calib_data {
925 	uint32_t ucode_type;
926 	struct iwx_tlv_calib_ctrl calib;
927 } __packed;
928 
929 int
930 iwx_set_default_calib(struct iwx_softc *sc, const void *data)
931 {
932 	const struct iwx_tlv_calib_data *def_calib = data;
933 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
934 
935 	if (ucode_type >= IWX_UCODE_TYPE_MAX)
936 		return EINVAL;
937 
938 	sc->sc_default_calib[ucode_type].flow_trigger =
939 	    def_calib->calib.flow_trigger;
940 	sc->sc_default_calib[ucode_type].event_trigger =
941 	    def_calib->calib.event_trigger;
942 
943 	return 0;
944 }
945 
946 void
947 iwx_fw_info_free(struct iwx_fw_info *fw)
948 {
949 	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
950 	fw->fw_rawdata = NULL;
951 	fw->fw_rawsize = 0;
952 	/* don't touch fw->fw_status */
953 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
954 }
955 
956 #define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
957 
958 int
959 iwx_read_firmware(struct iwx_softc *sc)
960 {
961 	struct iwx_fw_info *fw = &sc->sc_fw;
962 	struct iwx_tlv_ucode_header *uhdr;
963 	struct iwx_ucode_tlv tlv;
964 	uint32_t tlv_type;
965 	uint8_t *data;
966 	int err;
967 	size_t len;
968 
969 	if (fw->fw_status == IWX_FW_STATUS_DONE)
970 		return 0;
971 
972 	while (fw->fw_status == IWX_FW_STATUS_INPROGRESS)
973 		tsleep_nsec(&sc->sc_fw, 0, "iwxfwp", INFSLP);
974 	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
975 
976 	if (fw->fw_rawdata != NULL)
977 		iwx_fw_info_free(fw);
978 
979 	err = loadfirmware(sc->sc_fwname,
980 	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
981 	if (err) {
982 		printf("%s: could not read firmware %s (error %d)\n",
983 		    DEVNAME(sc), sc->sc_fwname, err);
984 		goto out;
985 	}
986 
987 	sc->sc_capaflags = 0;
988 	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
989 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
990 
991 	uhdr = (void *)fw->fw_rawdata;
992 	if (*(uint32_t *)fw->fw_rawdata != 0
993 	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
994 		printf("%s: invalid firmware %s\n",
995 		    DEVNAME(sc), sc->sc_fwname);
996 		err = EINVAL;
997 		goto out;
998 	}
999 
1000 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
1001 	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
1002 	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
1003 	    IWX_UCODE_API(le32toh(uhdr->ver)));
1004 	data = uhdr->data;
1005 	len = fw->fw_rawsize - sizeof(*uhdr);
1006 
1007 	while (len >= sizeof(tlv)) {
1008 		size_t tlv_len;
1009 		void *tlv_data;
1010 
1011 		memcpy(&tlv, data, sizeof(tlv));
1012 		tlv_len = le32toh(tlv.length);
1013 		tlv_type = le32toh(tlv.type);
1014 
1015 		len -= sizeof(tlv);
1016 		data += sizeof(tlv);
1017 		tlv_data = data;
1018 
1019 		if (len < tlv_len) {
1020 			printf("%s: firmware too short: %zu bytes\n",
1021 			    DEVNAME(sc), len);
1022 			err = EINVAL;
1023 			goto parse_out;
1024 		}
1025 
1026 		switch (tlv_type) {
1027 		case IWX_UCODE_TLV_PROBE_MAX_LEN:
1028 			if (tlv_len < sizeof(uint32_t)) {
1029 				err = EINVAL;
1030 				goto parse_out;
1031 			}
1032 			sc->sc_capa_max_probe_len
1033 			    = le32toh(*(uint32_t *)tlv_data);
1034 			if (sc->sc_capa_max_probe_len >
1035 			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
1036 				err = EINVAL;
1037 				goto parse_out;
1038 			}
1039 			break;
1040 		case IWX_UCODE_TLV_PAN:
1041 			if (tlv_len) {
1042 				err = EINVAL;
1043 				goto parse_out;
1044 			}
1045 			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
1046 			break;
1047 		case IWX_UCODE_TLV_FLAGS:
1048 			if (tlv_len < sizeof(uint32_t)) {
1049 				err = EINVAL;
1050 				goto parse_out;
1051 			}
1052 			/*
1053 			 * Apparently there can be many flags, but Linux driver
1054 			 * parses only the first one, and so do we.
1055 			 *
1056 			 * XXX: why does this override IWX_UCODE_TLV_PAN?
1057 			 * Intentional or a bug?  Observations from
1058 			 * current firmware file:
1059 			 *  1) TLV_PAN is parsed first
1060 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
1061 			 * ==> this resets TLV_PAN to itself... hnnnk
1062 			 */
1063 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
1064 			break;
1065 		case IWX_UCODE_TLV_CSCHEME:
1066 			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
1067 			if (err)
1068 				goto parse_out;
1069 			break;
1070 		case IWX_UCODE_TLV_NUM_OF_CPU: {
1071 			uint32_t num_cpu;
1072 			if (tlv_len != sizeof(uint32_t)) {
1073 				err = EINVAL;
1074 				goto parse_out;
1075 			}
1076 			num_cpu = le32toh(*(uint32_t *)tlv_data);
1077 			if (num_cpu < 1 || num_cpu > 2) {
1078 				err = EINVAL;
1079 				goto parse_out;
1080 			}
1081 			break;
1082 		}
1083 		case IWX_UCODE_TLV_SEC_RT:
1084 			err = iwx_firmware_store_section(sc,
1085 			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
1086 			if (err)
1087 				goto parse_out;
1088 			break;
1089 		case IWX_UCODE_TLV_SEC_INIT:
1090 			err = iwx_firmware_store_section(sc,
1091 			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
1092 			if (err)
1093 				goto parse_out;
1094 			break;
1095 		case IWX_UCODE_TLV_SEC_WOWLAN:
1096 			err = iwx_firmware_store_section(sc,
1097 			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
1098 			if (err)
1099 				goto parse_out;
1100 			break;
1101 		case IWX_UCODE_TLV_DEF_CALIB:
1102 			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
1103 				err = EINVAL;
1104 				goto parse_out;
1105 			}
1106 			err = iwx_set_default_calib(sc, tlv_data);
1107 			if (err)
1108 				goto parse_out;
1109 			break;
1110 		case IWX_UCODE_TLV_PHY_SKU:
1111 			if (tlv_len != sizeof(uint32_t)) {
1112 				err = EINVAL;
1113 				goto parse_out;
1114 			}
1115 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
1116 			break;
1117 
1118 		case IWX_UCODE_TLV_API_CHANGES_SET: {
1119 			struct iwx_ucode_api *api;
1120 			int idx, i;
1121 			if (tlv_len != sizeof(*api)) {
1122 				err = EINVAL;
1123 				goto parse_out;
1124 			}
1125 			api = (struct iwx_ucode_api *)tlv_data;
1126 			idx = le32toh(api->api_index);
1127 			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
1128 				err = EINVAL;
1129 				goto parse_out;
1130 			}
1131 			for (i = 0; i < 32; i++) {
1132 				if ((le32toh(api->api_flags) & (1 << i)) == 0)
1133 					continue;
1134 				setbit(sc->sc_ucode_api, i + (32 * idx));
1135 			}
1136 			break;
1137 		}
1138 
1139 		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
1140 			struct iwx_ucode_capa *capa;
1141 			int idx, i;
1142 			if (tlv_len != sizeof(*capa)) {
1143 				err = EINVAL;
1144 				goto parse_out;
1145 			}
1146 			capa = (struct iwx_ucode_capa *)tlv_data;
1147 			idx = le32toh(capa->api_index);
1148 			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
1149 				goto parse_out;
1150 			}
1151 			for (i = 0; i < 32; i++) {
1152 				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
1153 					continue;
1154 				setbit(sc->sc_enabled_capa, i + (32 * idx));
1155 			}
1156 			break;
1157 		}
1158 
1159 		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
1160 		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
1161 			/* ignore, not used by current driver */
1162 			break;
1163 
1164 		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
1165 			err = iwx_firmware_store_section(sc,
1166 			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
1167 			    tlv_len);
1168 			if (err)
1169 				goto parse_out;
1170 			break;
1171 
1172 		case IWX_UCODE_TLV_PAGING:
1173 			if (tlv_len != sizeof(uint32_t)) {
1174 				err = EINVAL;
1175 				goto parse_out;
1176 			}
1177 			break;
1178 
1179 		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
1180 			if (tlv_len != sizeof(uint32_t)) {
1181 				err = EINVAL;
1182 				goto parse_out;
1183 			}
1184 			sc->sc_capa_n_scan_channels =
1185 			  le32toh(*(uint32_t *)tlv_data);
1186 			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
1187 				err = ERANGE;
1188 				goto parse_out;
1189 			}
1190 			break;
1191 
1192 		case IWX_UCODE_TLV_FW_VERSION:
1193 			if (tlv_len != sizeof(uint32_t) * 3) {
1194 				err = EINVAL;
1195 				goto parse_out;
1196 			}
1197 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
1198 			    "%u.%u.%u",
1199 			    le32toh(((uint32_t *)tlv_data)[0]),
1200 			    le32toh(((uint32_t *)tlv_data)[1]),
1201 			    le32toh(((uint32_t *)tlv_data)[2]));
1202 			break;
1203 
1204 		case IWX_UCODE_TLV_FW_DBG_DEST: {
1205 			struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
1206 
1207 			fw->dbg_dest_ver = (uint8_t *)tlv_data;
1208 			if (*fw->dbg_dest_ver != 0) {
1209 				err = EINVAL;
1210 				goto parse_out;
1211 			}
1212 
1213 			if (fw->dbg_dest_tlv_init)
1214 				break;
1215 			fw->dbg_dest_tlv_init = true;
1216 
1217 			dest_v1 = (void *)tlv_data;
1218 			fw->dbg_dest_tlv_v1 = dest_v1;
1219 			fw->n_dest_reg = tlv_len -
1220 			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
1221 			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
1222 			DPRINTF(("%s: found debug dest; n_dest_reg=%d\n", __func__, fw->n_dest_reg));
1223 			break;
1224 		}
1225 
1226 		case IWX_UCODE_TLV_FW_DBG_CONF: {
1227 			struct iwx_fw_dbg_conf_tlv *conf = (void *)tlv_data;
1228 
1229 			if (!fw->dbg_dest_tlv_init ||
1230 			    conf->id >= nitems(fw->dbg_conf_tlv) ||
1231 			    fw->dbg_conf_tlv[conf->id] != NULL)
1232 				break;
1233 
1234 			DPRINTF(("Found debug configuration: %d\n", conf->id));
1235 			fw->dbg_conf_tlv[conf->id] = conf;
1236 			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
1237 			break;
1238 		}
1239 
1240 		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
1241 			struct iwx_umac_debug_addrs *dbg_ptrs =
1242 				(void *)tlv_data;
1243 
1244 			if (tlv_len != sizeof(*dbg_ptrs)) {
1245 				err = EINVAL;
1246 				goto parse_out;
1247 			}
1248 			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1249 				break;
1250 			sc->sc_uc.uc_umac_error_event_table =
1251 				le32toh(dbg_ptrs->error_info_addr) &
1252 				~IWX_FW_ADDR_CACHE_CONTROL;
1253 			sc->sc_uc.error_event_table_tlv_status |=
1254 				IWX_ERROR_EVENT_TABLE_UMAC;
1255 			break;
1256 		}
1257 
1258 		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
1259 			struct iwx_lmac_debug_addrs *dbg_ptrs =
1260 				(void *)tlv_data;
1261 
1262 			if (tlv_len != sizeof(*dbg_ptrs)) {
1263 				err = EINVAL;
1264 				goto parse_out;
1265 			}
1266 			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1267 				break;
1268 			sc->sc_uc.uc_lmac_error_event_table[0] =
1269 				le32toh(dbg_ptrs->error_event_table_ptr) &
1270 				~IWX_FW_ADDR_CACHE_CONTROL;
1271 			sc->sc_uc.error_event_table_tlv_status |=
1272 				IWX_ERROR_EVENT_TABLE_LMAC1;
1273 			break;
1274 		}
1275 
1276 		case IWX_UCODE_TLV_FW_MEM_SEG:
1277 			break;
1278 
1279 		case IWX_UCODE_TLV_CMD_VERSIONS:
1280 			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
1281 				tlv_len /= sizeof(struct iwx_fw_cmd_version);
1282 				tlv_len *= sizeof(struct iwx_fw_cmd_version);
1283 			}
1284 			if (sc->n_cmd_versions != 0) {
1285 				err = EINVAL;
1286 				goto parse_out;
1287 			}
1288 			if (tlv_len > sizeof(sc->cmd_versions)) {
1289 				err = EINVAL;
1290 				goto parse_out;
1291 			}
1292 			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
1293 			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
1294 			break;
1295 
1296 		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
1297 			break;
1298 
1299 		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
1300 			break;
1301 
1302 		/* undocumented TLVs found in iwx-cc-a0-46 image */
1303 		case 58:
1304 		case 0x1000003:
1305 		case 0x1000004:
1306 			break;
1307 
1308 		/* undocumented TLVs found in iwx-cc-a0-48 image */
1309 		case 0x1000000:
1310 		case 0x1000002:
1311 			break;
1312 
1313 		default:
1314 			err = EINVAL;
1315 			goto parse_out;
1316 		}
1317 
1318 		len -= roundup(tlv_len, 4);
1319 		data += roundup(tlv_len, 4);
1320 	}
1321 
1322 	KASSERT(err == 0);
1323 
1324  parse_out:
1325 	if (err) {
1326 		printf("%s: firmware parse error %d, "
1327 		    "section type %d\n", DEVNAME(sc), err, tlv_type);
1328 	}
1329 
1330  out:
1331 	if (err) {
1332 		fw->fw_status = IWX_FW_STATUS_NONE;
1333 		if (fw->fw_rawdata != NULL)
1334 			iwx_fw_info_free(fw);
1335 	} else
1336 		fw->fw_status = IWX_FW_STATUS_DONE;
1337 	wakeup(&sc->sc_fw);
1338 
1339 	return err;
1340 }
1341 
1342 uint32_t
1343 iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
1344 {
1345 	iwx_nic_assert_locked(sc);
1346 	IWX_WRITE(sc,
1347 	    IWX_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
1348 	IWX_BARRIER_READ_WRITE(sc);
1349 	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
1350 }
1351 
1352 void
1353 iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1354 {
1355 	iwx_nic_assert_locked(sc);
1356 	IWX_WRITE(sc,
1357 	    IWX_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
1358 	IWX_BARRIER_WRITE(sc);
1359 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
1360 }
1361 
1362 void
1363 iwx_write_prph64(struct iwx_softc *sc, uint64_t addr, uint64_t val)
1364 {
1365 	iwx_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1366 	iwx_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1367 }
1368 
1369 int
1370 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1371 {
1372 	int offs, err = 0;
1373 	uint32_t *vals = buf;
1374 
1375 	if (iwx_nic_lock(sc)) {
1376 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1377 		for (offs = 0; offs < dwords; offs++)
1378 			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1379 		iwx_nic_unlock(sc);
1380 	} else {
1381 		err = EBUSY;
1382 	}
1383 	return err;
1384 }
1385 
1386 int
1387 iwx_write_mem(struct iwx_softc *sc, uint32_t addr, const void *buf, int dwords)
1388 {
1389 	int offs;
1390 	const uint32_t *vals = buf;
1391 
1392 	if (iwx_nic_lock(sc)) {
1393 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WADDR, addr);
1394 		/* WADDR auto-increments */
1395 		for (offs = 0; offs < dwords; offs++) {
1396 			uint32_t val = vals ? vals[offs] : 0;
1397 			IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WDAT, val);
1398 		}
1399 		iwx_nic_unlock(sc);
1400 	} else {
1401 		return EBUSY;
1402 	}
1403 	return 0;
1404 }
1405 
1406 int
1407 iwx_write_mem32(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1408 {
1409 	return iwx_write_mem(sc, addr, &val, 1);
1410 }
1411 
1412 int
1413 iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1414     int timo)
1415 {
1416 	for (;;) {
1417 		if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1418 			return 1;
1419 		}
1420 		if (timo < 10) {
1421 			return 0;
1422 		}
1423 		timo -= 10;
1424 		DELAY(10);
1425 	}
1426 }
1427 
1428 int
1429 iwx_nic_lock(struct iwx_softc *sc)
1430 {
1431 	if (sc->sc_nic_locks > 0) {
1432 		iwx_nic_assert_locked(sc);
1433 		sc->sc_nic_locks++;
1434 		return 1; /* already locked */
1435 	}
1436 
1437 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1438 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1439 
1440 	DELAY(2);
1441 
1442 	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
1443 	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1444 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1445 	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1446 		sc->sc_nic_locks++;
1447 		return 1;
1448 	}
1449 
1450 	printf("%s: acquiring device failed\n", DEVNAME(sc));
1451 	return 0;
1452 }
1453 
1454 void
1455 iwx_nic_assert_locked(struct iwx_softc *sc)
1456 {
1457 	uint32_t reg = IWX_READ(sc, IWX_CSR_GP_CNTRL);
1458 	if ((reg & IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) == 0)
1459 		panic("%s: mac clock not ready", DEVNAME(sc));
1460 	if (reg & IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)
1461 		panic("%s: mac gone to sleep", DEVNAME(sc));
1462 	if (sc->sc_nic_locks <= 0)
1463 		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1464 }
1465 
1466 void
1467 iwx_nic_unlock(struct iwx_softc *sc)
1468 {
1469 	if (sc->sc_nic_locks > 0) {
1470 		if (--sc->sc_nic_locks == 0)
1471 			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1472 			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1473 	} else
1474 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1475 }
1476 
1477 void
1478 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1479     uint32_t mask)
1480 {
1481 	uint32_t val;
1482 
1483 	/* XXX: no error path? */
1484 	if (iwx_nic_lock(sc)) {
1485 		val = iwx_read_prph(sc, reg) & mask;
1486 		val |= bits;
1487 		iwx_write_prph(sc, reg, val);
1488 		iwx_nic_unlock(sc);
1489 	}
1490 }
1491 
1492 void
1493 iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1494 {
1495 	iwx_set_bits_mask_prph(sc, reg, bits, ~0);
1496 }
1497 
1498 void
1499 iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1500 {
1501 	iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
1502 }
1503 
1504 int
1505 iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
1506     bus_size_t size, bus_size_t alignment)
1507 {
1508 	int nsegs, err;
1509 	caddr_t va;
1510 
1511 	dma->tag = tag;
1512 	dma->size = size;
1513 
1514 	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1515 	    &dma->map);
1516 	if (err)
1517 		goto fail;
1518 
1519 	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1520 	    BUS_DMA_NOWAIT);
1521 	if (err)
1522 		goto fail;
1523 
1524 	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1525 	    BUS_DMA_NOWAIT);
1526 	if (err)
1527 		goto fail;
1528 	dma->vaddr = va;
1529 
1530 	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1531 	    BUS_DMA_NOWAIT);
1532 	if (err)
1533 		goto fail;
1534 
1535 	memset(dma->vaddr, 0, size);
1536 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1537 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1538 
1539 	return 0;
1540 
1541 fail:	iwx_dma_contig_free(dma);
1542 	return err;
1543 }
1544 
1545 void
1546 iwx_dma_contig_free(struct iwx_dma_info *dma)
1547 {
1548 	if (dma->map != NULL) {
1549 		if (dma->vaddr != NULL) {
1550 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1551 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1552 			bus_dmamap_unload(dma->tag, dma->map);
1553 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1554 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1555 			dma->vaddr = NULL;
1556 		}
1557 		bus_dmamap_destroy(dma->tag, dma->map);
1558 		dma->map = NULL;
1559 	}
1560 }
1561 
1562 int
1563 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1564 {
1565 	bus_size_t size;
1566 	int i, err;
1567 
1568 	ring->cur = 0;
1569 
1570 	/* Allocate RX descriptors (256-byte aligned). */
1571 	size = IWX_RX_MQ_RING_COUNT * sizeof(uint64_t);
1572 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
1573 	if (err) {
1574 		printf("%s: could not allocate RX ring DMA memory\n",
1575 		    DEVNAME(sc));
1576 		goto fail;
1577 	}
1578 	ring->desc = ring->free_desc_dma.vaddr;
1579 
1580 	/* Allocate RX status area (16-byte aligned). */
1581 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1582 	    sizeof(*ring->stat), 16);
1583 	if (err) {
1584 		printf("%s: could not allocate RX status DMA memory\n",
1585 		    DEVNAME(sc));
1586 		goto fail;
1587 	}
1588 	ring->stat = ring->stat_dma.vaddr;
1589 
1590 	size = IWX_RX_MQ_RING_COUNT * sizeof(uint32_t);
1591 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1592 	    size, 256);
1593 	if (err) {
1594 		printf("%s: could not allocate RX ring DMA memory\n",
1595 		    DEVNAME(sc));
1596 		goto fail;
1597 	}
1598 
1599 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1600 		struct iwx_rx_data *data = &ring->data[i];
1601 
1602 		memset(data, 0, sizeof(*data));
1603 		err = bus_dmamap_create(sc->sc_dmat, IWX_RBUF_SIZE, 1,
1604 		    IWX_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1605 		    &data->map);
1606 		if (err) {
1607 			printf("%s: could not create RX buf DMA map\n",
1608 			    DEVNAME(sc));
1609 			goto fail;
1610 		}
1611 
1612 		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
1613 		if (err)
1614 			goto fail;
1615 	}
1616 	return 0;
1617 
1618 fail:	iwx_free_rx_ring(sc, ring);
1619 	return err;
1620 }
1621 
1622 void
1623 iwx_disable_rx_dma(struct iwx_softc *sc)
1624 {
1625 	int ntries;
1626 
1627 	if (iwx_nic_lock(sc)) {
1628 		iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
1629 		for (ntries = 0; ntries < 1000; ntries++) {
1630 			if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
1631 			    IWX_RXF_DMA_IDLE)
1632 				break;
1633 			DELAY(10);
1634 		}
1635 		iwx_nic_unlock(sc);
1636 	}
1637 }
1638 
1639 void
1640 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1641 {
1642 	ring->cur = 0;
1643 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1644 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1645 	memset(ring->stat, 0, sizeof(*ring->stat));
1646 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1647 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1648 
1649 }
1650 
1651 void
1652 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1653 {
1654 	int i;
1655 
1656 	iwx_dma_contig_free(&ring->free_desc_dma);
1657 	iwx_dma_contig_free(&ring->stat_dma);
1658 	iwx_dma_contig_free(&ring->used_desc_dma);
1659 
1660 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1661 		struct iwx_rx_data *data = &ring->data[i];
1662 
1663 		if (data->m != NULL) {
1664 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1665 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1666 			bus_dmamap_unload(sc->sc_dmat, data->map);
1667 			m_freem(data->m);
1668 			data->m = NULL;
1669 		}
1670 		if (data->map != NULL)
1671 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1672 	}
1673 }
1674 
1675 int
1676 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
1677 {
1678 	bus_addr_t paddr;
1679 	bus_size_t size;
1680 	int i, err;
1681 
1682 	ring->qid = qid;
1683 	ring->queued = 0;
1684 	ring->cur = 0;
1685 	ring->tail = 0;
1686 
1687 	/* Allocate TX descriptors (256-byte aligned). */
1688 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
1689 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1690 	if (err) {
1691 		printf("%s: could not allocate TX ring DMA memory\n",
1692 		    DEVNAME(sc));
1693 		goto fail;
1694 	}
1695 	ring->desc = ring->desc_dma.vaddr;
1696 
1697 	/*
1698 	 * There is no need to allocate DMA buffers for unused rings.
1699 	 * The hardware supports up to 31 Tx rings which is more
1700 	 * than we currently need.
1701 	 *
1702 	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
1703 	 * The command is queue 0 (sc->txq[0]), and 4 mgmt/data frame queues
1704 	 * are sc->tqx[ac + IWX_DQA_AUX_QUEUE + 1], i.e. sc->txq[2:5],
1705 	 * in order to provide one queue per EDCA category.
1706 	 *
1707 	 * Tx aggregation will require additional queues (one queue per TID
1708 	 * for which aggregation is enabled) but we do not implement this yet.
1709 	 */
1710 	if (qid > IWX_DQA_MIN_MGMT_QUEUE)
1711 		return 0;
1712 
1713 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl,
1714 	    sizeof(struct iwx_agn_scd_bc_tbl), 0);
1715 	if (err) {
1716 		printf("%s: could not allocate byte count table DMA memory\n",
1717 		    DEVNAME(sc));
1718 		goto fail;
1719 	}
1720 
1721 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
1722 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
1723 	    IWX_FIRST_TB_SIZE_ALIGN);
1724 	if (err) {
1725 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1726 		goto fail;
1727 	}
1728 	ring->cmd = ring->cmd_dma.vaddr;
1729 
1730 	paddr = ring->cmd_dma.paddr;
1731 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1732 		struct iwx_tx_data *data = &ring->data[i];
1733 		size_t mapsize;
1734 
1735 		data->cmd_paddr = paddr;
1736 		paddr += sizeof(struct iwx_device_cmd);
1737 
1738 		/* FW commands may require more mapped space than packets. */
1739 		if (qid == IWX_DQA_CMD_QUEUE)
1740 			mapsize = (sizeof(struct iwx_cmd_header) +
1741 			    IWX_MAX_CMD_PAYLOAD_SIZE);
1742 		else
1743 			mapsize = MCLBYTES;
1744 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
1745 		    IWX_TFH_NUM_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1746 		    &data->map);
1747 		if (err) {
1748 			printf("%s: could not create TX buf DMA map\n",
1749 			    DEVNAME(sc));
1750 			goto fail;
1751 		}
1752 	}
1753 	KASSERT(paddr == ring->cmd_dma.paddr + size);
1754 	return 0;
1755 
1756 fail:	iwx_free_tx_ring(sc, ring);
1757 	return err;
1758 }
1759 
1760 void
1761 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1762 {
1763 	int i;
1764 
1765 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1766 		struct iwx_tx_data *data = &ring->data[i];
1767 
1768 		if (data->m != NULL) {
1769 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1770 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1771 			bus_dmamap_unload(sc->sc_dmat, data->map);
1772 			m_freem(data->m);
1773 			data->m = NULL;
1774 		}
1775 	}
1776 
1777 	/* Clear byte count table. */
1778 	memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
1779 
1780 	/* Clear TX descriptors. */
1781 	memset(ring->desc, 0, ring->desc_dma.size);
1782 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1783 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1784 	sc->qfullmsk &= ~(1 << ring->qid);
1785 	ring->queued = 0;
1786 	ring->cur = 0;
1787 	ring->tail = 0;
1788 }
1789 
1790 void
1791 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1792 {
1793 	int i;
1794 
1795 	iwx_dma_contig_free(&ring->desc_dma);
1796 	iwx_dma_contig_free(&ring->cmd_dma);
1797 	iwx_dma_contig_free(&ring->bc_tbl);
1798 
1799 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1800 		struct iwx_tx_data *data = &ring->data[i];
1801 
1802 		if (data->m != NULL) {
1803 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1804 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1805 			bus_dmamap_unload(sc->sc_dmat, data->map);
1806 			m_freem(data->m);
1807 			data->m = NULL;
1808 		}
1809 		if (data->map != NULL)
1810 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1811 	}
1812 }
1813 
1814 void
1815 iwx_enable_rfkill_int(struct iwx_softc *sc)
1816 {
1817 	if (!sc->sc_msix) {
1818 		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
1819 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1820 	} else {
1821 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1822 		    sc->sc_fh_init_mask);
1823 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1824 		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1825 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1826 	}
1827 
1828 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1829 	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1830 }
1831 
1832 int
1833 iwx_check_rfkill(struct iwx_softc *sc)
1834 {
1835 	uint32_t v;
1836 	int s;
1837 	int rv;
1838 
1839 	s = splnet();
1840 
1841 	/*
1842 	 * "documentation" is not really helpful here:
1843 	 *  27:	HW_RF_KILL_SW
1844 	 *	Indicates state of (platform's) hardware RF-Kill switch
1845 	 *
1846 	 * But apparently when it's off, it's on ...
1847 	 */
1848 	v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
1849 	rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1850 	if (rv) {
1851 		sc->sc_flags |= IWX_FLAG_RFKILL;
1852 	} else {
1853 		sc->sc_flags &= ~IWX_FLAG_RFKILL;
1854 	}
1855 
1856 	splx(s);
1857 	return rv;
1858 }
1859 
1860 void
1861 iwx_enable_interrupts(struct iwx_softc *sc)
1862 {
1863 	if (!sc->sc_msix) {
1864 		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
1865 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1866 	} else {
1867 		/*
1868 		 * fh/hw_mask keeps all the unmasked causes.
1869 		 * Unlike msi, in msix cause is enabled when it is unset.
1870 		 */
1871 		sc->sc_hw_mask = sc->sc_hw_init_mask;
1872 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1873 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1874 		    ~sc->sc_fh_mask);
1875 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1876 		    ~sc->sc_hw_mask);
1877 	}
1878 }
1879 
1880 void
1881 iwx_enable_fwload_interrupt(struct iwx_softc *sc)
1882 {
1883 	if (!sc->sc_msix) {
1884 		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
1885 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1886 	} else {
1887 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1888 		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
1889 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
1890 		/*
1891 		 * Leave all the FH causes enabled to get the ALIVE
1892 		 * notification.
1893 		 */
1894 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1895 		    ~sc->sc_fh_init_mask);
1896 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1897 	}
1898 }
1899 
1900 void
1901 iwx_restore_interrupts(struct iwx_softc *sc)
1902 {
1903 	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1904 }
1905 
1906 void
1907 iwx_disable_interrupts(struct iwx_softc *sc)
1908 {
1909 	int s = splnet();
1910 
1911 	if (!sc->sc_msix) {
1912 		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
1913 
1914 		/* acknowledge all interrupts */
1915 		IWX_WRITE(sc, IWX_CSR_INT, ~0);
1916 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
1917 	} else {
1918 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1919 		    sc->sc_fh_init_mask);
1920 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1921 		    sc->sc_hw_init_mask);
1922 	}
1923 
1924 	splx(s);
1925 }
1926 
1927 void
1928 iwx_ict_reset(struct iwx_softc *sc)
1929 {
1930 	iwx_disable_interrupts(sc);
1931 
1932 	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
1933 	sc->ict_cur = 0;
1934 
1935 	/* Set physical address of ICT (4KB aligned). */
1936 	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
1937 	    IWX_CSR_DRAM_INT_TBL_ENABLE
1938 	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
1939 	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
1940 	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);
1941 
1942 	/* Switch to ICT interrupt mode in driver. */
1943 	sc->sc_flags |= IWX_FLAG_USE_ICT;
1944 
1945 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
1946 	iwx_enable_interrupts(sc);
1947 }
1948 
1949 #define IWX_HW_READY_TIMEOUT 50
1950 int
1951 iwx_set_hw_ready(struct iwx_softc *sc)
1952 {
1953 	int ready;
1954 
1955 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
1956 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1957 
1958 	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
1959 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1960 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1961 	    IWX_HW_READY_TIMEOUT);
1962 	if (ready)
1963 		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
1964 		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);
1965 
1966 	return ready;
1967 }
1968 #undef IWX_HW_READY_TIMEOUT
1969 
1970 int
1971 iwx_prepare_card_hw(struct iwx_softc *sc)
1972 {
1973 	int t = 0;
1974 
1975 	if (iwx_set_hw_ready(sc))
1976 		return 0;
1977 
1978 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
1979 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1980 	DELAY(1000);
1981 
1982 
1983 	/* If HW is not ready, prepare the conditions to check again */
1984 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
1985 	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);
1986 
1987 	do {
1988 		if (iwx_set_hw_ready(sc))
1989 			return 0;
1990 		DELAY(200);
1991 		t += 200;
1992 	} while (t < 150000);
1993 
1994 	return ETIMEDOUT;
1995 }
1996 
1997 void
1998 iwx_force_power_gating(struct iwx_softc *sc)
1999 {
2000 	iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2001 	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2002 	DELAY(20);
2003 	iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2004 	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
2005 	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
2006 	DELAY(20);
2007 	iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2008 	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2009 }
2010 
2011 void
2012 iwx_apm_config(struct iwx_softc *sc)
2013 {
2014 	pcireg_t lctl, cap;
2015 
2016 	/*
2017 	 * L0S states have been found to be unstable with our devices
2018 	 * and in newer hardware they are not officially supported at
2019 	 * all, so we must always set the L0S_DISABLED bit.
2020 	 */
2021 	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
2022 
2023 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2024 	    sc->sc_cap_off + PCI_PCIE_LCSR);
2025 	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
2026 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2027 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
2028 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
2029 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
2030 	    DEVNAME(sc),
2031 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
2032 	    sc->sc_ltr_enabled ? "En" : "Dis"));
2033 }
2034 
2035 /*
2036  * Start up NIC's basic functionality after it has been reset
2037  * e.g. after platform boot or shutdown.
2038  * NOTE:  This does not load uCode nor start the embedded processor
2039  */
2040 int
2041 iwx_apm_init(struct iwx_softc *sc)
2042 {
2043 	int err = 0;
2044 
2045 	/*
2046 	 * Disable L0s without affecting L1;
2047 	 *  don't wait for ICH L0s (ICH bug W/A)
2048 	 */
2049 	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
2050 	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
2051 
2052 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
2053 	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
2054 
2055 	/*
2056 	 * Enable HAP INTA (interrupt from management bus) to
2057 	 * wake device's PCI Express link L1a -> L0s
2058 	 */
2059 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2060 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
2061 
2062 	iwx_apm_config(sc);
2063 
2064 	/*
2065 	 * Set "initialization complete" bit to move adapter from
2066 	 * D0U* --> D0A* (powered-up active) state.
2067 	 */
2068 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2069 
2070 	/*
2071 	 * Wait for clock stabilization; once stabilized, access to
2072 	 * device-internal resources is supported, e.g. iwx_write_prph()
2073 	 * and accesses to uCode SRAM.
2074 	 */
2075 	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2076 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2077 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2078 		printf("%s: timeout waiting for clock stabilization\n",
2079 		    DEVNAME(sc));
2080 		err = ETIMEDOUT;
2081 		goto out;
2082 	}
2083  out:
2084 	if (err)
2085 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
2086 	return err;
2087 }
2088 
2089 void
2090 iwx_apm_stop(struct iwx_softc *sc)
2091 {
2092 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2093 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2094 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2095 	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
2096 	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
2097 	DELAY(1000);
2098 	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2099 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2100 	DELAY(5000);
2101 
2102 	/* stop device's busmaster DMA activity */
2103 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);
2104 
2105 	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
2106 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
2107 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
2108 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
2109 
2110 	/*
2111 	 * Clear "initialization complete" bit to move adapter from
2112 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
2113 	 */
2114 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2115 	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2116 }
2117 
2118 void
2119 iwx_init_msix_hw(struct iwx_softc *sc)
2120 {
2121 	iwx_conf_msix_hw(sc, 0);
2122 
2123 	if (!sc->sc_msix)
2124 		return;
2125 
2126 	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
2127 	sc->sc_fh_mask = sc->sc_fh_init_mask;
2128 	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
2129 	sc->sc_hw_mask = sc->sc_hw_init_mask;
2130 }
2131 
2132 void
2133 iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
2134 {
2135 	int vector = 0;
2136 
2137 	if (!sc->sc_msix) {
2138 		/* Newer chips default to MSIX. */
2139 		if (!stopped && iwx_nic_lock(sc)) {
2140 			iwx_write_prph(sc, IWX_UREG_CHICK,
2141 			    IWX_UREG_CHICK_MSI_ENABLE);
2142 			iwx_nic_unlock(sc);
2143 		}
2144 		return;
2145 	}
2146 
2147 	if (!stopped && iwx_nic_lock(sc)) {
2148 		iwx_write_prph(sc, IWX_UREG_CHICK, IWX_UREG_CHICK_MSIX_ENABLE);
2149 		iwx_nic_unlock(sc);
2150 	}
2151 
2152 	/* Disable all interrupts */
2153 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
2154 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
2155 
2156 	/* Map fallback-queue (command/mgmt) to a single vector */
2157 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
2158 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2159 	/* Map RSS queue (data) to the same vector */
2160 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
2161 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2162 
2163 	/* Enable the RX queues cause interrupts */
2164 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2165 	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
2166 
2167 	/* Map non-RX causes to the same vector */
2168 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
2169 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2170 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
2171 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2172 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
2173 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2174 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
2175 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2176 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
2177 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2178 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
2179 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2180 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_IML),
2181 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2182 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
2183 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2184 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
2185 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2186 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
2187 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2188 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
2189 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2190 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
2191 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2192 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
2193 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2194 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
2195 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2196 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
2197 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2198 
2199 	/* Enable non-RX causes interrupts */
2200 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2201 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2202 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2203 	    IWX_MSIX_FH_INT_CAUSES_S2D |
2204 	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
2205 	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2206 	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
2207 	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2208 	    IWX_MSIX_HW_INT_CAUSES_REG_IML |
2209 	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2210 	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2211 	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2212 	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2213 	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
2214 	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
2215 	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2216 	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
2217 }
2218 
2219 int
2220 iwx_start_hw(struct iwx_softc *sc)
2221 {
2222 	int err;
2223 	int t = 0;
2224 
2225 	err = iwx_prepare_card_hw(sc);
2226 	if (err)
2227 		return err;
2228 
2229 	/* Reset the entire device */
2230 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2231 	DELAY(5000);
2232 
2233 	if (sc->sc_integrated) {
2234 		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2235 		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2236 		DELAY(20);
2237 		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2238 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2239 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2240 			printf("%s: timeout waiting for clock stabilization\n",
2241 			    DEVNAME(sc));
2242 			return ETIMEDOUT;
2243 		}
2244 
2245 		iwx_force_power_gating(sc);
2246 
2247 		/* Reset the entire device */
2248 		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2249 		DELAY(5000);
2250 	}
2251 
2252 	err = iwx_apm_init(sc);
2253 	if (err)
2254 		return err;
2255 
2256 	iwx_init_msix_hw(sc);
2257 
2258 	while (t < 150000 && !iwx_set_hw_ready(sc)) {
2259 		DELAY(200);
2260 		t += 200;
2261 		if (iwx_set_hw_ready(sc)) {
2262 			break;
2263 		}
2264 	}
2265 	if (t >= 150000)
2266 		return ETIMEDOUT;
2267 
2268 	iwx_enable_rfkill_int(sc);
2269 	iwx_check_rfkill(sc);
2270 
2271 	return 0;
2272 }
2273 
2274 void
2275 iwx_stop_device(struct iwx_softc *sc)
2276 {
2277 	int qid;
2278 
2279 	iwx_disable_interrupts(sc);
2280 	sc->sc_flags &= ~IWX_FLAG_USE_ICT;
2281 
2282 	iwx_disable_rx_dma(sc);
2283 	iwx_reset_rx_ring(sc, &sc->rxq);
2284 	for (qid = 0; qid < nitems(sc->txq); qid++)
2285 		iwx_reset_tx_ring(sc, &sc->txq[qid]);
2286 
2287 	/* Make sure (redundant) we've released our request to stay awake */
2288 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2289 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2290 	if (sc->sc_nic_locks > 0)
2291 		printf("%s: %d active NIC locks forcefully cleared\n",
2292 		    DEVNAME(sc), sc->sc_nic_locks);
2293 	sc->sc_nic_locks = 0;
2294 
2295 	/* Stop the device, and put it in low power state */
2296 	iwx_apm_stop(sc);
2297 
2298 	/* Reset the on-board processor. */
2299 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2300 	DELAY(5000);
2301 
2302 	/*
2303 	 * Upon stop, the IVAR table gets erased, so msi-x won't
2304 	 * work. This causes a bug in RF-KILL flows, since the interrupt
2305 	 * that enables radio won't fire on the correct irq, and the
2306 	 * driver won't be able to handle the interrupt.
2307 	 * Configure the IVAR table again after reset.
2308 	 */
2309 	iwx_conf_msix_hw(sc, 1);
2310 
2311 	/*
2312 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2313 	 * Clear the interrupt again.
2314 	 */
2315 	iwx_disable_interrupts(sc);
2316 
2317 	/* Even though we stop the HW we still want the RF kill interrupt. */
2318 	iwx_enable_rfkill_int(sc);
2319 	iwx_check_rfkill(sc);
2320 
2321 	iwx_prepare_card_hw(sc);
2322 
2323 	iwx_ctxt_info_free_paging(sc);
2324 }
2325 
2326 void
2327 iwx_nic_config(struct iwx_softc *sc)
2328 {
2329 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2330 	uint32_t mask, val, reg_val = 0;
2331 
2332 	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2333 	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2334 	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2335 	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
2336 	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2337 	    IWX_FW_PHY_CFG_RADIO_DASH_POS;
2338 
2339 	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2340 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2341 	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2342 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2343 
2344 	/* radio configuration */
2345 	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2346 	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2347 	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2348 
2349 	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2350 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2351 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2352 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2353 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2354 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2355 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2356 
2357 	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2358 	val &= ~mask;
2359 	val |= reg_val;
2360 	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2361 }
2362 
2363 int
2364 iwx_nic_rx_init(struct iwx_softc *sc)
2365 {
2366 	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
2367 
2368 	/*
2369 	 * We don't configure the RFH; the firmware will do that.
2370 	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
2371 	 */
2372 	return 0;
2373 }
2374 
2375 int
2376 iwx_nic_init(struct iwx_softc *sc)
2377 {
2378 	int err;
2379 
2380 	iwx_apm_init(sc);
2381 	iwx_nic_config(sc);
2382 
2383 	err = iwx_nic_rx_init(sc);
2384 	if (err)
2385 		return err;
2386 
2387 	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2388 
2389 	return 0;
2390 }
2391 
2392 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2393 const uint8_t iwx_ac_to_tx_fifo[] = {
2394 	IWX_GEN2_EDCA_TX_FIFO_BE,
2395 	IWX_GEN2_EDCA_TX_FIFO_BK,
2396 	IWX_GEN2_EDCA_TX_FIFO_VI,
2397 	IWX_GEN2_EDCA_TX_FIFO_VO,
2398 };
2399 
2400 int
2401 iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
2402     int num_slots)
2403 {
2404 	struct iwx_tx_queue_cfg_cmd cmd;
2405 	struct iwx_rx_packet *pkt;
2406 	struct iwx_tx_queue_cfg_rsp *resp;
2407 	struct iwx_host_cmd hcmd = {
2408 		.id = IWX_SCD_QUEUE_CFG,
2409 		.flags = IWX_CMD_WANT_RESP,
2410 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2411 	};
2412 	struct iwx_tx_ring *ring = &sc->txq[qid];
2413 	int err, fwqid;
2414 	uint32_t wr_idx;
2415 	size_t resp_len;
2416 
2417 	iwx_reset_tx_ring(sc, ring);
2418 
2419 	memset(&cmd, 0, sizeof(cmd));
2420 	cmd.sta_id = sta_id;
2421 	cmd.tid = tid;
2422 	cmd.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
2423 	cmd.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2424 	cmd.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
2425 	cmd.tfdq_addr = htole64(ring->desc_dma.paddr);
2426 
2427 	hcmd.data[0] = &cmd;
2428 	hcmd.len[0] = sizeof(cmd);
2429 
2430 	err = iwx_send_cmd(sc, &hcmd);
2431 	if (err)
2432 		return err;
2433 
2434 	pkt = hcmd.resp_pkt;
2435 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2436 		DPRINTF(("SCD_QUEUE_CFG command failed\n"));
2437 		err = EIO;
2438 		goto out;
2439 	}
2440 
2441 	resp_len = iwx_rx_packet_payload_len(pkt);
2442 	if (resp_len != sizeof(*resp)) {
2443 		DPRINTF(("SCD_QUEUE_CFG returned %zu bytes, expected %zu bytes\n", resp_len, sizeof(*resp)));
2444 		err = EIO;
2445 		goto out;
2446 	}
2447 
2448 	resp = (void *)pkt->data;
2449 	fwqid = le16toh(resp->queue_number);
2450 	wr_idx = le16toh(resp->write_pointer);
2451 
2452 	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
2453 	if (fwqid != qid) {
2454 		DPRINTF(("requested qid %d but %d was assigned\n", qid, fwqid));
2455 		err = EIO;
2456 		goto out;
2457 	}
2458 
2459 	if (wr_idx != ring->cur) {
2460 		DPRINTF(("fw write index is %d but ring is %d\n", wr_idx, ring->cur));
2461 		err = EIO;
2462 		goto out;
2463 	}
2464 out:
2465 	iwx_free_resp(sc, &hcmd);
2466 	return err;
2467 }
2468 
2469 void
2470 iwx_post_alive(struct iwx_softc *sc)
2471 {
2472 	iwx_ict_reset(sc);
2473 }
2474 
2475 /*
2476  * For the high priority TE use a time event type that has similar priority to
2477  * the FW's action scan priority.
2478  */
2479 #define IWX_ROC_TE_TYPE_NORMAL IWX_TE_P2P_DEVICE_DISCOVERABLE
2480 #define IWX_ROC_TE_TYPE_MGMT_TX IWX_TE_P2P_CLIENT_ASSOC
2481 
2482 int
2483 iwx_send_time_event_cmd(struct iwx_softc *sc,
2484     const struct iwx_time_event_cmd *cmd)
2485 {
2486 	struct iwx_rx_packet *pkt;
2487 	struct iwx_time_event_resp *resp;
2488 	struct iwx_host_cmd hcmd = {
2489 		.id = IWX_TIME_EVENT_CMD,
2490 		.flags = IWX_CMD_WANT_RESP,
2491 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2492 	};
2493 	uint32_t resp_len;
2494 	int err;
2495 
2496 	hcmd.data[0] = cmd;
2497 	hcmd.len[0] = sizeof(*cmd);
2498 	err = iwx_send_cmd(sc, &hcmd);
2499 	if (err)
2500 		return err;
2501 
2502 	pkt = hcmd.resp_pkt;
2503 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2504 		err = EIO;
2505 		goto out;
2506 	}
2507 
2508 	resp_len = iwx_rx_packet_payload_len(pkt);
2509 	if (resp_len != sizeof(*resp)) {
2510 		err = EIO;
2511 		goto out;
2512 	}
2513 
2514 	resp = (void *)pkt->data;
2515 	if (le32toh(resp->status) == 0)
2516 		sc->sc_time_event_uid = le32toh(resp->unique_id);
2517 	else
2518 		err = EIO;
2519 out:
2520 	iwx_free_resp(sc, &hcmd);
2521 	return err;
2522 }
2523 
2524 void
2525 iwx_protect_session(struct iwx_softc *sc, struct iwx_node *in,
2526     uint32_t duration, uint32_t max_delay)
2527 {
2528 	struct iwx_time_event_cmd time_cmd;
2529 
2530 	/* Do nothing if a time event is already scheduled. */
2531 	if (sc->sc_flags & IWX_FLAG_TE_ACTIVE)
2532 		return;
2533 
2534 	memset(&time_cmd, 0, sizeof(time_cmd));
2535 
2536 	time_cmd.action = htole32(IWX_FW_CTXT_ACTION_ADD);
2537 	time_cmd.id_and_color =
2538 	    htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2539 	time_cmd.id = htole32(IWX_TE_BSS_STA_AGGRESSIVE_ASSOC);
2540 
2541 	time_cmd.apply_time = htole32(0);
2542 
2543 	time_cmd.max_frags = IWX_TE_V2_FRAG_NONE;
2544 	time_cmd.max_delay = htole32(max_delay);
2545 	/* TODO: why do we need to interval = bi if it is not periodic? */
2546 	time_cmd.interval = htole32(1);
2547 	time_cmd.duration = htole32(duration);
2548 	time_cmd.repeat = 1;
2549 	time_cmd.policy
2550 	    = htole16(IWX_TE_V2_NOTIF_HOST_EVENT_START |
2551 	        IWX_TE_V2_NOTIF_HOST_EVENT_END |
2552 		IWX_T2_V2_START_IMMEDIATELY);
2553 
2554 	if (iwx_send_time_event_cmd(sc, &time_cmd) == 0)
2555 		sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
2556 
2557 	DELAY(100);
2558 }
2559 
2560 void
2561 iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
2562 {
2563 	struct iwx_time_event_cmd time_cmd;
2564 
2565 	/* Do nothing if the time event has already ended. */
2566 	if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
2567 		return;
2568 
2569 	memset(&time_cmd, 0, sizeof(time_cmd));
2570 
2571 	time_cmd.action = htole32(IWX_FW_CTXT_ACTION_REMOVE);
2572 	time_cmd.id_and_color =
2573 	    htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2574 	time_cmd.id = htole32(sc->sc_time_event_uid);
2575 
2576 	if (iwx_send_time_event_cmd(sc, &time_cmd) == 0)
2577 		sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
2578 
2579 	DELAY(100);
2580 }
2581 
2582 /*
2583  * NVM read access and content parsing.  We do not support
2584  * external NVM or writing NVM.
2585  */
2586 
2587 uint8_t
2588 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
2589 {
2590 	uint8_t tx_ant;
2591 
2592 	tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
2593 	    >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
2594 
2595 	if (sc->sc_nvm.valid_tx_ant)
2596 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2597 
2598 	return tx_ant;
2599 }
2600 
2601 uint8_t
2602 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
2603 {
2604 	uint8_t rx_ant;
2605 
2606 	rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
2607 	    >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
2608 
2609 	if (sc->sc_nvm.valid_rx_ant)
2610 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2611 
2612 	return rx_ant;
2613 }
2614 
2615 void
2616 iwx_init_channel_map(struct iwx_softc *sc, uint16_t *channel_profile_v3,
2617     uint32_t *channel_profile_v4, int nchan_profile)
2618 {
2619 	struct ieee80211com *ic = &sc->sc_ic;
2620 	struct iwx_nvm_data *data = &sc->sc_nvm;
2621 	int ch_idx;
2622 	struct ieee80211_channel *channel;
2623 	uint32_t ch_flags;
2624 	int is_5ghz;
2625 	int flags, hw_value;
2626 	int nchan;
2627 	const uint8_t *nvm_channels;
2628 
2629 	if (sc->sc_uhb_supported) {
2630 		nchan = nitems(iwx_nvm_channels_uhb);
2631 		nvm_channels = iwx_nvm_channels_uhb;
2632 	} else {
2633 		nchan = nitems(iwx_nvm_channels_8000);
2634 		nvm_channels = iwx_nvm_channels_8000;
2635 	}
2636 
2637 	for (ch_idx = 0; ch_idx < nchan && ch_idx < nchan_profile; ch_idx++) {
2638 		if (channel_profile_v4)
2639 			ch_flags = le32_to_cpup(channel_profile_v4 + ch_idx);
2640 		else
2641 			ch_flags = le16_to_cpup(channel_profile_v3 + ch_idx);
2642 
2643 		is_5ghz = ch_idx >= IWX_NUM_2GHZ_CHANNELS;
2644 		if (is_5ghz && !data->sku_cap_band_52GHz_enable)
2645 			ch_flags &= ~IWX_NVM_CHANNEL_VALID;
2646 
2647 		hw_value = nvm_channels[ch_idx];
2648 		channel = &ic->ic_channels[hw_value];
2649 
2650 		if (!(ch_flags & IWX_NVM_CHANNEL_VALID)) {
2651 			channel->ic_freq = 0;
2652 			channel->ic_flags = 0;
2653 			continue;
2654 		}
2655 
2656 		if (!is_5ghz) {
2657 			flags = IEEE80211_CHAN_2GHZ;
2658 			channel->ic_flags
2659 			    = IEEE80211_CHAN_CCK
2660 			    | IEEE80211_CHAN_OFDM
2661 			    | IEEE80211_CHAN_DYN
2662 			    | IEEE80211_CHAN_2GHZ;
2663 		} else {
2664 			flags = IEEE80211_CHAN_5GHZ;
2665 			channel->ic_flags =
2666 			    IEEE80211_CHAN_A;
2667 		}
2668 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2669 
2670 		if (!(ch_flags & IWX_NVM_CHANNEL_ACTIVE))
2671 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2672 
2673 		if (data->sku_cap_11n_enable)
2674 			channel->ic_flags |= IEEE80211_CHAN_HT;
2675 	}
2676 }
2677 
2678 int
2679 iwx_mimo_enabled(struct iwx_softc *sc)
2680 {
2681 	struct ieee80211com *ic = &sc->sc_ic;
2682 
2683 	return !sc->sc_nvm.sku_cap_mimo_disable &&
2684 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
2685 }
2686 
2687 void
2688 iwx_setup_ht_rates(struct iwx_softc *sc)
2689 {
2690 	struct ieee80211com *ic = &sc->sc_ic;
2691 	uint8_t rx_ant;
2692 
2693 	/* TX is supported with the same MCS as RX. */
2694 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2695 
2696 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
2697 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
2698 
2699 	if (!iwx_mimo_enabled(sc))
2700 		return;
2701 
2702 	rx_ant = iwx_fw_valid_rx_ant(sc);
2703 	if ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
2704 	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)
2705 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
2706 }
2707 
2708 void
2709 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
2710     uint16_t ssn, uint16_t buf_size)
2711 {
2712 	reorder_buf->head_sn = ssn;
2713 	reorder_buf->num_stored = 0;
2714 	reorder_buf->buf_size = buf_size;
2715 	reorder_buf->last_amsdu = 0;
2716 	reorder_buf->last_sub_index = 0;
2717 	reorder_buf->removed = 0;
2718 	reorder_buf->valid = 0;
2719 	reorder_buf->consec_oldsn_drops = 0;
2720 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
2721 	reorder_buf->consec_oldsn_prev_drop = 0;
2722 }
2723 
2724 void
2725 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
2726 {
2727 	int i;
2728 	struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
2729 	struct iwx_reorder_buf_entry *entry;
2730 
2731 	for (i = 0; i < reorder_buf->buf_size; i++) {
2732 		entry = &rxba->entries[i];
2733 		ml_purge(&entry->frames);
2734 		timerclear(&entry->reorder_time);
2735 	}
2736 
2737 	reorder_buf->removed = 1;
2738 	timeout_del(&reorder_buf->reorder_timer);
2739 	timerclear(&rxba->last_rx);
2740 	timeout_del(&rxba->session_timer);
2741 	rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
2742 }
2743 
2744 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
2745 
2746 void
2747 iwx_rx_ba_session_expired(void *arg)
2748 {
2749 	struct iwx_rxba_data *rxba = arg;
2750 	struct iwx_softc *sc = rxba->sc;
2751 	struct ieee80211com *ic = &sc->sc_ic;
2752 	struct ieee80211_node *ni = ic->ic_bss;
2753 	struct timeval now, timeout, expiry;
2754 	int s;
2755 
2756 	s = splnet();
2757 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0 &&
2758 	    ic->ic_state == IEEE80211_S_RUN &&
2759 	    rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
2760 		getmicrouptime(&now);
2761 		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
2762 		timeradd(&rxba->last_rx, &timeout, &expiry);
2763 		if (timercmp(&now, &expiry, <)) {
2764 			timeout_add_usec(&rxba->session_timer, rxba->timeout);
2765 		} else {
2766 			ic->ic_stats.is_ht_rx_ba_timeout++;
2767 			ieee80211_delba_request(ic, ni,
2768 			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
2769 		}
2770 	}
2771 	splx(s);
2772 }
2773 
2774 void
2775 iwx_reorder_timer_expired(void *arg)
2776 {
2777 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2778 	struct iwx_reorder_buffer *buf = arg;
2779 	struct iwx_rxba_data *rxba = iwx_rxba_data_from_reorder_buf(buf);
2780 	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
2781 	struct iwx_softc *sc = rxba->sc;
2782 	struct ieee80211com *ic = &sc->sc_ic;
2783 	struct ieee80211_node *ni = ic->ic_bss;
2784 	int i, s;
2785 	uint16_t sn = 0, index = 0;
2786 	int expired = 0;
2787 	int cont = 0;
2788 	struct timeval now, timeout, expiry;
2789 
2790 	if (!buf->num_stored || buf->removed)
2791 		return;
2792 
2793 	s = splnet();
2794 	getmicrouptime(&now);
2795 	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
2796 
2797 	for (i = 0; i < buf->buf_size ; i++) {
2798 		index = (buf->head_sn + i) % buf->buf_size;
2799 
2800 		if (ml_empty(&entries[index].frames)) {
2801 			/*
2802 			 * If there is a hole and the next frame didn't expire
2803 			 * we want to break and not advance SN.
2804 			 */
2805 			cont = 0;
2806 			continue;
2807 		}
2808 		timeradd(&entries[index].reorder_time, &timeout, &expiry);
2809 		if (!cont && timercmp(&now, &expiry, <))
2810 			break;
2811 
2812 		expired = 1;
2813 		/* continue until next hole after this expired frame */
2814 		cont = 1;
2815 		sn = (buf->head_sn + (i + 1)) & 0xfff;
2816 	}
2817 
2818 	if (expired) {
2819 		/* SN is set to the last expired frame + 1 */
2820 		iwx_release_frames(sc, ni, rxba, buf, sn, &ml);
2821 		if_input(&sc->sc_ic.ic_if, &ml);
2822 		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
2823 	} else {
2824 		/*
2825 		 * If no frame expired and there are stored frames, index is now
2826 		 * pointing to the first unexpired frame - modify reorder timeout
2827 		 * accordingly.
2828 		 */
2829 		timeout_add_usec(&buf->reorder_timer,
2830 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
2831 	}
2832 
2833 	splx(s);
2834 }
2835 
2836 #define IWX_MAX_RX_BA_SESSIONS 16
2837 
2838 void
2839 iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
2840     uint16_t ssn, uint16_t winsize, int timeout_val, int start)
2841 {
2842 	struct ieee80211com *ic = &sc->sc_ic;
2843 	struct iwx_add_sta_cmd cmd;
2844 	struct iwx_node *in = (void *)ni;
2845 	int err, s;
2846 	uint32_t status;
2847 	struct iwx_rxba_data *rxba = NULL;
2848 	uint8_t baid = 0;
2849 
2850 	s = splnet();
2851 
2852 	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
2853 		ieee80211_addba_req_refuse(ic, ni, tid);
2854 		splx(s);
2855 		return;
2856 	}
2857 
2858 	memset(&cmd, 0, sizeof(cmd));
2859 
2860 	cmd.sta_id = IWX_STATION_ID;
2861 	cmd.mac_id_n_color
2862 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2863 	cmd.add_modify = IWX_STA_MODE_MODIFY;
2864 
2865 	if (start) {
2866 		cmd.add_immediate_ba_tid = (uint8_t)tid;
2867 		cmd.add_immediate_ba_ssn = htole16(ssn);
2868 		cmd.rx_ba_window = htole16(winsize);
2869 	} else {
2870 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
2871 	}
2872 	cmd.modify_mask = start ? IWX_STA_MODIFY_ADD_BA_TID :
2873 	    IWX_STA_MODIFY_REMOVE_BA_TID;
2874 
2875 	status = IWX_ADD_STA_SUCCESS;
2876 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
2877 	    &status);
2878 
2879 	if (err || (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS) {
2880 		if (start)
2881 			ieee80211_addba_req_refuse(ic, ni, tid);
2882 		splx(s);
2883 		return;
2884 	}
2885 
2886 	/* Deaggregation is done in hardware. */
2887 	if (start) {
2888 		if (!(status & IWX_ADD_STA_BAID_VALID_MASK)) {
2889 			ieee80211_addba_req_refuse(ic, ni, tid);
2890 			splx(s);
2891 			return;
2892 		}
2893 		baid = (status & IWX_ADD_STA_BAID_MASK) >>
2894 		    IWX_ADD_STA_BAID_SHIFT;
2895 		if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
2896 		    baid >= nitems(sc->sc_rxba_data)) {
2897 			ieee80211_addba_req_refuse(ic, ni, tid);
2898 			splx(s);
2899 			return;
2900 		}
2901 		rxba = &sc->sc_rxba_data[baid];
2902 		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
2903 			ieee80211_addba_req_refuse(ic, ni, tid);
2904 			splx(s);
2905 			return;
2906 		}
2907 		rxba->sta_id = IWX_STATION_ID;
2908 		rxba->tid = tid;
2909 		rxba->baid = baid;
2910 		rxba->timeout = timeout_val;
2911 		getmicrouptime(&rxba->last_rx);
2912 		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
2913 		    winsize);
2914 		if (timeout_val != 0) {
2915 			struct ieee80211_rx_ba *ba;
2916 			timeout_add_usec(&rxba->session_timer,
2917 			    timeout_val);
2918 			/* XXX disable net80211's BA timeout handler */
2919 			ba = &ni->ni_rx_ba[tid];
2920 			ba->ba_timeout_val = 0;
2921 		}
2922 	} else {
2923 		int i;
2924 		for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
2925 			rxba = &sc->sc_rxba_data[i];
2926 			if (rxba->baid ==
2927 			    IWX_RX_REORDER_DATA_INVALID_BAID)
2928 				continue;
2929 			if (rxba->tid != tid)
2930 				continue;
2931 			iwx_clear_reorder_buffer(sc, rxba);
2932 			break;
2933 		}
2934 	}
2935 
2936 	if (start) {
2937 		sc->sc_rx_ba_sessions++;
2938 		ieee80211_addba_req_accept(ic, ni, tid);
2939 	} else if (sc->sc_rx_ba_sessions > 0)
2940 		sc->sc_rx_ba_sessions--;
2941 
2942 	splx(s);
2943 }
2944 
2945 void
2946 iwx_mac_ctxt_task(void *arg)
2947 {
2948 	struct iwx_softc *sc = arg;
2949 	struct ieee80211com *ic = &sc->sc_ic;
2950 	struct iwx_node *in = (void *)ic->ic_bss;
2951 	int err, s = splnet();
2952 
2953 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
2954 		refcnt_rele_wake(&sc->task_refs);
2955 		splx(s);
2956 		return;
2957 	}
2958 
2959 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
2960 	if (err)
2961 		printf("%s: failed to update MAC\n", DEVNAME(sc));
2962 
2963 	refcnt_rele_wake(&sc->task_refs);
2964 	splx(s);
2965 }
2966 
2967 void
2968 iwx_updateprot(struct ieee80211com *ic)
2969 {
2970 	struct iwx_softc *sc = ic->ic_softc;
2971 
2972 	if (ic->ic_state == IEEE80211_S_RUN)
2973 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
2974 }
2975 
2976 void
2977 iwx_updateslot(struct ieee80211com *ic)
2978 {
2979 	struct iwx_softc *sc = ic->ic_softc;
2980 
2981 	if (ic->ic_state == IEEE80211_S_RUN)
2982 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
2983 }
2984 
2985 void
2986 iwx_updateedca(struct ieee80211com *ic)
2987 {
2988 	struct iwx_softc *sc = ic->ic_softc;
2989 
2990 	if (ic->ic_state == IEEE80211_S_RUN)
2991 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
2992 }
2993 
2994 void
2995 iwx_ba_task(void *arg)
2996 {
2997 	struct iwx_softc *sc = arg;
2998 	struct ieee80211com *ic = &sc->sc_ic;
2999 	struct ieee80211_node *ni = ic->ic_bss;
3000 	int s = splnet();
3001 	int tid;
3002 
3003 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3004 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3005 			break;
3006 		if (sc->ba_start_tidmask & (1 << tid)) {
3007 			iwx_sta_rx_agg(sc, ni, tid, sc->ba_ssn[tid],
3008 			    sc->ba_winsize[tid], sc->ba_timeout_val[tid], 1);
3009 			sc->ba_start_tidmask &= ~(1 << tid);
3010 		} else if (sc->ba_stop_tidmask & (1 << tid)) {
3011 			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3012 			sc->ba_stop_tidmask &= ~(1 << tid);
3013 		}
3014 	}
3015 
3016 	refcnt_rele_wake(&sc->task_refs);
3017 	splx(s);
3018 }
3019 
3020 /*
3021  * This function is called by upper layer when an ADDBA request is received
3022  * from another STA and before the ADDBA response is sent.
3023  */
3024 int
3025 iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3026     uint8_t tid)
3027 {
3028 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3029 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3030 
3031 	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
3032 	    tid > IWX_MAX_TID_COUNT || (sc->ba_start_tidmask & (1 << tid)))
3033 		return ENOSPC;
3034 
3035 	sc->ba_start_tidmask |= (1 << tid);
3036 	sc->ba_ssn[tid] = ba->ba_winstart;
3037 	sc->ba_winsize[tid] = ba->ba_winsize;
3038 	sc->ba_timeout_val[tid] = ba->ba_timeout_val;
3039 	iwx_add_task(sc, systq, &sc->ba_task);
3040 
3041 	return EBUSY;
3042 }
3043 
3044 /*
3045  * This function is called by upper layer on teardown of an HT-immediate
3046  * Block Ack agreement (eg. upon receipt of a DELBA frame).
3047  */
3048 void
3049 iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3050     uint8_t tid)
3051 {
3052 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3053 
3054 	if (tid > IWX_MAX_TID_COUNT || sc->ba_stop_tidmask & (1 << tid))
3055 		return;
3056 
3057 	sc->ba_stop_tidmask = (1 << tid);
3058 	iwx_add_task(sc, systq, &sc->ba_task);
3059 }
3060 
3061 /* Read the mac address from WFMP registers. */
3062 int
3063 iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
3064 {
3065 	const uint8_t *hw_addr;
3066 	uint32_t mac_addr0, mac_addr1;
3067 
3068 	if (!iwx_nic_lock(sc))
3069 		return EBUSY;
3070 
3071 	mac_addr0 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_0));
3072 	mac_addr1 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_1));
3073 
3074 	hw_addr = (const uint8_t *)&mac_addr0;
3075 	data->hw_addr[0] = hw_addr[3];
3076 	data->hw_addr[1] = hw_addr[2];
3077 	data->hw_addr[2] = hw_addr[1];
3078 	data->hw_addr[3] = hw_addr[0];
3079 
3080 	hw_addr = (const uint8_t *)&mac_addr1;
3081 	data->hw_addr[4] = hw_addr[1];
3082 	data->hw_addr[5] = hw_addr[0];
3083 
3084 	iwx_nic_unlock(sc);
3085 	return 0;
3086 }
3087 
3088 int
3089 iwx_is_valid_mac_addr(const uint8_t *addr)
3090 {
3091 	static const uint8_t reserved_mac[] = {
3092 		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3093 	};
3094 
3095 	return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3096 	    memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3097 	    memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3098 	    !ETHER_IS_MULTICAST(addr));
3099 }
3100 
3101 int
3102 iwx_nvm_get(struct iwx_softc *sc)
3103 {
3104 	struct iwx_nvm_get_info cmd = {};
3105 	struct iwx_nvm_data *nvm = &sc->sc_nvm;
3106 	struct iwx_host_cmd hcmd = {
3107 		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
3108 		.data = { &cmd, },
3109 		.len = { sizeof(cmd) },
3110 		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3111 		    IWX_NVM_GET_INFO)
3112 	};
3113 	int err;
3114 	uint32_t mac_flags;
3115 	/*
3116 	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
3117 	 * in v3, except for the channel profile part of the
3118 	 * regulatory.  So we can just access the new struct, with the
3119 	 * exception of the latter.
3120 	 */
3121 	struct iwx_nvm_get_info_rsp *rsp;
3122 	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
3123 	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
3124 	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
3125 
3126 	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
3127 	err = iwx_send_cmd(sc, &hcmd);
3128 	if (err)
3129 		return err;
3130 
3131 	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
3132 		err = EIO;
3133 		goto out;
3134 	}
3135 
3136 	memset(nvm, 0, sizeof(*nvm));
3137 
3138 	iwx_set_mac_addr_from_csr(sc, nvm);
3139 	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
3140 		printf("%s: no valid mac address was found\n", DEVNAME(sc));
3141 		err = EINVAL;
3142 		goto out;
3143 	}
3144 
3145 	rsp = (void *)hcmd.resp_pkt->data;
3146 
3147 	/* Initialize general data */
3148 	nvm->nvm_version = le16toh(rsp->general.nvm_version);
3149 	nvm->n_hw_addrs = rsp->general.n_hw_addrs;
3150 
3151 	/* Initialize MAC sku data */
3152 	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
3153 	nvm->sku_cap_11ac_enable =
3154 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
3155 	nvm->sku_cap_11n_enable =
3156 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
3157 	nvm->sku_cap_11ax_enable =
3158 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
3159 	nvm->sku_cap_band_24GHz_enable =
3160 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
3161 	nvm->sku_cap_band_52GHz_enable =
3162 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
3163 	nvm->sku_cap_mimo_disable =
3164 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
3165 
3166 	/* Initialize PHY sku data */
3167 	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
3168 	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);
3169 
3170 	if (le32toh(rsp->regulatory.lar_enabled) &&
3171 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
3172 		nvm->lar_enabled = 1;
3173 	}
3174 
3175 	if (v4) {
3176 		iwx_init_channel_map(sc, NULL,
3177 		    rsp->regulatory.channel_profile, IWX_NUM_CHANNELS);
3178 	} else {
3179 		rsp_v3 = (void *)rsp;
3180 		iwx_init_channel_map(sc, rsp_v3->regulatory.channel_profile,
3181 		    NULL, IWX_NUM_CHANNELS_V1);
3182 	}
3183 out:
3184 	iwx_free_resp(sc, &hcmd);
3185 	return err;
3186 }
3187 
3188 int
3189 iwx_load_firmware(struct iwx_softc *sc)
3190 {
3191 	struct iwx_fw_sects *fws;
3192 	int err, w;
3193 
3194 	sc->sc_uc.uc_intr = 0;
3195 
3196 	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
3197 	err = iwx_ctxt_info_init(sc, fws);
3198 	if (err) {
3199 		printf("%s: could not init context info\n", DEVNAME(sc));
3200 		return err;
3201 	}
3202 
3203 	/* wait for the firmware to load */
3204 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
3205 		err = tsleep_nsec(&sc->sc_uc, 0, "iwxuc", MSEC_TO_NSEC(100));
3206 	}
3207 	if (err || !sc->sc_uc.uc_ok)
3208 		printf("%s: could not load firmware\n", DEVNAME(sc));
3209 
3210 	iwx_ctxt_info_free_fw_img(sc);
3211 
3212 	if (!sc->sc_uc.uc_ok)
3213 		return EINVAL;
3214 
3215 	return err;
3216 }
3217 
3218 int
3219 iwx_start_fw(struct iwx_softc *sc)
3220 {
3221 	int err;
3222 
3223 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
3224 
3225 	iwx_disable_interrupts(sc);
3226 
3227 	/* make sure rfkill handshake bits are cleared */
3228 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
3229 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
3230 	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3231 
3232 	/* clear (again), then enable firwmare load interrupt */
3233 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
3234 
3235 	err = iwx_nic_init(sc);
3236 	if (err) {
3237 		printf("%s: unable to init nic\n", DEVNAME(sc));
3238 		return err;
3239 	}
3240 
3241 	iwx_enable_fwload_interrupt(sc);
3242 
3243 	return iwx_load_firmware(sc);
3244 }
3245 
3246 int
3247 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
3248 {
3249 	struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
3250 		.valid = htole32(valid_tx_ant),
3251 	};
3252 
3253 	return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
3254 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3255 }
3256 
3257 int
3258 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
3259 {
3260 	struct iwx_phy_cfg_cmd phy_cfg_cmd;
3261 
3262 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3263 	phy_cfg_cmd.calib_control.event_trigger =
3264 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
3265 	phy_cfg_cmd.calib_control.flow_trigger =
3266 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
3267 
3268 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
3269 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3270 }
3271 
3272 int
3273 iwx_send_dqa_cmd(struct iwx_softc *sc)
3274 {
3275 	struct iwx_dqa_enable_cmd dqa_cmd = {
3276 		.cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
3277 	};
3278 	uint32_t cmd_id;
3279 
3280 	cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
3281 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
3282 }
3283 
3284 int
3285 iwx_load_ucode_wait_alive(struct iwx_softc *sc)
3286 {
3287 	int err;
3288 
3289 	err = iwx_read_firmware(sc);
3290 	if (err)
3291 		return err;
3292 
3293 	err = iwx_start_fw(sc);
3294 	if (err)
3295 		return err;
3296 
3297 	iwx_post_alive(sc);
3298 
3299 	return 0;
3300 }
3301 
3302 int
3303 iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
3304 {
3305 	const int wait_flags = IWX_INIT_COMPLETE;
3306 	struct iwx_nvm_access_complete_cmd nvm_complete = {};
3307 	struct iwx_init_extended_cfg_cmd init_cfg = {
3308 		.init_flags = htole32(IWX_INIT_NVM),
3309 	};
3310 	int err;
3311 
3312 	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
3313 		printf("%s: radio is disabled by hardware switch\n",
3314 		    DEVNAME(sc));
3315 		return EPERM;
3316 	}
3317 
3318 	sc->sc_init_complete = 0;
3319 	err = iwx_load_ucode_wait_alive(sc);
3320 	if (err) {
3321 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
3322 		return err;
3323 	}
3324 
3325 	/*
3326 	 * Send init config command to mark that we are sending NVM
3327 	 * access commands
3328 	 */
3329 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
3330 	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
3331 	if (err)
3332 		return err;
3333 
3334 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3335 	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
3336 	if (err)
3337 		return err;
3338 
3339 	/* Wait for the init complete notification from the firmware. */
3340 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3341 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
3342 		    SEC_TO_NSEC(2));
3343 		if (err)
3344 			return err;
3345 	}
3346 
3347 	if (readnvm) {
3348 		err = iwx_nvm_get(sc);
3349 		if (err) {
3350 			printf("%s: failed to read nvm\n", DEVNAME(sc));
3351 			return err;
3352 		}
3353 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
3354 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
3355 			    sc->sc_nvm.hw_addr);
3356 
3357 	}
3358 	return 0;
3359 }
3360 
3361 int
3362 iwx_config_ltr(struct iwx_softc *sc)
3363 {
3364 	struct iwx_ltr_config_cmd cmd = {
3365 		.flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
3366 	};
3367 
3368 	if (!sc->sc_ltr_enabled)
3369 		return 0;
3370 
3371 	return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3372 }
3373 
3374 void
3375 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx)
3376 {
3377 	struct iwx_rx_data *data = &ring->data[idx];
3378 
3379 	((uint64_t *)ring->desc)[idx] =
3380 	    htole64(data->map->dm_segs[0].ds_addr | (idx & 0x0fff));
3381 	bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
3382 	    idx * sizeof(uint64_t), sizeof(uint64_t),
3383 	    BUS_DMASYNC_PREWRITE);
3384 }
3385 
3386 int
3387 iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
3388 {
3389 	struct iwx_rx_ring *ring = &sc->rxq;
3390 	struct iwx_rx_data *data = &ring->data[idx];
3391 	struct mbuf *m;
3392 	int err;
3393 	int fatal = 0;
3394 
3395 	m = m_gethdr(M_DONTWAIT, MT_DATA);
3396 	if (m == NULL)
3397 		return ENOBUFS;
3398 
3399 	if (size <= MCLBYTES) {
3400 		MCLGET(m, M_DONTWAIT);
3401 	} else {
3402 		MCLGETL(m, M_DONTWAIT, IWX_RBUF_SIZE);
3403 	}
3404 	if ((m->m_flags & M_EXT) == 0) {
3405 		m_freem(m);
3406 		return ENOBUFS;
3407 	}
3408 
3409 	if (data->m != NULL) {
3410 		bus_dmamap_unload(sc->sc_dmat, data->map);
3411 		fatal = 1;
3412 	}
3413 
3414 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3415 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3416 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3417 	if (err) {
3418 		/* XXX */
3419 		if (fatal)
3420 			panic("%s: could not load RX mbuf", DEVNAME(sc));
3421 		m_freem(m);
3422 		return err;
3423 	}
3424 	data->m = m;
3425 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3426 
3427 	/* Update RX descriptor. */
3428 	iwx_update_rx_desc(sc, ring, idx);
3429 
3430 	return 0;
3431 }
3432 
3433 int
3434 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
3435     struct iwx_rx_mpdu_desc *desc)
3436 {
3437 	int energy_a, energy_b;
3438 
3439 	energy_a = desc->v1.energy_a;
3440 	energy_b = desc->v1.energy_b;
3441 	energy_a = energy_a ? -energy_a : -256;
3442 	energy_b = energy_b ? -energy_b : -256;
3443 	return MAX(energy_a, energy_b);
3444 }
3445 
3446 void
3447 iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3448     struct iwx_rx_data *data)
3449 {
3450 	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
3451 
3452 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3453 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3454 
3455 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3456 }
3457 
3458 /*
3459  * Retrieve the average noise (in dBm) among receivers.
3460  */
3461 int
3462 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
3463 {
3464 	int i, total, nbant, noise;
3465 
3466 	total = nbant = noise = 0;
3467 	for (i = 0; i < 3; i++) {
3468 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
3469 		if (noise) {
3470 			total += noise;
3471 			nbant++;
3472 		}
3473 	}
3474 
3475 	/* There should be at least one antenna but check anyway. */
3476 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3477 }
3478 
3479 int
3480 iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
3481     struct ieee80211_rxinfo *rxi)
3482 {
3483 	struct ieee80211com *ic = &sc->sc_ic;
3484 	struct ieee80211_key *k;
3485 	struct ieee80211_frame *wh;
3486 	uint64_t pn, *prsc;
3487 	uint8_t *ivp;
3488 	uint8_t tid;
3489 	int hdrlen, hasqos;
3490 
3491 	wh = mtod(m, struct ieee80211_frame *);
3492 	hdrlen = ieee80211_get_hdrlen(wh);
3493 	ivp = (uint8_t *)wh + hdrlen;
3494 
3495 	/* find key for decryption */
3496 	k = ieee80211_get_rxkey(ic, m, ni);
3497 	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
3498 		return 1;
3499 
3500 	/* Check that ExtIV bit is be set. */
3501 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
3502 		return 1;
3503 
3504 	hasqos = ieee80211_has_qos(wh);
3505 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
3506 	prsc = &k->k_rsc[tid];
3507 
3508 	/* Extract the 48-bit PN from the CCMP header. */
3509 	pn = (uint64_t)ivp[0]       |
3510 	     (uint64_t)ivp[1] <<  8 |
3511 	     (uint64_t)ivp[4] << 16 |
3512 	     (uint64_t)ivp[5] << 24 |
3513 	     (uint64_t)ivp[6] << 32 |
3514 	     (uint64_t)ivp[7] << 40;
3515 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
3516 		if (pn < *prsc) {
3517 			ic->ic_stats.is_ccmp_replays++;
3518 			return 1;
3519 		}
3520 	} else if (pn <= *prsc) {
3521 		ic->ic_stats.is_ccmp_replays++;
3522 		return 1;
3523 	}
3524 	/* Last seen packet number is updated in ieee80211_inputm(). */
3525 
3526 	/*
3527 	 * Some firmware versions strip the MIC, and some don't. It is not
3528 	 * clear which of the capability flags could tell us what to expect.
3529 	 * For now, keep things simple and just leave the MIC in place if
3530 	 * it is present.
3531 	 *
3532 	 * The IV will be stripped by ieee80211_inputm().
3533 	 */
3534 	return 0;
3535 }
3536 
3537 int
3538 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
3539     struct ieee80211_rxinfo *rxi)
3540 {
3541 	struct ieee80211com *ic = &sc->sc_ic;
3542 	struct ifnet *ifp = IC2IFP(ic);
3543 	struct ieee80211_frame *wh;
3544 	struct ieee80211_node *ni;
3545 	int ret = 0;
3546 	uint8_t type, subtype;
3547 
3548 	wh = mtod(m, struct ieee80211_frame *);
3549 
3550 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3551 	if (type == IEEE80211_FC0_TYPE_CTL)
3552 		return 0;
3553 
3554 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3555 	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
3556 		return 0;
3557 
3558 	ni = ieee80211_find_rxnode(ic, wh);
3559 	/* Handle hardware decryption. */
3560 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
3561 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
3562 	    (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
3563 	    ((!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3564 	    ni->ni_rsncipher == IEEE80211_CIPHER_CCMP) ||
3565 	    (IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3566 	    ni->ni_rsngroupcipher == IEEE80211_CIPHER_CCMP))) {
3567 		if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
3568 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
3569 			ic->ic_stats.is_ccmp_dec_errs++;
3570 			ret = 1;
3571 			goto out;
3572 		}
3573 		/* Check whether decryption was successful or not. */
3574 		if ((rx_pkt_status &
3575 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
3576 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
3577 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
3578 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
3579 			ic->ic_stats.is_ccmp_dec_errs++;
3580 			ret = 1;
3581 			goto out;
3582 		}
3583 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
3584 	}
3585 out:
3586 	if (ret)
3587 		ifp->if_ierrors++;
3588 	ieee80211_release_node(ic, ni);
3589 	return ret;
3590 }
3591 
3592 void
3593 iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
3594     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
3595     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
3596     struct mbuf_list *ml)
3597 {
3598 	struct ieee80211com *ic = &sc->sc_ic;
3599 	struct ifnet *ifp = IC2IFP(ic);
3600 	struct ieee80211_frame *wh;
3601 	struct ieee80211_node *ni;
3602 	struct ieee80211_channel *bss_chan;
3603 	uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
3604 
3605 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
3606 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
3607 
3608 	wh = mtod(m, struct ieee80211_frame *);
3609 	ni = ieee80211_find_rxnode(ic, wh);
3610 	if (ni == ic->ic_bss) {
3611 		/*
3612 		 * We may switch ic_bss's channel during scans.
3613 		 * Record the current channel so we can restore it later.
3614 		 */
3615 		bss_chan = ni->ni_chan;
3616 		IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
3617 	}
3618 	ni->ni_chan = &ic->ic_channels[chanidx];
3619 
3620 	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
3621 	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
3622 		ifp->if_ierrors++;
3623 		m_freem(m);
3624 		ieee80211_release_node(ic, ni);
3625 		return;
3626 	}
3627 
3628 #if NBPFILTER > 0
3629 	if (sc->sc_drvbpf != NULL) {
3630 		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
3631 		uint16_t chan_flags;
3632 
3633 		tap->wr_flags = 0;
3634 		if (is_shortpre)
3635 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3636 		tap->wr_chan_freq =
3637 		    htole16(ic->ic_channels[chanidx].ic_freq);
3638 		chan_flags = ic->ic_channels[chanidx].ic_flags;
3639 		if (ic->ic_curmode != IEEE80211_MODE_11N)
3640 			chan_flags &= ~IEEE80211_CHAN_HT;
3641 		tap->wr_chan_flags = htole16(chan_flags);
3642 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
3643 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3644 		tap->wr_tsft = device_timestamp;
3645 		if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
3646 			uint8_t mcs = (rate_n_flags &
3647 			    (IWX_RATE_HT_MCS_RATE_CODE_MSK |
3648 			    IWX_RATE_HT_MCS_NSS_MSK));
3649 			tap->wr_rate = (0x80 | mcs);
3650 		} else {
3651 			uint8_t rate = (rate_n_flags &
3652 			    IWX_RATE_LEGACY_RATE_MSK);
3653 			switch (rate) {
3654 			/* CCK rates. */
3655 			case  10: tap->wr_rate =   2; break;
3656 			case  20: tap->wr_rate =   4; break;
3657 			case  55: tap->wr_rate =  11; break;
3658 			case 110: tap->wr_rate =  22; break;
3659 			/* OFDM rates. */
3660 			case 0xd: tap->wr_rate =  12; break;
3661 			case 0xf: tap->wr_rate =  18; break;
3662 			case 0x5: tap->wr_rate =  24; break;
3663 			case 0x7: tap->wr_rate =  36; break;
3664 			case 0x9: tap->wr_rate =  48; break;
3665 			case 0xb: tap->wr_rate =  72; break;
3666 			case 0x1: tap->wr_rate =  96; break;
3667 			case 0x3: tap->wr_rate = 108; break;
3668 			/* Unknown rate: should not happen. */
3669 			default:  tap->wr_rate =   0;
3670 			}
3671 		}
3672 
3673 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
3674 		    m, BPF_DIRECTION_IN);
3675 	}
3676 #endif
3677 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
3678 	/*
3679 	 * ieee80211_inputm() might have changed our BSS.
3680 	 * Restore ic_bss's channel if we are still in the same BSS.
3681 	 */
3682 	if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
3683 		ni->ni_chan = bss_chan;
3684 	ieee80211_release_node(ic, ni);
3685 }
3686 
3687 /*
3688  * Drop duplicate 802.11 retransmissions
3689  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
3690  * and handle pseudo-duplicate frames which result from deaggregation
3691  * of A-MSDU frames in hardware.
3692  */
3693 int
3694 iwx_detect_duplicate(struct iwx_softc *sc, struct mbuf *m,
3695     struct iwx_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
3696 {
3697 	struct ieee80211com *ic = &sc->sc_ic;
3698 	struct iwx_node *in = (void *)ic->ic_bss;
3699 	struct iwx_rxq_dup_data *dup_data = &in->dup_data;
3700 	uint8_t tid = IWX_MAX_TID_COUNT, subframe_idx;
3701 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
3702 	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3703 	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3704 	int hasqos = ieee80211_has_qos(wh);
3705 	uint16_t seq;
3706 
3707 	if (type == IEEE80211_FC0_TYPE_CTL ||
3708 	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
3709 	    IEEE80211_IS_MULTICAST(wh->i_addr1))
3710 		return 0;
3711 
3712 	if (hasqos) {
3713 		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
3714 		if (tid > IWX_MAX_TID_COUNT)
3715 			tid = IWX_MAX_TID_COUNT;
3716 	}
3717 
3718 	/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
3719 	subframe_idx = desc->amsdu_info &
3720 		IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
3721 
3722 	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
3723 	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
3724 	    dup_data->last_seq[tid] == seq &&
3725 	    dup_data->last_sub_frame[tid] >= subframe_idx)
3726 		return 1;
3727 
3728 	/*
3729 	 * Allow the same frame sequence number for all A-MSDU subframes
3730 	 * following the first subframe.
3731 	 * Otherwise these subframes would be discarded as replays.
3732 	 */
3733 	if (dup_data->last_seq[tid] == seq &&
3734 	    subframe_idx > dup_data->last_sub_frame[tid] &&
3735 	    (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU)) {
3736 		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
3737 	}
3738 
3739 	dup_data->last_seq[tid] = seq;
3740 	dup_data->last_sub_frame[tid] = subframe_idx;
3741 
3742 	return 0;
3743 }
3744 
3745 /*
3746  * Returns true if sn2 - buffer_size < sn1 < sn2.
3747  * To be used only in order to compare reorder buffer head with NSSN.
3748  * We fully trust NSSN unless it is behind us due to reorder timeout.
3749  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
3750  */
3751 int
3752 iwx_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
3753 {
3754 	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
3755 }
3756 
3757 void
3758 iwx_release_frames(struct iwx_softc *sc, struct ieee80211_node *ni,
3759     struct iwx_rxba_data *rxba, struct iwx_reorder_buffer *reorder_buf,
3760     uint16_t nssn, struct mbuf_list *ml)
3761 {
3762 	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
3763 	uint16_t ssn = reorder_buf->head_sn;
3764 
3765 	/* ignore nssn smaller than head sn - this can happen due to timeout */
3766 	if (iwx_is_sn_less(nssn, ssn, reorder_buf->buf_size))
3767 		goto set_timer;
3768 
3769 	while (iwx_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
3770 		int index = ssn % reorder_buf->buf_size;
3771 		struct mbuf *m;
3772 		int chanidx, is_shortpre;
3773 		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
3774 		struct ieee80211_rxinfo *rxi;
3775 
3776 		/* This data is the same for all A-MSDU subframes. */
3777 		chanidx = entries[index].chanidx;
3778 		rx_pkt_status = entries[index].rx_pkt_status;
3779 		is_shortpre = entries[index].is_shortpre;
3780 		rate_n_flags = entries[index].rate_n_flags;
3781 		device_timestamp = entries[index].device_timestamp;
3782 		rxi = &entries[index].rxi;
3783 
3784 		/*
3785 		 * Empty the list. Will have more than one frame for A-MSDU.
3786 		 * Empty list is valid as well since nssn indicates frames were
3787 		 * received.
3788 		 */
3789 		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
3790 			iwx_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
3791 			    rate_n_flags, device_timestamp, rxi, ml);
3792 			reorder_buf->num_stored--;
3793 
3794 			/*
3795 			 * Allow the same frame sequence number and CCMP PN for
3796 			 * all A-MSDU subframes following the first subframe.
3797 			 * Otherwise they would be discarded as replays.
3798 			 */
3799 			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
3800 			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
3801 		}
3802 
3803 		ssn = (ssn + 1) & 0xfff;
3804 	}
3805 	reorder_buf->head_sn = nssn;
3806 
3807 set_timer:
3808 	if (reorder_buf->num_stored && !reorder_buf->removed) {
3809 		timeout_add_usec(&reorder_buf->reorder_timer,
3810 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3811 	} else
3812 		timeout_del(&reorder_buf->reorder_timer);
3813 }
3814 
3815 int
3816 iwx_oldsn_workaround(struct iwx_softc *sc, struct ieee80211_node *ni, int tid,
3817     struct iwx_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
3818 {
3819 	struct ieee80211com *ic = &sc->sc_ic;
3820 
3821 	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
3822 		/* we have a new (A-)MPDU ... */
3823 
3824 		/*
3825 		 * reset counter to 0 if we didn't have any oldsn in
3826 		 * the last A-MPDU (as detected by GP2 being identical)
3827 		 */
3828 		if (!buffer->consec_oldsn_prev_drop)
3829 			buffer->consec_oldsn_drops = 0;
3830 
3831 		/* either way, update our tracking state */
3832 		buffer->consec_oldsn_ampdu_gp2 = gp2;
3833 	} else if (buffer->consec_oldsn_prev_drop) {
3834 		/*
3835 		 * tracking state didn't change, and we had an old SN
3836 		 * indication before - do nothing in this case, we
3837 		 * already noted this one down and are waiting for the
3838 		 * next A-MPDU (by GP2)
3839 		 */
3840 		return 0;
3841 	}
3842 
3843 	/* return unless this MPDU has old SN */
3844 	if (!(reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN))
3845 		return 0;
3846 
3847 	/* update state */
3848 	buffer->consec_oldsn_prev_drop = 1;
3849 	buffer->consec_oldsn_drops++;
3850 
3851 	/* if limit is reached, send del BA and reset state */
3852 	if (buffer->consec_oldsn_drops == IWX_AMPDU_CONSEC_DROPS_DELBA) {
3853 		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
3854 		    0, tid);
3855 		buffer->consec_oldsn_prev_drop = 0;
3856 		buffer->consec_oldsn_drops = 0;
3857 		return 1;
3858 	}
3859 
3860 	return 0;
3861 }
3862 
3863 /*
3864  * Handle re-ordering of frames which were de-aggregated in hardware.
3865  * Returns 1 if the MPDU was consumed (buffered or dropped).
3866  * Returns 0 if the MPDU should be passed to upper layer.
3867  */
3868 int
3869 iwx_rx_reorder(struct iwx_softc *sc, struct mbuf *m, int chanidx,
3870     struct iwx_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
3871     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
3872     struct mbuf_list *ml)
3873 {
3874 	struct ieee80211com *ic = &sc->sc_ic;
3875 	struct ieee80211_frame *wh;
3876 	struct ieee80211_node *ni;
3877 	struct iwx_rxba_data *rxba;
3878 	struct iwx_reorder_buffer *buffer;
3879 	uint32_t reorder_data = le32toh(desc->reorder_data);
3880 	int is_amsdu = (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU);
3881 	int last_subframe =
3882 		(desc->amsdu_info & IWX_RX_MPDU_AMSDU_LAST_SUBFRAME);
3883 	uint8_t tid;
3884 	uint8_t subframe_idx = (desc->amsdu_info &
3885 	    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
3886 	struct iwx_reorder_buf_entry *entries;
3887 	int index;
3888 	uint16_t nssn, sn;
3889 	uint8_t baid, type, subtype;
3890 	int hasqos;
3891 
3892 	wh = mtod(m, struct ieee80211_frame *);
3893 	hasqos = ieee80211_has_qos(wh);
3894 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
3895 
3896 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3897 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3898 	ni = ieee80211_find_rxnode(ic, wh);
3899 
3900 	/*
3901 	 * We are only interested in Block Ack requests and unicast QoS data.
3902 	 */
3903 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
3904 		return 0;
3905 	if (hasqos) {
3906 		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
3907 			return 0;
3908 	} else {
3909 		if (type != IEEE80211_FC0_TYPE_CTL ||
3910 		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
3911 			return 0;
3912 	}
3913 
3914 	baid = (reorder_data & IWX_RX_MPDU_REORDER_BAID_MASK) >>
3915 		IWX_RX_MPDU_REORDER_BAID_SHIFT;
3916 	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3917 	    baid >= nitems(sc->sc_rxba_data))
3918 		return 0;
3919 
3920 	rxba = &sc->sc_rxba_data[baid];
3921 	if (rxba == NULL || tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
3922 		return 0;
3923 
3924 	/* Bypass A-MPDU re-ordering in net80211. */
3925 	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
3926 
3927 	nssn = reorder_data & IWX_RX_MPDU_REORDER_NSSN_MASK;
3928 	sn = (reorder_data & IWX_RX_MPDU_REORDER_SN_MASK) >>
3929 		IWX_RX_MPDU_REORDER_SN_SHIFT;
3930 
3931 	buffer = &rxba->reorder_buf;
3932 	entries = &rxba->entries[0];
3933 
3934 	if (!buffer->valid) {
3935 		if (reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN)
3936 			return 0;
3937 		buffer->valid = 1;
3938 	}
3939 
3940 	if (type == IEEE80211_FC0_TYPE_CTL &&
3941 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
3942 		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
3943 		goto drop;
3944 	}
3945 
3946 	/*
3947 	 * If there was a significant jump in the nssn - adjust.
3948 	 * If the SN is smaller than the NSSN it might need to first go into
3949 	 * the reorder buffer, in which case we just release up to it and the
3950 	 * rest of the function will take care of storing it and releasing up to
3951 	 * the nssn.
3952 	 */
3953 	if (!iwx_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
3954 	    buffer->buf_size) ||
3955 	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
3956 		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
3957 		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
3958 		iwx_release_frames(sc, ni, rxba, buffer, min_sn, ml);
3959 	}
3960 
3961 	if (iwx_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
3962 	    device_timestamp)) {
3963 		 /* BA session will be torn down. */
3964 		ic->ic_stats.is_ht_rx_ba_window_jump++;
3965 		goto drop;
3966 
3967 	}
3968 
3969 	/* drop any outdated packets */
3970 	if (SEQ_LT(sn, buffer->head_sn)) {
3971 		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
3972 		goto drop;
3973 	}
3974 
3975 	/* release immediately if allowed by nssn and no stored frames */
3976 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
3977 		if (iwx_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
3978 		   (!is_amsdu || last_subframe))
3979 			buffer->head_sn = nssn;
3980 		return 0;
3981 	}
3982 
3983 	/*
3984 	 * release immediately if there are no stored frames, and the sn is
3985 	 * equal to the head.
3986 	 * This can happen due to reorder timer, where NSSN is behind head_sn.
3987 	 * When we released everything, and we got the next frame in the
3988 	 * sequence, according to the NSSN we can't release immediately,
3989 	 * while technically there is no hole and we can move forward.
3990 	 */
3991 	if (!buffer->num_stored && sn == buffer->head_sn) {
3992 		if (!is_amsdu || last_subframe)
3993 			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
3994 		return 0;
3995 	}
3996 
3997 	index = sn % buffer->buf_size;
3998 
3999 	/*
4000 	 * Check if we already stored this frame
4001 	 * As AMSDU is either received or not as whole, logic is simple:
4002 	 * If we have frames in that position in the buffer and the last frame
4003 	 * originated from AMSDU had a different SN then it is a retransmission.
4004 	 * If it is the same SN then if the subframe index is incrementing it
4005 	 * is the same AMSDU - otherwise it is a retransmission.
4006 	 */
4007 	if (!ml_empty(&entries[index].frames)) {
4008 		if (!is_amsdu) {
4009 			ic->ic_stats.is_ht_rx_ba_no_buf++;
4010 			goto drop;
4011 		} else if (sn != buffer->last_amsdu ||
4012 		    buffer->last_sub_index >= subframe_idx) {
4013 			ic->ic_stats.is_ht_rx_ba_no_buf++;
4014 			goto drop;
4015 		}
4016 	} else {
4017 		/* This data is the same for all A-MSDU subframes. */
4018 		entries[index].chanidx = chanidx;
4019 		entries[index].is_shortpre = is_shortpre;
4020 		entries[index].rate_n_flags = rate_n_flags;
4021 		entries[index].device_timestamp = device_timestamp;
4022 		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
4023 	}
4024 
4025 	/* put in reorder buffer */
4026 	ml_enqueue(&entries[index].frames, m);
4027 	buffer->num_stored++;
4028 	getmicrouptime(&entries[index].reorder_time);
4029 
4030 	if (is_amsdu) {
4031 		buffer->last_amsdu = sn;
4032 		buffer->last_sub_index = subframe_idx;
4033 	}
4034 
4035 	/*
4036 	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
4037 	 * The reason is that NSSN advances on the first sub-frame, and may
4038 	 * cause the reorder buffer to advance before all the sub-frames arrive.
4039 	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
4040 	 * SN 1. NSSN for first sub frame will be 3 with the result of driver
4041 	 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
4042 	 * already ahead and it will be dropped.
4043 	 * If the last sub-frame is not on this queue - we will get frame
4044 	 * release notification with up to date NSSN.
4045 	 */
4046 	if (!is_amsdu || last_subframe)
4047 		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
4048 
4049 	return 1;
4050 
4051 drop:
4052 	m_freem(m);
4053 	return 1;
4054 }
4055 
4056 void
4057 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
4058     size_t maxlen, struct mbuf_list *ml)
4059 {
4060 	struct ieee80211com *ic = &sc->sc_ic;
4061 	struct ieee80211_rxinfo rxi;
4062 	struct iwx_rx_mpdu_desc *desc;
4063 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4064 	int rssi;
4065 	uint8_t chanidx;
4066 	uint16_t phy_info;
4067 
4068 	desc = (struct iwx_rx_mpdu_desc *)pktdata;
4069 
4070 	if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
4071 	    !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
4072 		m_freem(m);
4073 		return; /* drop */
4074 	}
4075 
4076 	len = le16toh(desc->mpdu_len);
4077 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4078 		/* Allow control frames in monitor mode. */
4079 		if (len < sizeof(struct ieee80211_frame_cts)) {
4080 			ic->ic_stats.is_rx_tooshort++;
4081 			IC2IFP(ic)->if_ierrors++;
4082 			m_freem(m);
4083 			return;
4084 		}
4085 	} else if (len < sizeof(struct ieee80211_frame)) {
4086 		ic->ic_stats.is_rx_tooshort++;
4087 		IC2IFP(ic)->if_ierrors++;
4088 		m_freem(m);
4089 		return;
4090 	}
4091 	if (len > maxlen - sizeof(*desc)) {
4092 		IC2IFP(ic)->if_ierrors++;
4093 		m_freem(m);
4094 		return;
4095 	}
4096 
4097 	m->m_data = pktdata + sizeof(*desc);
4098 	m->m_pkthdr.len = m->m_len = len;
4099 
4100 	/* Account for padding following the frame header. */
4101 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
4102 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4103 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4104 		if (type == IEEE80211_FC0_TYPE_CTL) {
4105 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
4106 			case IEEE80211_FC0_SUBTYPE_CTS:
4107 				hdrlen = sizeof(struct ieee80211_frame_cts);
4108 				break;
4109 			case IEEE80211_FC0_SUBTYPE_ACK:
4110 				hdrlen = sizeof(struct ieee80211_frame_ack);
4111 				break;
4112 			default:
4113 				hdrlen = sizeof(struct ieee80211_frame_min);
4114 				break;
4115 			}
4116 		} else
4117 			hdrlen = ieee80211_get_hdrlen(wh);
4118 
4119 		if ((le16toh(desc->status) &
4120 		    IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4121 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4122 			/* Padding is inserted after the IV. */
4123 			hdrlen += IEEE80211_CCMP_HDRLEN;
4124 		}
4125 
4126 		memmove(m->m_data + 2, m->m_data, hdrlen);
4127 		m_adj(m, 2);
4128 	}
4129 
4130 	memset(&rxi, 0, sizeof(rxi));
4131 
4132 	/*
4133 	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
4134 	 * in place for each subframe. But it leaves the 'A-MSDU present'
4135 	 * bit set in the frame header. We need to clear this bit ourselves.
4136 	 * (XXX This workaround is not required on AX200/AX201 devices that
4137 	 * have been tested by me, but it's unclear when this problem was
4138 	 * fixed in the hardware. It definitely affects the 9k generation.
4139 	 * Leaving this in place for now since some 9k/AX200 hybrids seem
4140 	 * to exist that we may eventually add support for.)
4141 	 *
4142 	 * And we must allow the same CCMP PN for subframes following the
4143 	 * first subframe. Otherwise they would be discarded as replays.
4144 	 */
4145 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
4146 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4147 		uint8_t subframe_idx = (desc->amsdu_info &
4148 		    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4149 		if (subframe_idx > 0)
4150 			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4151 		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
4152 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
4153 			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
4154 			    struct ieee80211_qosframe_addr4 *);
4155 			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4156 		} else if (ieee80211_has_qos(wh) &&
4157 		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
4158 			struct ieee80211_qosframe *qwh = mtod(m,
4159 			    struct ieee80211_qosframe *);
4160 			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4161 		}
4162 	}
4163 
4164 	/*
4165 	 * Verify decryption before duplicate detection. The latter uses
4166 	 * the TID supplied in QoS frame headers and this TID is implicitly
4167 	 * verified as part of the CCMP nonce.
4168 	 */
4169 	if (iwx_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
4170 		m_freem(m);
4171 		return;
4172 	}
4173 
4174 	if (iwx_detect_duplicate(sc, m, desc, &rxi)) {
4175 		m_freem(m);
4176 		return;
4177 	}
4178 
4179 	phy_info = le16toh(desc->phy_info);
4180 	rate_n_flags = le32toh(desc->v1.rate_n_flags);
4181 	chanidx = desc->v1.channel;
4182 	device_timestamp = desc->v1.gp2_on_air_rise;
4183 
4184 	rssi = iwx_rxmq_get_signal_strength(sc, desc);
4185 	rssi = (0 - IWX_MIN_DBM) + rssi;	/* normalize */
4186 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
4187 
4188 	rxi.rxi_rssi = rssi;
4189 	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
4190 
4191 	if (iwx_rx_reorder(sc, m, chanidx, desc,
4192 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4193 	    rate_n_flags, device_timestamp, &rxi, ml))
4194 		return;
4195 
4196 	iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
4197 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4198 	    rate_n_flags, device_timestamp, &rxi, ml);
4199 }
4200 
4201 void
4202 iwx_rx_tx_cmd_single(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4203     struct iwx_node *in)
4204 {
4205 	struct ieee80211com *ic = &sc->sc_ic;
4206 	struct ifnet *ifp = IC2IFP(ic);
4207 	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
4208 	int status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
4209 	int txfail;
4210 
4211 	KASSERT(tx_resp->frame_count == 1);
4212 
4213 	txfail = (status != IWX_TX_STATUS_SUCCESS &&
4214 	    status != IWX_TX_STATUS_DIRECT_DONE);
4215 
4216 	if (txfail)
4217 		ifp->if_oerrors++;
4218 }
4219 
4220 void
4221 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
4222 {
4223 	struct ieee80211com *ic = &sc->sc_ic;
4224 
4225 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
4226 	    BUS_DMASYNC_POSTWRITE);
4227 	bus_dmamap_unload(sc->sc_dmat, txd->map);
4228 	m_freem(txd->m);
4229 	txd->m = NULL;
4230 
4231 	KASSERT(txd->in);
4232 	ieee80211_release_node(ic, &txd->in->in_ni);
4233 	txd->in = NULL;
4234 }
4235 
4236 void
4237 iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4238     struct iwx_rx_data *data)
4239 {
4240 	struct ieee80211com *ic = &sc->sc_ic;
4241 	struct ifnet *ifp = IC2IFP(ic);
4242 	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
4243 	int idx = cmd_hdr->idx;
4244 	int qid = cmd_hdr->qid;
4245 	struct iwx_tx_ring *ring = &sc->txq[qid];
4246 	struct iwx_tx_data *txd;
4247 
4248 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
4249 	    BUS_DMASYNC_POSTREAD);
4250 
4251 	sc->sc_tx_timer = 0;
4252 
4253 	txd = &ring->data[idx];
4254 	if (txd->m == NULL)
4255 		return;
4256 
4257 	iwx_rx_tx_cmd_single(sc, pkt, txd->in);
4258 	iwx_txd_done(sc, txd);
4259 	iwx_tx_update_byte_tbl(ring, idx, 0, 0);
4260 
4261 	/*
4262 	 * XXX Sometimes we miss Tx completion interrupts.
4263 	 * We cannot check Tx success/failure for affected frames; just free
4264 	 * the associated mbuf and release the associated node reference.
4265 	 */
4266 	while (ring->tail != idx) {
4267 		txd = &ring->data[ring->tail];
4268 		if (txd->m != NULL) {
4269 			DPRINTF(("%s: missed Tx completion: tail=%d idx=%d\n",
4270 			    __func__, ring->tail, idx));
4271 			iwx_txd_done(sc, txd);
4272 			iwx_tx_update_byte_tbl(ring, idx, 0, 0);
4273 			ring->queued--;
4274 		}
4275 		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
4276 	}
4277 
4278 	if (--ring->queued < IWX_TX_RING_LOMARK) {
4279 		sc->qfullmsk &= ~(1 << ring->qid);
4280 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
4281 			ifq_clr_oactive(&ifp->if_snd);
4282 			/*
4283 			 * Well, we're in interrupt context, but then again
4284 			 * I guess net80211 does all sorts of stunts in
4285 			 * interrupt context, so maybe this is no biggie.
4286 			 */
4287 			(*ifp->if_start)(ifp);
4288 		}
4289 	}
4290 }
4291 
4292 void
4293 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4294     struct iwx_rx_data *data)
4295 {
4296 	struct ieee80211com *ic = &sc->sc_ic;
4297 	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
4298 	uint32_t missed;
4299 
4300 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
4301 	    (ic->ic_state != IEEE80211_S_RUN))
4302 		return;
4303 
4304 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4305 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
4306 
4307 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
4308 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
4309 		if (ic->ic_if.if_flags & IFF_DEBUG)
4310 			printf("%s: receiving no beacons from %s; checking if "
4311 			    "this AP is still responding to probe requests\n",
4312 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
4313 		/*
4314 		 * Rather than go directly to scan state, try to send a
4315 		 * directed probe request first. If that fails then the
4316 		 * state machine will drop us into scanning after timing
4317 		 * out waiting for a probe response.
4318 		 */
4319 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
4320 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
4321 	}
4322 
4323 }
4324 
4325 int
4326 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
4327 {
4328 	struct iwx_binding_cmd cmd;
4329 	struct iwx_phy_ctxt *phyctxt = in->in_phyctxt;
4330 	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
4331 	int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
4332 	uint32_t status;
4333 
4334 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
4335 		panic("binding already added");
4336 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
4337 		panic("binding already removed");
4338 
4339 	if (phyctxt == NULL) /* XXX race with iwx_stop() */
4340 		return EINVAL;
4341 
4342 	memset(&cmd, 0, sizeof(cmd));
4343 
4344 	cmd.id_and_color
4345 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4346 	cmd.action = htole32(action);
4347 	cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4348 
4349 	cmd.macs[0] = htole32(mac_id);
4350 	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
4351 		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
4352 
4353 	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
4354 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4355 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4356 	else
4357 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4358 
4359 	status = 0;
4360 	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
4361 	    &cmd, &status);
4362 	if (err == 0 && status != 0)
4363 		err = EIO;
4364 
4365 	return err;
4366 }
4367 
4368 int
4369 iwx_phy_ctxt_cmd_uhb(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4370     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4371     uint32_t apply_time)
4372 {
4373 	struct ieee80211com *ic = &sc->sc_ic;
4374 	struct iwx_phy_context_cmd_uhb cmd;
4375 	uint8_t active_cnt, idle_cnt;
4376 	struct ieee80211_channel *chan = ctxt->channel;
4377 
4378 	memset(&cmd, 0, sizeof(cmd));
4379 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
4380 	    ctxt->color));
4381 	cmd.action = htole32(action);
4382 	cmd.apply_time = htole32(apply_time);
4383 
4384 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4385 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
4386 	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
4387 	cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
4388 	cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4389 
4390 	idle_cnt = chains_static;
4391 	active_cnt = chains_dynamic;
4392 	cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
4393 					IWX_PHY_RX_CHAIN_VALID_POS);
4394 	cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
4395 	cmd.rxchain_info |= htole32(active_cnt <<
4396 	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
4397 	cmd.txchain_info = htole32(iwx_fw_valid_tx_ant(sc));
4398 
4399 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
4400 }
4401 
4402 int
4403 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4404     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4405     uint32_t apply_time)
4406 {
4407 	struct ieee80211com *ic = &sc->sc_ic;
4408 	struct iwx_phy_context_cmd cmd;
4409 	uint8_t active_cnt, idle_cnt;
4410 	struct ieee80211_channel *chan = ctxt->channel;
4411 
4412 	/*
4413 	 * Intel increased the size of the fw_channel_info struct and neglected
4414 	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
4415 	 * member in the middle.
4416 	 * To keep things simple we use a separate function to handle the larger
4417 	 * variant of the phy context command.
4418 	 */
4419 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS))
4420 		return iwx_phy_ctxt_cmd_uhb(sc, ctxt, chains_static,
4421 		    chains_dynamic, action, apply_time);
4422 
4423 	memset(&cmd, 0, sizeof(cmd));
4424 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
4425 	    ctxt->color));
4426 	cmd.action = htole32(action);
4427 	cmd.apply_time = htole32(apply_time);
4428 
4429 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4430 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
4431 	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
4432 	cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
4433 	cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4434 
4435 	idle_cnt = chains_static;
4436 	active_cnt = chains_dynamic;
4437 	cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
4438 					IWX_PHY_RX_CHAIN_VALID_POS);
4439 	cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
4440 	cmd.rxchain_info |= htole32(active_cnt <<
4441 	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
4442 	cmd.txchain_info = htole32(iwx_fw_valid_tx_ant(sc));
4443 
4444 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
4445 }
4446 
4447 int
4448 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
4449 {
4450 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
4451 	struct iwx_tfh_tfd *desc;
4452 	struct iwx_tx_data *txdata;
4453 	struct iwx_device_cmd *cmd;
4454 	struct mbuf *m;
4455 	bus_addr_t paddr;
4456 	uint64_t addr;
4457 	int err = 0, i, paylen, off, s;
4458 	int idx, code, async, group_id;
4459 	size_t hdrlen, datasz;
4460 	uint8_t *data;
4461 	int generation = sc->sc_generation;
4462 
4463 	code = hcmd->id;
4464 	async = hcmd->flags & IWX_CMD_ASYNC;
4465 	idx = ring->cur;
4466 
4467 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
4468 		paylen += hcmd->len[i];
4469 	}
4470 
4471 	/* If this command waits for a response, allocate response buffer. */
4472 	hcmd->resp_pkt = NULL;
4473 	if (hcmd->flags & IWX_CMD_WANT_RESP) {
4474 		uint8_t *resp_buf;
4475 		KASSERT(!async);
4476 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet));
4477 		KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX);
4478 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
4479 			return ENOSPC;
4480 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
4481 		    M_NOWAIT | M_ZERO);
4482 		if (resp_buf == NULL)
4483 			return ENOMEM;
4484 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
4485 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
4486 	} else {
4487 		sc->sc_cmd_resp_pkt[idx] = NULL;
4488 	}
4489 
4490 	s = splnet();
4491 
4492 	desc = &ring->desc[idx];
4493 	txdata = &ring->data[idx];
4494 
4495 	group_id = iwx_cmd_groupid(code);
4496 	if (group_id != 0) {
4497 		hdrlen = sizeof(cmd->hdr_wide);
4498 		datasz = sizeof(cmd->data_wide);
4499 	} else {
4500 		hdrlen = sizeof(cmd->hdr);
4501 		datasz = sizeof(cmd->data);
4502 	}
4503 
4504 	if (paylen > datasz) {
4505 		/* Command is too large to fit in pre-allocated space. */
4506 		size_t totlen = hdrlen + paylen;
4507 		if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
4508 			printf("%s: firmware command too long (%zd bytes)\n",
4509 			    DEVNAME(sc), totlen);
4510 			err = EINVAL;
4511 			goto out;
4512 		}
4513 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
4514 		if (m == NULL) {
4515 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
4516 			    DEVNAME(sc), totlen);
4517 			err = ENOMEM;
4518 			goto out;
4519 		}
4520 		cmd = mtod(m, struct iwx_device_cmd *);
4521 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
4522 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4523 		if (err) {
4524 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
4525 			    DEVNAME(sc), totlen);
4526 			m_freem(m);
4527 			goto out;
4528 		}
4529 		txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
4530 		paddr = txdata->map->dm_segs[0].ds_addr;
4531 	} else {
4532 		cmd = &ring->cmd[idx];
4533 		paddr = txdata->cmd_paddr;
4534 	}
4535 
4536 	if (group_id != 0) {
4537 		cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
4538 		cmd->hdr_wide.group_id = group_id;
4539 		cmd->hdr_wide.qid = ring->qid;
4540 		cmd->hdr_wide.idx = idx;
4541 		cmd->hdr_wide.length = htole16(paylen);
4542 		cmd->hdr_wide.version = iwx_cmd_version(code);
4543 		data = cmd->data_wide;
4544 	} else {
4545 		cmd->hdr.code = code;
4546 		cmd->hdr.flags = 0;
4547 		cmd->hdr.qid = ring->qid;
4548 		cmd->hdr.idx = idx;
4549 		data = cmd->data;
4550 	}
4551 
4552 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
4553 		if (hcmd->len[i] == 0)
4554 			continue;
4555 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
4556 		off += hcmd->len[i];
4557 	}
4558 	KASSERT(off == paylen);
4559 
4560 	desc->tbs[0].tb_len = htole16(hdrlen + paylen);
4561 	addr = htole64((uint64_t)paddr);
4562 	memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
4563 	desc->num_tbs = 1;
4564 
4565 	if (paylen > datasz) {
4566 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
4567 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
4568 	} else {
4569 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4570 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
4571 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
4572 	}
4573 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4574 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
4575 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
4576 	/* Kick command ring. */
4577 	DPRINTF(("%s: sending command 0x%x\n", __func__, code));
4578 	ring->queued++;
4579 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
4580 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);
4581 
4582 	if (!async) {
4583 		err = tsleep_nsec(desc, PCATCH, "iwxcmd", SEC_TO_NSEC(1));
4584 		if (err == 0) {
4585 			/* if hardware is no longer up, return error */
4586 			if (generation != sc->sc_generation) {
4587 				err = ENXIO;
4588 				goto out;
4589 			}
4590 
4591 			/* Response buffer will be freed in iwx_free_resp(). */
4592 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
4593 			sc->sc_cmd_resp_pkt[idx] = NULL;
4594 		} else if (generation == sc->sc_generation) {
4595 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
4596 			    sc->sc_cmd_resp_len[idx]);
4597 			sc->sc_cmd_resp_pkt[idx] = NULL;
4598 		}
4599 	}
4600  out:
4601 	splx(s);
4602 
4603 	return err;
4604 }
4605 
4606 int
4607 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
4608     uint16_t len, const void *data)
4609 {
4610 	struct iwx_host_cmd cmd = {
4611 		.id = id,
4612 		.len = { len, },
4613 		.data = { data, },
4614 		.flags = flags,
4615 	};
4616 
4617 	return iwx_send_cmd(sc, &cmd);
4618 }
4619 
4620 int
4621 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
4622     uint32_t *status)
4623 {
4624 	struct iwx_rx_packet *pkt;
4625 	struct iwx_cmd_response *resp;
4626 	int err, resp_len;
4627 
4628 	KASSERT((cmd->flags & IWX_CMD_WANT_RESP) == 0);
4629 	cmd->flags |= IWX_CMD_WANT_RESP;
4630 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
4631 
4632 	err = iwx_send_cmd(sc, cmd);
4633 	if (err)
4634 		return err;
4635 
4636 	pkt = cmd->resp_pkt;
4637 	if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
4638 		return EIO;
4639 
4640 	resp_len = iwx_rx_packet_payload_len(pkt);
4641 	if (resp_len != sizeof(*resp)) {
4642 		iwx_free_resp(sc, cmd);
4643 		return EIO;
4644 	}
4645 
4646 	resp = (void *)pkt->data;
4647 	*status = le32toh(resp->status);
4648 	iwx_free_resp(sc, cmd);
4649 	return err;
4650 }
4651 
4652 int
4653 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
4654     const void *data, uint32_t *status)
4655 {
4656 	struct iwx_host_cmd cmd = {
4657 		.id = id,
4658 		.len = { len, },
4659 		.data = { data, },
4660 	};
4661 
4662 	return iwx_send_cmd_status(sc, &cmd, status);
4663 }
4664 
4665 void
4666 iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
4667 {
4668 	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP);
4669 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
4670 	hcmd->resp_pkt = NULL;
4671 }
4672 
4673 void
4674 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
4675 {
4676 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
4677 	struct iwx_tx_data *data;
4678 
4679 	if (qid != IWX_DQA_CMD_QUEUE) {
4680 		return;	/* Not a command ack. */
4681 	}
4682 
4683 	data = &ring->data[idx];
4684 
4685 	if (data->m != NULL) {
4686 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4687 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4688 		bus_dmamap_unload(sc->sc_dmat, data->map);
4689 		m_freem(data->m);
4690 		data->m = NULL;
4691 	}
4692 	wakeup(&ring->desc[idx]);
4693 
4694 	DPRINTF(("%s: command 0x%x done\n", __func__, code));
4695 	if (ring->queued == 0) {
4696 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
4697 			DEVNAME(sc), code));
4698 	} else if (ring->queued > 0)
4699 		ring->queued--;
4700 }
4701 
4702 /*
4703  * Fill in various bit for management frames, and leave them
4704  * unfilled for data frames (firmware takes care of that).
4705  * Return the selected TX rate.
4706  */
4707 const struct iwx_rate *
4708 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
4709     struct ieee80211_frame *wh, struct iwx_tx_cmd_gen2 *tx)
4710 {
4711 	struct ieee80211com *ic = &sc->sc_ic;
4712 	struct ieee80211_node *ni = &in->in_ni;
4713 	struct ieee80211_rateset *rs = &ni->ni_rates;
4714 	const struct iwx_rate *rinfo;
4715 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4716 	int min_ridx = iwx_rval2ridx(ieee80211_min_basic_rate(ic));
4717 	int ridx, rate_flags;
4718 	uint32_t flags = 0;
4719 
4720 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4721 	    type != IEEE80211_FC0_TYPE_DATA) {
4722 		/* for non-data, use the lowest supported rate */
4723 		ridx = min_ridx;
4724 		flags |= IWX_TX_FLAGS_CMD_RATE;
4725 	} else if (ic->ic_fixed_mcs != -1) {
4726 		ridx = sc->sc_fixed_ridx;
4727 		flags |= IWX_TX_FLAGS_CMD_RATE;
4728 	} else if (ic->ic_fixed_rate != -1) {
4729 		ridx = sc->sc_fixed_ridx;
4730 		flags |= IWX_TX_FLAGS_CMD_RATE;
4731 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
4732 		ridx = iwx_mcs2ridx[ni->ni_txmcs];
4733 	} else {
4734 		uint8_t rval;
4735 		rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
4736 		ridx = iwx_rval2ridx(rval);
4737 		if (ridx < min_ridx)
4738 			ridx = min_ridx;
4739 	}
4740 
4741 	if ((ic->ic_flags & IEEE80211_F_RSNON) &&
4742 	    ni->ni_rsn_supp_state == RSNA_SUPP_PTKNEGOTIATING)
4743 		flags |= IWX_TX_FLAGS_HIGH_PRI;
4744 	tx->flags = htole32(flags);
4745 
4746 	rinfo = &iwx_rates[ridx];
4747 	if (iwx_is_mimo_ht_plcp(rinfo->ht_plcp))
4748 		rate_flags = IWX_RATE_MCS_ANT_AB_MSK;
4749 	else
4750 		rate_flags = IWX_RATE_MCS_ANT_A_MSK;
4751 	if (IWX_RIDX_IS_CCK(ridx))
4752 		rate_flags |= IWX_RATE_MCS_CCK_MSK;
4753 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4754 	    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
4755 		rate_flags |= IWX_RATE_MCS_HT_MSK;
4756 		if (ieee80211_node_supports_ht_sgi20(ni))
4757 			rate_flags |= IWX_RATE_MCS_SGI_MSK;
4758 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4759 	} else
4760 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4761 
4762 	return rinfo;
4763 }
4764 
4765 void
4766 iwx_tx_update_byte_tbl(struct iwx_tx_ring *txq, int idx, uint16_t byte_cnt,
4767     uint16_t num_tbs)
4768 {
4769 	uint8_t filled_tfd_size, num_fetch_chunks;
4770 	uint16_t len = byte_cnt;
4771 	uint16_t bc_ent;
4772 	struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
4773 
4774 	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
4775 			  num_tbs * sizeof(struct iwx_tfh_tb);
4776 	/*
4777 	 * filled_tfd_size contains the number of filled bytes in the TFD.
4778 	 * Dividing it by 64 will give the number of chunks to fetch
4779 	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
4780 	 * If, for example, TFD contains only 3 TBs then 32 bytes
4781 	 * of the TFD are used, and only one chunk of 64 bytes should
4782 	 * be fetched
4783 	 */
4784 	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;
4785 
4786 	/* Before AX210, the HW expects DW */
4787 	len = howmany(len, 4);
4788 	bc_ent = htole16(len | (num_fetch_chunks << 12));
4789 	scd_bc_tbl->tfd_offset[idx] = bc_ent;
4790 }
4791 
4792 int
4793 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
4794 {
4795 	struct ieee80211com *ic = &sc->sc_ic;
4796 	struct iwx_node *in = (void *)ni;
4797 	struct iwx_tx_ring *ring;
4798 	struct iwx_tx_data *data;
4799 	struct iwx_tfh_tfd *desc;
4800 	struct iwx_device_cmd *cmd;
4801 	struct iwx_tx_cmd_gen2 *tx;
4802 	struct ieee80211_frame *wh;
4803 	struct ieee80211_key *k = NULL;
4804 	const struct iwx_rate *rinfo;
4805 	uint64_t paddr;
4806 	u_int hdrlen;
4807 	bus_dma_segment_t *seg;
4808 	uint16_t num_tbs;
4809 	uint8_t type;
4810 	int i, totlen, err, pad;
4811 
4812 	wh = mtod(m, struct ieee80211_frame *);
4813 	hdrlen = ieee80211_get_hdrlen(wh);
4814 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4815 
4816 	/*
4817 	 * Map EDCA categories to Tx data queues.
4818 	 *
4819 	 * We use static data queue assignments even in DQA mode. We do not
4820 	 * need to share Tx queues between stations because we only implement
4821 	 * client mode; the firmware's station table contains only one entry
4822 	 * which represents our access point.
4823 	 *
4824 	 * Tx aggregation will require additional queues (one queue per TID
4825 	 * for which aggregation is enabled) but we do not implement this yet.
4826 	 */
4827 	ring = &sc->txq[ac + IWX_DQA_AUX_QUEUE + 1];
4828 	desc = &ring->desc[ring->cur];
4829 	memset(desc, 0, sizeof(*desc));
4830 	data = &ring->data[ring->cur];
4831 
4832 	cmd = &ring->cmd[ring->cur];
4833 	cmd->hdr.code = IWX_TX_CMD;
4834 	cmd->hdr.flags = 0;
4835 	cmd->hdr.qid = ring->qid;
4836 	cmd->hdr.idx = ring->cur;
4837 
4838 	tx = (void *)cmd->data;
4839 	memset(tx, 0, sizeof(*tx));
4840 
4841 	rinfo = iwx_tx_fill_cmd(sc, in, wh, tx);
4842 
4843 #if NBPFILTER > 0
4844 	if (sc->sc_drvbpf != NULL) {
4845 		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
4846 		uint16_t chan_flags;
4847 
4848 		tap->wt_flags = 0;
4849 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
4850 		chan_flags = ni->ni_chan->ic_flags;
4851 		if (ic->ic_curmode != IEEE80211_MODE_11N)
4852 			chan_flags &= ~IEEE80211_CHAN_HT;
4853 		tap->wt_chan_flags = htole16(chan_flags);
4854 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4855 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4856 		    type == IEEE80211_FC0_TYPE_DATA &&
4857 		    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
4858 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
4859 		} else
4860 			tap->wt_rate = rinfo->rate;
4861 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
4862 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4863 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4864 
4865 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
4866 		    m, BPF_DIRECTION_OUT);
4867 	}
4868 #endif
4869 
4870 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
4871                 k = ieee80211_get_txkey(ic, wh, ni);
4872 		if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
4873 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
4874 				return ENOBUFS;
4875 			/* 802.11 header may have moved. */
4876 			wh = mtod(m, struct ieee80211_frame *);
4877 			tx->flags |= htole32(IWX_TX_FLAGS_ENCRYPT_DIS);
4878 		} else {
4879 			k->k_tsc++;
4880 			/* Hardware increments PN internally and adds IV. */
4881 		}
4882 	} else
4883 		tx->flags |= htole32(IWX_TX_FLAGS_ENCRYPT_DIS);
4884 
4885 	totlen = m->m_pkthdr.len;
4886 
4887 	if (hdrlen & 3) {
4888 		/* First segment length must be a multiple of 4. */
4889 		pad = 4 - (hdrlen & 3);
4890 		tx->offload_assist |= htole16(IWX_TX_CMD_OFFLD_PAD);
4891 	} else
4892 		pad = 0;
4893 
4894 	tx->len = htole16(totlen);
4895 
4896 	/* Copy 802.11 header in TX command. */
4897 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
4898 
4899 	/* Trim 802.11 header. */
4900 	m_adj(m, hdrlen);
4901 
4902 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4903 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4904 	if (err && err != EFBIG) {
4905 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
4906 		m_freem(m);
4907 		return err;
4908 	}
4909 	if (err) {
4910 		/* Too many DMA segments, linearize mbuf. */
4911 		if (m_defrag(m, M_DONTWAIT)) {
4912 			m_freem(m);
4913 			return ENOBUFS;
4914 		}
4915 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4916 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4917 		if (err) {
4918 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
4919 			    err);
4920 			m_freem(m);
4921 			return err;
4922 		}
4923 	}
4924 	data->m = m;
4925 	data->in = in;
4926 
4927 	/* Fill TX descriptor. */
4928 	num_tbs = 2 + data->map->dm_nsegs;
4929 	desc->num_tbs = htole16(num_tbs);
4930 
4931 	desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
4932 	paddr = htole64(data->cmd_paddr);
4933 	memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
4934 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
4935 		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
4936 	desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
4937 	    sizeof(*tx) + hdrlen + pad - IWX_FIRST_TB_SIZE);
4938 	paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
4939 	memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
4940 
4941 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
4942 		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
4943 
4944 	/* Other DMA segments are for data payload. */
4945 	seg = data->map->dm_segs;
4946 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
4947 		desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
4948 		paddr = htole64(seg->ds_addr);
4949 		memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
4950 		if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
4951 			DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
4952 	}
4953 
4954 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
4955 	    BUS_DMASYNC_PREWRITE);
4956 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4957 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
4958 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
4959 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4960 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
4961 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
4962 
4963 	iwx_tx_update_byte_tbl(ring, ring->cur, totlen, num_tbs);
4964 
4965 	/* Kick TX ring. */
4966 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
4967 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);
4968 
4969 	/* Mark TX ring as full if we reach a certain threshold. */
4970 	if (++ring->queued > IWX_TX_RING_HIMARK) {
4971 		sc->qfullmsk |= 1 << ring->qid;
4972 	}
4973 
4974 	return 0;
4975 }
4976 
4977 int
4978 iwx_flush_tx_path(struct iwx_softc *sc)
4979 {
4980 	struct iwx_tx_path_flush_cmd flush_cmd = {
4981 		.sta_id = htole32(IWX_STATION_ID),
4982 		.tid_mask = htole16(0xffff),
4983 	};
4984 	int err;
4985 
4986 	err = iwx_send_cmd_pdu(sc, IWX_TXPATH_FLUSH, 0,
4987 	    sizeof(flush_cmd), &flush_cmd);
4988 	if (err)
4989                 printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
4990 	return err;
4991 }
4992 
4993 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC    25
4994 
4995 int
4996 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
4997     struct iwx_beacon_filter_cmd *cmd)
4998 {
4999 	size_t len;
5000 
5001 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_BEACON_FILTER_V4))
5002 		len = sizeof(struct iwx_beacon_filter_cmd);
5003 	else
5004 		len = offsetof(struct iwx_beacon_filter_cmd,
5005 		    bf_threshold_absolute_low);
5006 
5007 	return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
5008 	    0, len, cmd);
5009 }
5010 
5011 int
5012 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
5013 {
5014 	struct iwx_beacon_filter_cmd cmd = {
5015 		IWX_BF_CMD_CONFIG_DEFAULTS,
5016 		.bf_enable_beacon_filter = htole32(1),
5017 		.ba_enable_beacon_abort = htole32(enable),
5018 	};
5019 
5020 	if (!sc->sc_bf.bf_enabled)
5021 		return 0;
5022 
5023 	sc->sc_bf.ba_enabled = enable;
5024 	return iwx_beacon_filter_send_cmd(sc, &cmd);
5025 }
5026 
5027 void
5028 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
5029     struct iwx_mac_power_cmd *cmd)
5030 {
5031 	struct ieee80211com *ic = &sc->sc_ic;
5032 	struct ieee80211_node *ni = &in->in_ni;
5033 	int dtim_period, dtim_msec, keep_alive;
5034 
5035 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5036 	    in->in_color));
5037 	if (ni->ni_dtimperiod)
5038 		dtim_period = ni->ni_dtimperiod;
5039 	else
5040 		dtim_period = 1;
5041 
5042 	/*
5043 	 * Regardless of power management state the driver must set
5044 	 * keep alive period. FW will use it for sending keep alive NDPs
5045 	 * immediately after association. Check that keep alive period
5046 	 * is at least 3 * DTIM.
5047 	 */
5048 	dtim_msec = dtim_period * ni->ni_intval;
5049 	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
5050 	keep_alive = roundup(keep_alive, 1000) / 1000;
5051 	cmd->keep_alive_seconds = htole16(keep_alive);
5052 
5053 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5054 		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5055 }
5056 
5057 int
5058 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
5059 {
5060 	int err;
5061 	int ba_enable;
5062 	struct iwx_mac_power_cmd cmd;
5063 
5064 	memset(&cmd, 0, sizeof(cmd));
5065 
5066 	iwx_power_build_cmd(sc, in, &cmd);
5067 
5068 	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
5069 	    sizeof(cmd), &cmd);
5070 	if (err != 0)
5071 		return err;
5072 
5073 	ba_enable = !!(cmd.flags &
5074 	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
5075 	return iwx_update_beacon_abort(sc, in, ba_enable);
5076 }
5077 
5078 int
5079 iwx_power_update_device(struct iwx_softc *sc)
5080 {
5081 	struct iwx_device_power_cmd cmd = { };
5082 	struct ieee80211com *ic = &sc->sc_ic;
5083 
5084 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5085 		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5086 
5087 	return iwx_send_cmd_pdu(sc,
5088 	    IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
5089 }
5090 
5091 int
5092 iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
5093 {
5094 	struct iwx_beacon_filter_cmd cmd = {
5095 		IWX_BF_CMD_CONFIG_DEFAULTS,
5096 		.bf_enable_beacon_filter = htole32(1),
5097 		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
5098 	};
5099 	int err;
5100 
5101 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
5102 	if (err == 0)
5103 		sc->sc_bf.bf_enabled = 1;
5104 
5105 	return err;
5106 }
5107 
5108 int
5109 iwx_disable_beacon_filter(struct iwx_softc *sc)
5110 {
5111 	struct iwx_beacon_filter_cmd cmd;
5112 	int err;
5113 
5114 	memset(&cmd, 0, sizeof(cmd));
5115 
5116 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
5117 	if (err == 0)
5118 		sc->sc_bf.bf_enabled = 0;
5119 
5120 	return err;
5121 }
5122 
5123 int
5124 iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
5125 {
5126 	struct iwx_add_sta_cmd add_sta_cmd;
5127 	int err;
5128 	uint32_t status;
5129 	struct ieee80211com *ic = &sc->sc_ic;
5130 
5131 	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
5132 		panic("STA already added");
5133 
5134 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
5135 
5136 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5137 		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
5138 		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
5139 	} else {
5140 		add_sta_cmd.sta_id = IWX_STATION_ID;
5141 		add_sta_cmd.station_type = IWX_STA_LINK;
5142 	}
5143 	add_sta_cmd.mac_id_n_color
5144 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
5145 	if (!update) {
5146 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
5147 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
5148 			    etheranyaddr);
5149 		else
5150 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
5151 			    in->in_ni.ni_bssid);
5152 	}
5153 	add_sta_cmd.add_modify = update ? 1 : 0;
5154 	add_sta_cmd.station_flags_msk
5155 	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
5156 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
5157 	if (update)
5158 		add_sta_cmd.modify_mask |= (IWX_STA_MODIFY_TID_DISABLE_TX);
5159 
5160 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
5161 		add_sta_cmd.station_flags_msk
5162 		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
5163 		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);
5164 
5165 		add_sta_cmd.station_flags
5166 		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_64K);
5167 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
5168 		case IEEE80211_AMPDU_PARAM_SS_2:
5169 			add_sta_cmd.station_flags
5170 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
5171 			break;
5172 		case IEEE80211_AMPDU_PARAM_SS_4:
5173 			add_sta_cmd.station_flags
5174 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
5175 			break;
5176 		case IEEE80211_AMPDU_PARAM_SS_8:
5177 			add_sta_cmd.station_flags
5178 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
5179 			break;
5180 		case IEEE80211_AMPDU_PARAM_SS_16:
5181 			add_sta_cmd.station_flags
5182 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
5183 			break;
5184 		default:
5185 			break;
5186 		}
5187 	}
5188 
5189 	status = IWX_ADD_STA_SUCCESS;
5190 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
5191 	    &add_sta_cmd, &status);
5192 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
5193 		err = EIO;
5194 
5195 	return err;
5196 }
5197 
5198 int
5199 iwx_add_aux_sta(struct iwx_softc *sc)
5200 {
5201 	struct iwx_add_sta_cmd cmd;
5202 	int err, qid = IWX_DQA_AUX_QUEUE;
5203 	uint32_t status;
5204 
5205 	memset(&cmd, 0, sizeof(cmd));
5206 	cmd.sta_id = IWX_AUX_STA_ID;
5207 	cmd.station_type = IWX_STA_AUX_ACTIVITY;
5208 	cmd.mac_id_n_color =
5209 	    htole32(IWX_FW_CMD_ID_AND_COLOR(IWX_MAC_INDEX_AUX, 0));
5210 	cmd.tid_disable_tx = htole16(0xffff);
5211 
5212 	status = IWX_ADD_STA_SUCCESS;
5213 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
5214 	    &status);
5215 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
5216 		return EIO;
5217 
5218 	return iwx_enable_txq(sc, IWX_AUX_STA_ID, qid, IWX_MGMT_TID,
5219 	    IWX_TX_RING_COUNT);
5220 }
5221 
5222 int
5223 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
5224 {
5225 	struct ieee80211com *ic = &sc->sc_ic;
5226 	struct iwx_rm_sta_cmd rm_sta_cmd;
5227 	int err;
5228 
5229 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
5230 		panic("sta already removed");
5231 
5232 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
5233 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
5234 		rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
5235 	else
5236 		rm_sta_cmd.sta_id = IWX_STATION_ID;
5237 
5238 	err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
5239 	    &rm_sta_cmd);
5240 
5241 	return err;
5242 }
5243 
5244 uint8_t
5245 iwx_umac_scan_fill_channels(struct iwx_softc *sc,
5246     struct iwx_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
5247 {
5248 	struct ieee80211com *ic = &sc->sc_ic;
5249 	struct ieee80211_channel *c;
5250 	uint8_t nchan;
5251 
5252 	for (nchan = 0, c = &ic->ic_channels[1];
5253 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5254 	    nchan < sc->sc_capa_n_scan_channels;
5255 	    c++) {
5256 		uint8_t channel_num;
5257 
5258 		if (c->ic_flags == 0)
5259 			continue;
5260 
5261 		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
5262 		if (isset(sc->sc_ucode_api,
5263 		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
5264 			chan->v2.channel_num = channel_num;
5265 			if (IEEE80211_IS_CHAN_2GHZ(c))
5266 				chan->v2.band = IWX_PHY_BAND_24;
5267 			else
5268 				chan->v2.band = IWX_PHY_BAND_5;
5269 			chan->v2.iter_count = 1;
5270 			chan->v2.iter_interval = 0;
5271 		} else {
5272 			chan->v1.channel_num = channel_num;
5273 			chan->v1.iter_count = 1;
5274 			chan->v1.iter_interval = htole16(0);
5275 		}
5276 		if (n_ssids != 0 && !bgscan)
5277 			chan->flags = htole32(1 << 0); /* select SSID 0 */
5278 		chan++;
5279 		nchan++;
5280 	}
5281 
5282 	return nchan;
5283 }
5284 
5285 int
5286 iwx_fill_probe_req_v1(struct iwx_softc *sc, struct iwx_scan_probe_req_v1 *preq1)
5287 {
5288 	struct iwx_scan_probe_req preq2;
5289 	int err, i;
5290 
5291 	err = iwx_fill_probe_req(sc, &preq2);
5292 	if (err)
5293 		return err;
5294 
5295 	preq1->mac_header = preq2.mac_header;
5296 	for (i = 0; i < nitems(preq1->band_data); i++)
5297 		preq1->band_data[i] = preq2.band_data[i];
5298 	preq1->common_data = preq2.common_data;
5299 	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
5300 	return 0;
5301 }
5302 
5303 int
5304 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
5305 {
5306 	struct ieee80211com *ic = &sc->sc_ic;
5307 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
5308 	struct ieee80211_rateset *rs;
5309 	size_t remain = sizeof(preq->buf);
5310 	uint8_t *frm, *pos;
5311 
5312 	memset(preq, 0, sizeof(*preq));
5313 
5314 	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
5315 		return ENOBUFS;
5316 
5317 	/*
5318 	 * Build a probe request frame.  Most of the following code is a
5319 	 * copy & paste of what is done in net80211.
5320 	 */
5321 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5322 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5323 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5324 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
5325 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
5326 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
5327 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
5328 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
5329 
5330 	frm = (uint8_t *)(wh + 1);
5331 	*frm++ = IEEE80211_ELEMID_SSID;
5332 	*frm++ = 0;
5333 	/* hardware inserts SSID */
5334 
5335 	/* Tell the firmware where the MAC header is. */
5336 	preq->mac_header.offset = 0;
5337 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
5338 	remain -= frm - (uint8_t *)wh;
5339 
5340 	/* Fill in 2GHz IEs and tell firmware where they are. */
5341 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5342 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5343 		if (remain < 4 + rs->rs_nrates)
5344 			return ENOBUFS;
5345 	} else if (remain < 2 + rs->rs_nrates)
5346 		return ENOBUFS;
5347 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
5348 	pos = frm;
5349 	frm = ieee80211_add_rates(frm, rs);
5350 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5351 		frm = ieee80211_add_xrates(frm, rs);
5352 	remain -= frm - pos;
5353 
5354 	if (isset(sc->sc_enabled_capa,
5355 	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
5356 		if (remain < 3)
5357 			return ENOBUFS;
5358 		*frm++ = IEEE80211_ELEMID_DSPARMS;
5359 		*frm++ = 1;
5360 		*frm++ = 0;
5361 		remain -= 3;
5362 	}
5363 	preq->band_data[0].len = htole16(frm - pos);
5364 
5365 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
5366 		/* Fill in 5GHz IEs. */
5367 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5368 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5369 			if (remain < 4 + rs->rs_nrates)
5370 				return ENOBUFS;
5371 		} else if (remain < 2 + rs->rs_nrates)
5372 			return ENOBUFS;
5373 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
5374 		pos = frm;
5375 		frm = ieee80211_add_rates(frm, rs);
5376 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5377 			frm = ieee80211_add_xrates(frm, rs);
5378 		preq->band_data[1].len = htole16(frm - pos);
5379 		remain -= frm - pos;
5380 	}
5381 
5382 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
5383 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
5384 	pos = frm;
5385 	if (ic->ic_flags & IEEE80211_F_HTON) {
5386 		if (remain < 28)
5387 			return ENOBUFS;
5388 		frm = ieee80211_add_htcaps(frm, ic);
5389 		/* XXX add WME info? */
5390 	}
5391 	preq->common_data.len = htole16(frm - pos);
5392 
5393 	return 0;
5394 }
5395 
5396 int
5397 iwx_config_umac_scan(struct iwx_softc *sc)
5398 {
5399 	struct ieee80211com *ic = &sc->sc_ic;
5400 	struct iwx_scan_config *scan_config;
5401 	int err, nchan;
5402 	size_t cmd_size;
5403 	struct ieee80211_channel *c;
5404 	struct iwx_host_cmd hcmd = {
5405 		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
5406 		.flags = 0,
5407 	};
5408 	static const uint32_t rates = (IWX_SCAN_CONFIG_RATE_1M |
5409 	    IWX_SCAN_CONFIG_RATE_2M | IWX_SCAN_CONFIG_RATE_5M |
5410 	    IWX_SCAN_CONFIG_RATE_11M | IWX_SCAN_CONFIG_RATE_6M |
5411 	    IWX_SCAN_CONFIG_RATE_9M | IWX_SCAN_CONFIG_RATE_12M |
5412 	    IWX_SCAN_CONFIG_RATE_18M | IWX_SCAN_CONFIG_RATE_24M |
5413 	    IWX_SCAN_CONFIG_RATE_36M | IWX_SCAN_CONFIG_RATE_48M |
5414 	    IWX_SCAN_CONFIG_RATE_54M);
5415 
5416 	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
5417 
5418 	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
5419 	if (scan_config == NULL)
5420 		return ENOMEM;
5421 
5422 	scan_config->tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
5423 	scan_config->rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
5424 	scan_config->legacy_rates = htole32(rates |
5425 	    IWX_SCAN_CONFIG_SUPPORTED_RATE(rates));
5426 
5427 	/* These timings correspond to iwlwifi's UNASSOC scan. */
5428 	scan_config->dwell.active = 10;
5429 	scan_config->dwell.passive = 110;
5430 	scan_config->dwell.fragmented = 44;
5431 	scan_config->dwell.extended = 90;
5432 	scan_config->out_of_channel_time[IWX_SCAN_LB_LMAC_IDX] = htole32(0);
5433 	scan_config->out_of_channel_time[IWX_SCAN_HB_LMAC_IDX] = htole32(0);
5434 	scan_config->suspend_time[IWX_SCAN_LB_LMAC_IDX] = htole32(0);
5435 	scan_config->suspend_time[IWX_SCAN_HB_LMAC_IDX] = htole32(0);
5436 
5437 	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
5438 
5439 	scan_config->bcast_sta_id = IWX_AUX_STA_ID;
5440 	scan_config->channel_flags = 0;
5441 
5442 	for (c = &ic->ic_channels[1], nchan = 0;
5443 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5444 	    nchan < sc->sc_capa_n_scan_channels; c++) {
5445 		if (c->ic_flags == 0)
5446 			continue;
5447 		scan_config->channel_array[nchan++] =
5448 		    ieee80211_mhz2ieee(c->ic_freq, 0);
5449 	}
5450 
5451 	scan_config->flags = htole32(IWX_SCAN_CONFIG_FLAG_ACTIVATE |
5452 	    IWX_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
5453 	    IWX_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
5454 	    IWX_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
5455 	    IWX_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
5456 	    IWX_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
5457 	    IWX_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
5458 	    IWX_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
5459 	    IWX_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
5460 	    IWX_SCAN_CONFIG_N_CHANNELS(nchan) |
5461 	    IWX_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
5462 
5463 	hcmd.data[0] = scan_config;
5464 	hcmd.len[0] = cmd_size;
5465 
5466 	err = iwx_send_cmd(sc, &hcmd);
5467 	free(scan_config, M_DEVBUF, cmd_size);
5468 	return err;
5469 }
5470 
5471 int
5472 iwx_umac_scan_size(struct iwx_softc *sc)
5473 {
5474 	int base_size = IWX_SCAN_REQ_UMAC_SIZE_V1;
5475 	int tail_size;
5476 
5477 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
5478 		base_size = IWX_SCAN_REQ_UMAC_SIZE_V8;
5479 	else if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL))
5480 		base_size = IWX_SCAN_REQ_UMAC_SIZE_V7;
5481 #ifdef notyet
5482 	else if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22000)
5483 		base_size = IWX_SCAN_REQ_UMAC_SIZE_V6;
5484 #endif
5485 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
5486 		tail_size = sizeof(struct iwx_scan_req_umac_tail_v2);
5487 	else
5488 		tail_size = sizeof(struct iwx_scan_req_umac_tail_v1);
5489 
5490 	return base_size + sizeof(struct iwx_scan_channel_cfg_umac) *
5491 	    sc->sc_capa_n_scan_channels + tail_size;
5492 }
5493 
5494 struct iwx_scan_umac_chan_param *
5495 iwx_get_scan_req_umac_chan_param(struct iwx_softc *sc,
5496     struct iwx_scan_req_umac *req)
5497 {
5498 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
5499 		return &req->v8.channel;
5500 
5501 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL))
5502 		return &req->v7.channel;
5503 #ifdef notyet
5504 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22000)
5505 		return &req->v6.channel;
5506 #endif
5507 	return &req->v1.channel;
5508 }
5509 
5510 void *
5511 iwx_get_scan_req_umac_data(struct iwx_softc *sc, struct iwx_scan_req_umac *req)
5512 {
5513 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
5514 		return (void *)&req->v8.data;
5515 
5516 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL))
5517 		return (void *)&req->v7.data;
5518 #ifdef notyet
5519 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22000)
5520 		return (void *)&req->v6.data;
5521 #endif
5522 	return (void *)&req->v1.data;
5523 
5524 }
5525 
5526 /* adaptive dwell max budget time [TU] for full scan */
5527 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
5528 /* adaptive dwell max budget time [TU] for directed scan */
5529 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
5530 /* adaptive dwell default high band APs number */
5531 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
5532 /* adaptive dwell default low band APs number */
5533 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
5534 /* adaptive dwell default APs number in social channels (1, 6, 11) */
5535 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
5536 
5537 int
5538 iwx_umac_scan(struct iwx_softc *sc, int bgscan)
5539 {
5540 	struct ieee80211com *ic = &sc->sc_ic;
5541 	struct iwx_host_cmd hcmd = {
5542 		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
5543 		.len = { 0, },
5544 		.data = { NULL, },
5545 		.flags = 0,
5546 	};
5547 	struct iwx_scan_req_umac *req;
5548 	void *cmd_data, *tail_data;
5549 	struct iwx_scan_req_umac_tail_v2 *tail;
5550 	struct iwx_scan_req_umac_tail_v1 *tailv1;
5551 	struct iwx_scan_umac_chan_param *chanparam;
5552 	size_t req_len;
5553 	int err, async = bgscan;
5554 
5555 	req_len = iwx_umac_scan_size(sc);
5556 	if ((req_len < IWX_SCAN_REQ_UMAC_SIZE_V1 +
5557 	    sizeof(struct iwx_scan_req_umac_tail_v1)) ||
5558 	    req_len > IWX_MAX_CMD_PAYLOAD_SIZE)
5559 		return ERANGE;
5560 	req = malloc(req_len, M_DEVBUF,
5561 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
5562 	if (req == NULL)
5563 		return ENOMEM;
5564 
5565 	hcmd.len[0] = (uint16_t)req_len;
5566 	hcmd.data[0] = (void *)req;
5567 	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
5568 
5569 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL)) {
5570 		req->v7.adwell_default_n_aps_social =
5571 			IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
5572 		req->v7.adwell_default_n_aps =
5573 			IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
5574 
5575 		if (ic->ic_des_esslen != 0)
5576 			req->v7.adwell_max_budget =
5577 			    htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
5578 		else
5579 			req->v7.adwell_max_budget =
5580 			    htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
5581 
5582 		req->v7.scan_priority = htole32(IWX_SCAN_PRIORITY_HIGH);
5583 		req->v7.max_out_time[IWX_SCAN_LB_LMAC_IDX] = 0;
5584 		req->v7.suspend_time[IWX_SCAN_LB_LMAC_IDX] = 0;
5585 
5586 		if (isset(sc->sc_ucode_api,
5587 		    IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
5588 			req->v8.active_dwell[IWX_SCAN_LB_LMAC_IDX] = 10;
5589 			req->v8.passive_dwell[IWX_SCAN_LB_LMAC_IDX] = 110;
5590 		} else {
5591 			req->v7.active_dwell = 10;
5592 			req->v7.passive_dwell = 110;
5593 			req->v7.fragmented_dwell = 44;
5594 		}
5595 	} else {
5596 		/* These timings correspond to iwlwifi's UNASSOC scan. */
5597 		req->v1.active_dwell = 10;
5598 		req->v1.passive_dwell = 110;
5599 		req->v1.fragmented_dwell = 44;
5600 		req->v1.extended_dwell = 90;
5601 
5602 		req->v1.scan_priority = htole32(IWX_SCAN_PRIORITY_HIGH);
5603 	}
5604 
5605 	if (bgscan) {
5606 		const uint32_t timeout = htole32(120);
5607 		if (isset(sc->sc_ucode_api,
5608 		    IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
5609 			req->v8.max_out_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
5610 			req->v8.suspend_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
5611 		} else if (isset(sc->sc_ucode_api,
5612 		    IWX_UCODE_TLV_API_ADAPTIVE_DWELL)) {
5613 			req->v7.max_out_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
5614 			req->v7.suspend_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
5615 		} else {
5616 			req->v1.max_out_time = timeout;
5617 			req->v1.suspend_time = timeout;
5618 		}
5619 	}
5620 
5621 	req->ooc_priority = htole32(IWX_SCAN_PRIORITY_HIGH);
5622 
5623 	cmd_data = iwx_get_scan_req_umac_data(sc, req);
5624 	chanparam = iwx_get_scan_req_umac_chan_param(sc, req);
5625 	chanparam->count = iwx_umac_scan_fill_channels(sc,
5626 	    (struct iwx_scan_channel_cfg_umac *)cmd_data,
5627 	    ic->ic_des_esslen != 0, bgscan);
5628 	chanparam->flags = 0;
5629 
5630 	tail_data = cmd_data + sizeof(struct iwx_scan_channel_cfg_umac) *
5631 	    sc->sc_capa_n_scan_channels;
5632 	tail = tail_data;
5633 	/* tail v1 layout differs in preq and direct_scan member fields. */
5634 	tailv1 = tail_data;
5635 
5636 	req->general_flags = htole32(IWX_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
5637 	    IWX_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
5638 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
5639 		req->v8.general_flags2 =
5640 			IWX_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
5641 	}
5642 
5643 #if 0 /* XXX Active scan causes firmware errors after association. */
5644 	/* Check if we're doing an active directed scan. */
5645 	if (ic->ic_des_esslen != 0) {
5646 		if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
5647 			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5648 			tail->direct_scan[0].len = ic->ic_des_esslen;
5649 			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
5650 			    ic->ic_des_esslen);
5651 		} else {
5652 			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5653 			tailv1->direct_scan[0].len = ic->ic_des_esslen;
5654 			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
5655 			    ic->ic_des_esslen);
5656 		}
5657 		req->general_flags |=
5658 		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
5659 	} else
5660 #endif
5661 		req->general_flags |= htole32(IWX_UMAC_SCAN_GEN_FLAGS_PASSIVE);
5662 
5663 	if (isset(sc->sc_enabled_capa,
5664 	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5665 		req->general_flags |=
5666 		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
5667 
5668 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL)) {
5669 		req->general_flags |=
5670 		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
5671 	} else {
5672 		req->general_flags |=
5673 		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
5674 	}
5675 
5676 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
5677 		err = iwx_fill_probe_req(sc, &tail->preq);
5678 	else
5679 		err = iwx_fill_probe_req_v1(sc, &tailv1->preq);
5680 	if (err) {
5681 		free(req, M_DEVBUF, req_len);
5682 		return err;
5683 	}
5684 
5685 	/* Specify the scan plan: We'll do one iteration. */
5686 	tail->schedule[0].interval = 0;
5687 	tail->schedule[0].iter_count = 1;
5688 
5689 	err = iwx_send_cmd(sc, &hcmd);
5690 	free(req, M_DEVBUF, req_len);
5691 	return err;
5692 }
5693 
5694 void
5695 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
5696 {
5697 	struct ieee80211com *ic = &sc->sc_ic;
5698 	struct ifnet *ifp = IC2IFP(ic);
5699 	char alpha2[3];
5700 
5701 	snprintf(alpha2, sizeof(alpha2), "%c%c",
5702 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
5703 
5704 	if (ifp->if_flags & IFF_DEBUG) {
5705 		printf("%s: firmware has detected regulatory domain '%s' "
5706 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
5707 	}
5708 
5709 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
5710 }
5711 
5712 uint8_t
5713 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5714 {
5715 	int i;
5716 	uint8_t rval;
5717 
5718 	for (i = 0; i < rs->rs_nrates; i++) {
5719 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5720 		if (rval == iwx_rates[ridx].rate)
5721 			return rs->rs_rates[i];
5722 	}
5723 
5724 	return 0;
5725 }
5726 
5727 int
5728 iwx_rval2ridx(int rval)
5729 {
5730 	int ridx;
5731 
5732 	for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
5733 		if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
5734 			continue;
5735 		if (rval == iwx_rates[ridx].rate)
5736 			break;
5737 	}
5738 
5739        return ridx;
5740 }
5741 
5742 void
5743 iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
5744     int *ofdm_rates)
5745 {
5746 	struct ieee80211_node *ni = &in->in_ni;
5747 	struct ieee80211_rateset *rs = &ni->ni_rates;
5748 	int lowest_present_ofdm = -1;
5749 	int lowest_present_cck = -1;
5750 	uint8_t cck = 0;
5751 	uint8_t ofdm = 0;
5752 	int i;
5753 
5754 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
5755 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
5756 		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
5757 			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5758 				continue;
5759 			cck |= (1 << i);
5760 			if (lowest_present_cck == -1 || lowest_present_cck > i)
5761 				lowest_present_cck = i;
5762 		}
5763 	}
5764 	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
5765 		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5766 			continue;
5767 		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
5768 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
5769 			lowest_present_ofdm = i;
5770 	}
5771 
5772 	/*
5773 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
5774 	 * variables. This isn't sufficient though, as there might not
5775 	 * be all the right rates in the bitmap. E.g. if the only basic
5776 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
5777 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
5778 	 *
5779 	 *    [...] a STA responding to a received frame shall transmit
5780 	 *    its Control Response frame [...] at the highest rate in the
5781 	 *    BSSBasicRateSet parameter that is less than or equal to the
5782 	 *    rate of the immediately previous frame in the frame exchange
5783 	 *    sequence ([...]) and that is of the same modulation class
5784 	 *    ([...]) as the received frame. If no rate contained in the
5785 	 *    BSSBasicRateSet parameter meets these conditions, then the
5786 	 *    control frame sent in response to a received frame shall be
5787 	 *    transmitted at the highest mandatory rate of the PHY that is
5788 	 *    less than or equal to the rate of the received frame, and
5789 	 *    that is of the same modulation class as the received frame.
5790 	 *
5791 	 * As a consequence, we need to add all mandatory rates that are
5792 	 * lower than all of the basic rates to these bitmaps.
5793 	 */
5794 
5795 	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
5796 		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
5797 	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
5798 		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
5799 	/* 6M already there or needed so always add */
5800 	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;
5801 
5802 	/*
5803 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
5804 	 * Note, however:
5805 	 *  - if no CCK rates are basic, it must be ERP since there must
5806 	 *    be some basic rates at all, so they're OFDM => ERP PHY
5807 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
5808 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
5809 	 *  - if 5.5M is basic, 1M and 2M are mandatory
5810 	 *  - if 2M is basic, 1M is mandatory
5811 	 *  - if 1M is basic, that's the only valid ACK rate.
5812 	 * As a consequence, it's not as complicated as it sounds, just add
5813 	 * any lower rates to the ACK rate bitmap.
5814 	 */
5815 	if (IWX_RATE_11M_INDEX < lowest_present_cck)
5816 		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
5817 	if (IWX_RATE_5M_INDEX < lowest_present_cck)
5818 		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
5819 	if (IWX_RATE_2M_INDEX < lowest_present_cck)
5820 		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
5821 	/* 1M already there or needed so always add */
5822 	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;
5823 
5824 	*cck_rates = cck;
5825 	*ofdm_rates = ofdm;
5826 }
5827 
5828 void
5829 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
5830     struct iwx_mac_ctx_cmd *cmd, uint32_t action)
5831 {
5832 #define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
5833 	struct ieee80211com *ic = &sc->sc_ic;
5834 	struct ieee80211_node *ni = ic->ic_bss;
5835 	int cck_ack_rates, ofdm_ack_rates;
5836 	int i;
5837 
5838 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5839 	    in->in_color));
5840 	cmd->action = htole32(action);
5841 
5842 	if (action == IWX_FW_CTXT_ACTION_REMOVE)
5843 		return;
5844 
5845 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
5846 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
5847 	else if (ic->ic_opmode == IEEE80211_M_STA)
5848 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
5849 	else
5850 		panic("unsupported operating mode %d", ic->ic_opmode);
5851 	cmd->tsf_id = htole32(IWX_TSF_ID_A);
5852 
5853 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
5854 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5855 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
5856 		return;
5857 	}
5858 
5859 	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
5860 	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
5861 	cmd->cck_rates = htole32(cck_ack_rates);
5862 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
5863 
5864 	cmd->cck_short_preamble
5865 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5866 	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
5867 	cmd->short_slot
5868 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
5869 	      ? IWX_MAC_FLG_SHORT_SLOT : 0);
5870 
5871 	for (i = 0; i < EDCA_NUM_AC; i++) {
5872 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
5873 		int txf = iwx_ac_to_tx_fifo[i];
5874 
5875 		cmd->ac[txf].cw_min = htole16(IWX_EXP2(ac->ac_ecwmin));
5876 		cmd->ac[txf].cw_max = htole16(IWX_EXP2(ac->ac_ecwmax));
5877 		cmd->ac[txf].aifsn = ac->ac_aifsn;
5878 		cmd->ac[txf].fifos_mask = (1 << txf);
5879 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
5880 	}
5881 	if (ni->ni_flags & IEEE80211_NODE_QOS)
5882 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
5883 
5884 	if (ni->ni_flags & IEEE80211_NODE_HT) {
5885 		enum ieee80211_htprot htprot =
5886 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
5887 		switch (htprot) {
5888 		case IEEE80211_HTPROT_NONE:
5889 			break;
5890 		case IEEE80211_HTPROT_NONMEMBER:
5891 		case IEEE80211_HTPROT_NONHT_MIXED:
5892 			cmd->protection_flags |=
5893 			    htole32(IWX_MAC_PROT_FLG_HT_PROT);
5894 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
5895 				cmd->protection_flags |=
5896 				    htole32(IWX_MAC_PROT_FLG_SELF_CTS_EN);
5897 			break;
5898 		case IEEE80211_HTPROT_20MHZ:
5899 			if (ic->ic_htcaps & IEEE80211_HTCAP_CBW20_40) {
5900 				/* XXX ... and if our channel is 40 MHz ... */
5901 				cmd->protection_flags |=
5902 				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
5903 				    IWX_MAC_PROT_FLG_FAT_PROT);
5904 				if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
5905 					cmd->protection_flags |= htole32(
5906 					    IWX_MAC_PROT_FLG_SELF_CTS_EN);
5907 			}
5908 			break;
5909 		default:
5910 			break;
5911 		}
5912 
5913 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
5914 	}
5915 	if (ic->ic_flags & IEEE80211_F_USEPROT)
5916 		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
5917 
5918 	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
5919 #undef IWX_EXP2
5920 }
5921 
5922 void
5923 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
5924     struct iwx_mac_data_sta *sta, int assoc)
5925 {
5926 	struct ieee80211_node *ni = &in->in_ni;
5927 	uint32_t dtim_off;
5928 	uint64_t tsf;
5929 
5930 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
5931 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
5932 	tsf = letoh64(tsf);
5933 
5934 	sta->is_assoc = htole32(assoc);
5935 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5936 	sta->dtim_tsf = htole64(tsf + dtim_off);
5937 	sta->bi = htole32(ni->ni_intval);
5938 	sta->bi_reciprocal = htole32(iwx_reciprocal(ni->ni_intval));
5939 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
5940 	sta->dtim_reciprocal = htole32(iwx_reciprocal(sta->dtim_interval));
5941 	sta->listen_interval = htole32(10);
5942 	sta->assoc_id = htole32(ni->ni_associd);
5943 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5944 }
5945 
5946 int
5947 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
5948     int assoc)
5949 {
5950 	struct ieee80211com *ic = &sc->sc_ic;
5951 	struct ieee80211_node *ni = &in->in_ni;
5952 	struct iwx_mac_ctx_cmd cmd;
5953 	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
5954 
5955 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
5956 		panic("MAC already added");
5957 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
5958 		panic("MAC already removed");
5959 
5960 	memset(&cmd, 0, sizeof(cmd));
5961 
5962 	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
5963 
5964 	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
5965 		return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
5966 		    sizeof(cmd), &cmd);
5967 	}
5968 
5969 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5970 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
5971 		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
5972 		    IWX_MAC_FILTER_ACCEPT_GRP |
5973 		    IWX_MAC_FILTER_IN_BEACON |
5974 		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
5975 		    IWX_MAC_FILTER_IN_CRC32);
5976 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
5977 		/*
5978 		 * Allow beacons to pass through as long as we are not
5979 		 * associated or we do not have dtim period information.
5980 		 */
5981 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
5982 	else
5983 		iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5984 
5985 	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5986 }
5987 
5988 int
5989 iwx_clear_statistics(struct iwx_softc *sc)
5990 {
5991 	struct iwx_statistics_cmd scmd = {
5992 		.flags = htole32(IWX_STATISTICS_FLG_CLEAR)
5993 	};
5994 	struct iwx_host_cmd cmd = {
5995 		.id = IWX_STATISTICS_CMD,
5996 		.len[0] = sizeof(scmd),
5997 		.data[0] = &scmd,
5998 		.flags = IWX_CMD_WANT_RESP,
5999 		.resp_pkt_len = sizeof(struct iwx_notif_statistics),
6000 	};
6001 	int err;
6002 
6003 	err = iwx_send_cmd(sc, &cmd);
6004 	if (err)
6005 		return err;
6006 
6007 	iwx_free_resp(sc, &cmd);
6008 	return 0;
6009 }
6010 
6011 int
6012 iwx_update_quotas(struct iwx_softc *sc, struct iwx_node *in, int running)
6013 {
6014 	struct iwx_time_quota_cmd cmd;
6015 	int i, idx, num_active_macs, quota, quota_rem;
6016 	int colors[IWX_MAX_BINDINGS] = { -1, -1, -1, -1, };
6017 	int n_ifs[IWX_MAX_BINDINGS] = {0, };
6018 	uint16_t id;
6019 
6020 	memset(&cmd, 0, sizeof(cmd));
6021 
6022 	/* currently, PHY ID == binding ID */
6023 	if (in && in->in_phyctxt) {
6024 		id = in->in_phyctxt->id;
6025 		KASSERT(id < IWX_MAX_BINDINGS);
6026 		colors[id] = in->in_phyctxt->color;
6027 		if (running)
6028 			n_ifs[id] = 1;
6029 	}
6030 
6031 	/*
6032 	 * The FW's scheduling session consists of
6033 	 * IWX_MAX_QUOTA fragments. Divide these fragments
6034 	 * equally between all the bindings that require quota
6035 	 */
6036 	num_active_macs = 0;
6037 	for (i = 0; i < IWX_MAX_BINDINGS; i++) {
6038 		cmd.quotas[i].id_and_color = htole32(IWX_FW_CTXT_INVALID);
6039 		num_active_macs += n_ifs[i];
6040 	}
6041 
6042 	quota = 0;
6043 	quota_rem = 0;
6044 	if (num_active_macs) {
6045 		quota = IWX_MAX_QUOTA / num_active_macs;
6046 		quota_rem = IWX_MAX_QUOTA % num_active_macs;
6047 	}
6048 
6049 	for (idx = 0, i = 0; i < IWX_MAX_BINDINGS; i++) {
6050 		if (colors[i] < 0)
6051 			continue;
6052 
6053 		cmd.quotas[idx].id_and_color =
6054 			htole32(IWX_FW_CMD_ID_AND_COLOR(i, colors[i]));
6055 
6056 		if (n_ifs[i] <= 0) {
6057 			cmd.quotas[idx].quota = htole32(0);
6058 			cmd.quotas[idx].max_duration = htole32(0);
6059 		} else {
6060 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
6061 			cmd.quotas[idx].max_duration = htole32(0);
6062 		}
6063 		idx++;
6064 	}
6065 
6066 	/* Give the remainder of the session to the first binding */
6067 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
6068 
6069 	return iwx_send_cmd_pdu(sc, IWX_TIME_QUOTA_CMD, 0,
6070 	    sizeof(cmd), &cmd);
6071 }
6072 
6073 void
6074 iwx_add_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
6075 {
6076 	int s = splnet();
6077 
6078 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
6079 		splx(s);
6080 		return;
6081 	}
6082 
6083 	refcnt_take(&sc->task_refs);
6084 	if (!task_add(taskq, task))
6085 		refcnt_rele_wake(&sc->task_refs);
6086 	splx(s);
6087 }
6088 
6089 void
6090 iwx_del_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
6091 {
6092 	if (task_del(taskq, task))
6093 		refcnt_rele(&sc->task_refs);
6094 }
6095 
6096 int
6097 iwx_scan(struct iwx_softc *sc)
6098 {
6099 	struct ieee80211com *ic = &sc->sc_ic;
6100 	struct ifnet *ifp = IC2IFP(ic);
6101 	int err;
6102 
6103 	if (sc->sc_flags & IWX_FLAG_BGSCAN) {
6104 		err = iwx_scan_abort(sc);
6105 		if (err) {
6106 			printf("%s: could not abort background scan\n",
6107 			    DEVNAME(sc));
6108 			return err;
6109 		}
6110 	}
6111 
6112 	err = iwx_umac_scan(sc, 0);
6113 	if (err) {
6114 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6115 		return err;
6116 	}
6117 
6118 	/*
6119 	 * The current mode might have been fixed during association.
6120 	 * Ensure all channels get scanned.
6121 	 */
6122 	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
6123 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
6124 
6125 	sc->sc_flags |= IWX_FLAG_SCANNING;
6126 	if (ifp->if_flags & IFF_DEBUG)
6127 		printf("%s: %s -> %s\n", ifp->if_xname,
6128 		    ieee80211_state_name[ic->ic_state],
6129 		    ieee80211_state_name[IEEE80211_S_SCAN]);
6130 	if ((sc->sc_flags & IWX_FLAG_BGSCAN) == 0) {
6131 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
6132 		ieee80211_node_cleanup(ic, ic->ic_bss);
6133 	}
6134 	ic->ic_state = IEEE80211_S_SCAN;
6135 	wakeup(&ic->ic_state); /* wake iwx_init() */
6136 
6137 	return 0;
6138 }
6139 
6140 int
6141 iwx_bgscan(struct ieee80211com *ic)
6142 {
6143 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
6144 	int err;
6145 
6146 	if (sc->sc_flags & IWX_FLAG_SCANNING)
6147 		return 0;
6148 
6149 	err = iwx_umac_scan(sc, 1);
6150 	if (err) {
6151 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6152 		return err;
6153 	}
6154 
6155 	sc->sc_flags |= IWX_FLAG_BGSCAN;
6156 	return 0;
6157 }
6158 
6159 int
6160 iwx_umac_scan_abort(struct iwx_softc *sc)
6161 {
6162 	struct iwx_umac_scan_abort cmd = { 0 };
6163 
6164 	return iwx_send_cmd_pdu(sc,
6165 	    IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC),
6166 	    0, sizeof(cmd), &cmd);
6167 }
6168 
6169 int
6170 iwx_scan_abort(struct iwx_softc *sc)
6171 {
6172 	int err;
6173 
6174 	err = iwx_umac_scan_abort(sc);
6175 	if (err == 0)
6176 		sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
6177 	return err;
6178 }
6179 
6180 int
6181 iwx_enable_data_tx_queues(struct iwx_softc *sc)
6182 {
6183 	int err, ac;
6184 
6185 	for (ac = 0; ac < EDCA_NUM_AC; ac++) {
6186 		int qid = ac + IWX_DQA_AUX_QUEUE + 1;
6187 		/*
6188 		 * Regular data frames use the "MGMT" TID and queue.
6189 		 * Other TIDs and queues are reserved for frame aggregation.
6190 		 */
6191 		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, IWX_TID_NON_QOS,
6192 		    IWX_TX_RING_COUNT);
6193 		if (err) {
6194 			printf("%s: could not enable Tx queue %d (error %d)\n",
6195 			    DEVNAME(sc), ac, err);
6196 			return err;
6197 		}
6198 	}
6199 
6200 	return 0;
6201 }
6202 
6203 int
6204 iwx_rs_rval2idx(uint8_t rval)
6205 {
6206 	/* Firmware expects indices which match our 11g rate set. */
6207 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
6208 	int i;
6209 
6210 	for (i = 0; i < rs->rs_nrates; i++) {
6211 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
6212 			return i;
6213 	}
6214 
6215 	return -1;
6216 }
6217 
6218 uint16_t
6219 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
6220 {
6221 	struct ieee80211com *ic = &sc->sc_ic;
6222 	const struct ieee80211_ht_rateset *rs;
6223 	uint16_t htrates = 0;
6224 	int mcs;
6225 
6226 	rs = &ieee80211_std_ratesets_11n[rsidx];
6227 	for (mcs = rs->min_mcs; mcs <= rs->max_mcs; mcs++) {
6228 		if (!isset(ni->ni_rxmcs, mcs) ||
6229 		    !isset(ic->ic_sup_mcs, mcs))
6230 			continue;
6231 		htrates |= (1 << (mcs - rs->min_mcs));
6232 	}
6233 
6234 	return htrates;
6235 }
6236 
6237 int
6238 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
6239 {
6240 	struct ieee80211_node *ni = &in->in_ni;
6241 	struct ieee80211_rateset *rs = &ni->ni_rates;
6242 	struct iwx_tlc_config_cmd cfg_cmd;
6243 	uint32_t cmd_id;
6244 	int i;
6245 
6246 	memset(&cfg_cmd, 0, sizeof(cfg_cmd));
6247 
6248 	for (i = 0; i < rs->rs_nrates; i++) {
6249 		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
6250 		int idx = iwx_rs_rval2idx(rval);
6251 		if (idx == -1)
6252 			return EINVAL;
6253 		cfg_cmd.non_ht_rates |= (1 << idx);
6254 	}
6255 
6256 	if (ni->ni_flags & IEEE80211_NODE_HT) {
6257 		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
6258 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_HT_BW_NONE_160] =
6259 		    iwx_rs_ht_rates(sc, ni, IEEE80211_HT_RATESET_SISO);
6260 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_HT_BW_NONE_160] =
6261 		    iwx_rs_ht_rates(sc, ni, IEEE80211_HT_RATESET_MIMO2);
6262 	} else
6263 		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
6264 
6265 	cfg_cmd.sta_id = IWX_STATION_ID;
6266 	cfg_cmd.max_ch_width = IWX_RATE_MCS_CHAN_WIDTH_20;
6267 	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
6268 	cfg_cmd.max_mpdu_len = IEEE80211_MAX_LEN;
6269 	if (ieee80211_node_supports_ht_sgi20(ni))
6270 		cfg_cmd.sgi_ch_width_supp = (1 << IWX_TLC_MNG_CH_WIDTH_20MHZ);
6271 
6272 	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
6273 	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, sizeof(cfg_cmd),
6274 	    &cfg_cmd);
6275 }
6276 
6277 void
6278 iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
6279 {
6280 	struct ieee80211com *ic = &sc->sc_ic;
6281 	struct ieee80211_node *ni = ic->ic_bss;
6282 	struct ieee80211_rateset *rs = &ni->ni_rates;
6283 	uint32_t rate_n_flags;
6284 	int i;
6285 
6286 	if (notif->sta_id != IWX_STATION_ID ||
6287 	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
6288 		return;
6289 
6290 	rate_n_flags = le32toh(notif->rate);
6291 	if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
6292 		ni->ni_txmcs = (rate_n_flags &
6293 		    (IWX_RATE_HT_MCS_RATE_CODE_MSK |
6294 		    IWX_RATE_HT_MCS_NSS_MSK));
6295 	} else {
6296 		uint8_t plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
6297 		uint8_t rval = 0;
6298 		for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
6299 			if (iwx_rates[i].plcp == plcp) {
6300 				rval = iwx_rates[i].rate;
6301 				break;
6302 			}
6303 		}
6304 		if (rval) {
6305 			uint8_t rv;
6306 			for (i = 0; i < rs->rs_nrates; i++) {
6307 				rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
6308 				if (rv == rval) {
6309 					ni->ni_txrate = i;
6310 					break;
6311 				}
6312 			}
6313 		}
6314 	}
6315 }
6316 
6317 int
6318 iwx_auth(struct iwx_softc *sc)
6319 {
6320 	struct ieee80211com *ic = &sc->sc_ic;
6321 	struct iwx_node *in = (void *)ic->ic_bss;
6322 	uint32_t duration;
6323 	int generation = sc->sc_generation, err;
6324 
6325 	splassert(IPL_NET);
6326 
6327 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6328 		sc->sc_phyctxt[0].channel = ic->ic_ibss_chan;
6329 	else
6330 		sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
6331 	err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
6332 	    IWX_FW_CTXT_ACTION_MODIFY, 0);
6333 	if (err) {
6334 		printf("%s: could not update PHY context (error %d)\n",
6335 		    DEVNAME(sc), err);
6336 		return err;
6337 	}
6338 	in->in_phyctxt = &sc->sc_phyctxt[0];
6339 
6340 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
6341 	if (err) {
6342 		printf("%s: could not add MAC context (error %d)\n",
6343 		    DEVNAME(sc), err);
6344 		return err;
6345  	}
6346 	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;
6347 
6348 	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
6349 	if (err) {
6350 		printf("%s: could not add binding (error %d)\n",
6351 		    DEVNAME(sc), err);
6352 		goto rm_mac_ctxt;
6353 	}
6354 	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;
6355 
6356 	err = iwx_add_sta_cmd(sc, in, 0);
6357 	if (err) {
6358 		printf("%s: could not add sta (error %d)\n",
6359 		    DEVNAME(sc), err);
6360 		goto rm_binding;
6361 	}
6362 	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;
6363 
6364 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6365 		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
6366 		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
6367 		    IWX_TX_RING_COUNT);
6368 		if (err)
6369 			goto rm_sta;
6370 		return 0;
6371 	}
6372 
6373 	err = iwx_enable_data_tx_queues(sc);
6374 	if (err)
6375 		goto rm_sta;
6376 
6377 	err = iwx_clear_statistics(sc);
6378 	if (err)
6379 		goto rm_sta;
6380 
6381 	/*
6382 	 * Prevent the FW from wandering off channel during association
6383 	 * by "protecting" the session with a time event.
6384 	 */
6385 	if (in->in_ni.ni_intval)
6386 		duration = in->in_ni.ni_intval * 2;
6387 	else
6388 		duration = IEEE80211_DUR_TU;
6389 	iwx_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
6390 
6391 	return 0;
6392 
6393 rm_sta:
6394 	if (generation == sc->sc_generation) {
6395 		iwx_rm_sta_cmd(sc, in);
6396 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
6397 	}
6398 rm_binding:
6399 	if (generation == sc->sc_generation) {
6400 		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
6401 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
6402 	}
6403 rm_mac_ctxt:
6404 	if (generation == sc->sc_generation) {
6405 		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
6406 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
6407 	}
6408 	return err;
6409 }
6410 
6411 int
6412 iwx_deauth(struct iwx_softc *sc)
6413 {
6414 	struct ieee80211com *ic = &sc->sc_ic;
6415 	struct iwx_node *in = (void *)ic->ic_bss;
6416 	int err;
6417 
6418 	splassert(IPL_NET);
6419 
6420 	iwx_unprotect_session(sc, in);
6421 
6422 	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
6423 		err = iwx_flush_tx_path(sc);
6424 		if (err) {
6425 			printf("%s: could not flush Tx path (error %d)\n",
6426 			    DEVNAME(sc), err);
6427 			return err;
6428 		}
6429 		err = iwx_rm_sta_cmd(sc, in);
6430 		if (err) {
6431 			printf("%s: could not remove STA (error %d)\n",
6432 			    DEVNAME(sc), err);
6433 			return err;
6434 		}
6435 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
6436 		sc->sc_rx_ba_sessions = 0;
6437 		in->in_flags = 0;
6438 	}
6439 
6440 	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
6441 		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
6442 		if (err) {
6443 			printf("%s: could not remove binding (error %d)\n",
6444 			    DEVNAME(sc), err);
6445 			return err;
6446 		}
6447 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
6448 	}
6449 
6450 	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
6451 		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
6452 		if (err) {
6453 			printf("%s: could not remove MAC context (error %d)\n",
6454 			    DEVNAME(sc), err);
6455 			return err;
6456 		}
6457 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
6458 	}
6459 
6460 	return 0;
6461 }
6462 
6463 int
6464 iwx_assoc(struct iwx_softc *sc)
6465 {
6466 	struct ieee80211com *ic = &sc->sc_ic;
6467 	struct iwx_node *in = (void *)ic->ic_bss;
6468 	int update_sta = (sc->sc_flags & IWX_FLAG_STA_ACTIVE);
6469 	int err;
6470 
6471 	splassert(IPL_NET);
6472 
6473 	err = iwx_add_sta_cmd(sc, in, update_sta);
6474 	if (err) {
6475 		printf("%s: could not %s STA (error %d)\n",
6476 		    DEVNAME(sc), update_sta ? "update" : "add", err);
6477 		return err;
6478 	}
6479 
6480 	if (!update_sta)
6481 		err = iwx_enable_data_tx_queues(sc);
6482 
6483 	return err;
6484 }
6485 
6486 int
6487 iwx_disassoc(struct iwx_softc *sc)
6488 {
6489 	struct ieee80211com *ic = &sc->sc_ic;
6490 	struct iwx_node *in = (void *)ic->ic_bss;
6491 	int err;
6492 
6493 	splassert(IPL_NET);
6494 
6495 	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
6496 		err = iwx_rm_sta_cmd(sc, in);
6497 		if (err) {
6498 			printf("%s: could not remove STA (error %d)\n",
6499 			    DEVNAME(sc), err);
6500 			return err;
6501 		}
6502 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
6503 		in->in_flags = 0;
6504 		sc->sc_rx_ba_sessions = 0;
6505 		sc->ba_start_tidmask = 0;
6506 		sc->ba_stop_tidmask = 0;
6507 		sc->ba_start_tidmask = 0;
6508 		sc->ba_stop_tidmask = 0;
6509 	}
6510 
6511 	return 0;
6512 }
6513 
6514 int
6515 iwx_run(struct iwx_softc *sc)
6516 {
6517 	struct ieee80211com *ic = &sc->sc_ic;
6518 	struct iwx_node *in = (void *)ic->ic_bss;
6519 	int err;
6520 
6521 	splassert(IPL_NET);
6522 
6523 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6524 		/* Add a MAC context and a sniffing STA. */
6525 		err = iwx_auth(sc);
6526 		if (err)
6527 			return err;
6528 	}
6529 
6530 	/* Configure Rx chains for MIMO. */
6531 	if ((ic->ic_opmode == IEEE80211_M_MONITOR ||
6532 	    (in->in_ni.ni_flags & IEEE80211_NODE_HT)) &&
6533 	    iwx_mimo_enabled(sc)) {
6534 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0],
6535 		    2, 2, IWX_FW_CTXT_ACTION_MODIFY, 0);
6536 		if (err) {
6537 			printf("%s: failed to update PHY\n",
6538 			    DEVNAME(sc));
6539 			return err;
6540 		}
6541 	}
6542 
6543 	/* We have now been assigned an associd by the AP. */
6544 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
6545 	if (err) {
6546 		printf("%s: failed to update MAC\n", DEVNAME(sc));
6547 		return err;
6548 	}
6549 
6550 	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
6551 	if (err) {
6552 		printf("%s: could not set sf full on (error %d)\n",
6553 		    DEVNAME(sc), err);
6554 		return err;
6555 	}
6556 
6557 	err = iwx_allow_mcast(sc);
6558 	if (err) {
6559 		printf("%s: could not allow mcast (error %d)\n",
6560 		    DEVNAME(sc), err);
6561 		return err;
6562 	}
6563 
6564 	err = iwx_power_update_device(sc);
6565 	if (err) {
6566 		printf("%s: could not send power command (error %d)\n",
6567 		    DEVNAME(sc), err);
6568 		return err;
6569 	}
6570 #ifdef notyet
6571 	/*
6572 	 * Disabled for now. Default beacon filter settings
6573 	 * prevent net80211 from getting ERP and HT protection
6574 	 * updates from beacons.
6575 	 */
6576 	err = iwx_enable_beacon_filter(sc, in);
6577 	if (err) {
6578 		printf("%s: could not enable beacon filter\n",
6579 		    DEVNAME(sc));
6580 		return err;
6581 	}
6582 #endif
6583 	err = iwx_power_mac_update_mode(sc, in);
6584 	if (err) {
6585 		printf("%s: could not update MAC power (error %d)\n",
6586 		    DEVNAME(sc), err);
6587 		return err;
6588 	}
6589 
6590 	if (!isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
6591 		err = iwx_update_quotas(sc, in, 1);
6592 		if (err) {
6593 			printf("%s: could not update quotas (error %d)\n",
6594 			    DEVNAME(sc), err);
6595 			return err;
6596 		}
6597 	}
6598 
6599 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6600 		return 0;
6601 
6602 	/* Start at lowest available bit-rate. Firmware will raise. */
6603 	in->in_ni.ni_txrate = 0;
6604 	in->in_ni.ni_txmcs = 0;
6605 
6606 	err = iwx_rs_init(sc, in);
6607 	if (err) {
6608 		printf("%s: could not init rate scaling (error %d)\n",
6609 		    DEVNAME(sc), err);
6610 		return err;
6611 	}
6612 
6613 	return 0;
6614 }
6615 
6616 int
6617 iwx_run_stop(struct iwx_softc *sc)
6618 {
6619 	struct ieee80211com *ic = &sc->sc_ic;
6620 	struct iwx_node *in = (void *)ic->ic_bss;
6621 	int err;
6622 
6623 	splassert(IPL_NET);
6624 
6625 	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
6626 	if (err)
6627 		return err;
6628 
6629 	err = iwx_disable_beacon_filter(sc);
6630 	if (err) {
6631 		printf("%s: could not disable beacon filter (error %d)\n",
6632 		    DEVNAME(sc), err);
6633 		return err;
6634 	}
6635 
6636 	if (!isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
6637 		err = iwx_update_quotas(sc, in, 0);
6638 		if (err) {
6639 			printf("%s: could not update quotas (error %d)\n",
6640 			    DEVNAME(sc), err);
6641 			return err;
6642 		}
6643 	}
6644 
6645 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
6646 	if (err) {
6647 		printf("%s: failed to update MAC\n", DEVNAME(sc));
6648 		return err;
6649 	}
6650 
6651 	/* Reset Tx chains in case MIMO was enabled. */
6652 	if ((in->in_ni.ni_flags & IEEE80211_NODE_HT) &&
6653 	    iwx_mimo_enabled(sc)) {
6654 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
6655 		    IWX_FW_CTXT_ACTION_MODIFY, 0);
6656 		if (err) {
6657 			printf("%s: failed to update PHY\n", DEVNAME(sc));
6658 			return err;
6659 		}
6660 	}
6661 
6662 	return 0;
6663 }
6664 
6665 struct ieee80211_node *
6666 iwx_node_alloc(struct ieee80211com *ic)
6667 {
6668 	return malloc(sizeof (struct iwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
6669 }
6670 
6671 int
6672 iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
6673     struct ieee80211_key *k)
6674 {
6675 	struct iwx_softc *sc = ic->ic_softc;
6676 	struct iwx_setkey_task_arg *a;
6677 
6678 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
6679 		/* Fallback to software crypto for other ciphers. */
6680 		return (ieee80211_set_key(ic, ni, k));
6681 	}
6682 
6683 	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
6684 		return ENOSPC;
6685 
6686 	a = &sc->setkey_arg[sc->setkey_cur];
6687 	a->sta_id = IWX_STATION_ID;
6688 	a->ni = ni;
6689 	a->k = k;
6690 	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
6691 	sc->setkey_nkeys++;
6692 	iwx_add_task(sc, systq, &sc->setkey_task);
6693 	return EBUSY;
6694 }
6695 
6696 int
6697 iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
6698     struct ieee80211_key *k)
6699 {
6700 	struct ieee80211com *ic = &sc->sc_ic;
6701 	struct iwx_node *in = (void *)ni;
6702 	struct iwx_add_sta_key_cmd cmd;
6703 	uint32_t status;
6704 	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
6705 	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
6706 	int err;
6707 
6708 	/*
6709 	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
6710 	 * Currently we only implement station mode where 'ni' is always
6711 	 * ic->ic_bss so there is no need to validate arguments beyond this:
6712 	 */
6713 	KASSERT(ni == ic->ic_bss);
6714 
6715 	memset(&cmd, 0, sizeof(cmd));
6716 
6717 	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
6718 	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
6719 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
6720 	    IWX_STA_KEY_FLG_KEYID_MSK));
6721 	if (k->k_flags & IEEE80211_KEY_GROUP) {
6722 		cmd.common.key_offset = 1;
6723 		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
6724 	} else
6725 		cmd.common.key_offset = 0;
6726 
6727 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
6728 	cmd.common.sta_id = sta_id;
6729 
6730 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
6731 
6732 	status = IWX_ADD_STA_SUCCESS;
6733 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
6734 	    &status);
6735 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
6736 		return ECANCELED;
6737 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
6738 		err = EIO;
6739 	if (err) {
6740 		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
6741 		    IEEE80211_REASON_AUTH_LEAVE);
6742 		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
6743 		return err;
6744 	}
6745 
6746 	if (k->k_flags & IEEE80211_KEY_GROUP)
6747 		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
6748 	else
6749 		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;
6750 
6751 	if ((in->in_flags & want_keymask) == want_keymask) {
6752 		DPRINTF(("marking port %s valid\n",
6753 		    ether_sprintf(ni->ni_macaddr)));
6754 		ni->ni_port_valid = 1;
6755 		ieee80211_set_link_state(ic, LINK_STATE_UP);
6756 	}
6757 
6758 	return 0;
6759 }
6760 
6761 void
6762 iwx_setkey_task(void *arg)
6763 {
6764 	struct iwx_softc *sc = arg;
6765 	struct iwx_setkey_task_arg *a;
6766 	int err = 0, s = splnet();
6767 
6768 	while (sc->setkey_nkeys > 0) {
6769 		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
6770 			break;
6771 		a = &sc->setkey_arg[sc->setkey_tail];
6772 		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
6773 		a->sta_id = 0;
6774 		a->ni = NULL;
6775 		a->k = NULL;
6776 		sc->setkey_tail = (sc->setkey_tail + 1) %
6777 		    nitems(sc->setkey_arg);
6778 		sc->setkey_nkeys--;
6779 	}
6780 
6781 	refcnt_rele_wake(&sc->task_refs);
6782 	splx(s);
6783 }
6784 
6785 void
6786 iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
6787     struct ieee80211_key *k)
6788 {
6789 	struct iwx_softc *sc = ic->ic_softc;
6790 	struct iwx_add_sta_key_cmd cmd;
6791 
6792 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
6793 		/* Fallback to software crypto for other ciphers. */
6794                 ieee80211_delete_key(ic, ni, k);
6795 		return;
6796 	}
6797 
6798 	memset(&cmd, 0, sizeof(cmd));
6799 
6800 	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
6801 	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
6802 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
6803 	    IWX_STA_KEY_FLG_KEYID_MSK));
6804 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
6805 	if (k->k_flags & IEEE80211_KEY_GROUP)
6806 		cmd.common.key_offset = 1;
6807 	else
6808 		cmd.common.key_offset = 0;
6809 	cmd.common.sta_id = IWX_STATION_ID;
6810 
6811 	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
6812 }
6813 
6814 int
6815 iwx_media_change(struct ifnet *ifp)
6816 {
6817 	struct iwx_softc *sc = ifp->if_softc;
6818 	struct ieee80211com *ic = &sc->sc_ic;
6819 	uint8_t rate, ridx;
6820 	int err;
6821 
6822 	err = ieee80211_media_change(ifp);
6823 	if (err != ENETRESET)
6824 		return err;
6825 
6826 	if (ic->ic_fixed_mcs != -1)
6827 		sc->sc_fixed_ridx = iwx_mcs2ridx[ic->ic_fixed_mcs];
6828 	else if (ic->ic_fixed_rate != -1) {
6829 		rate = ic->ic_sup_rates[ic->ic_curmode].
6830 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
6831 		/* Map 802.11 rate to HW rate index. */
6832 		for (ridx = 0; ridx <= IWX_RIDX_MAX; ridx++)
6833 			if (iwx_rates[ridx].rate == rate)
6834 				break;
6835 		sc->sc_fixed_ridx = ridx;
6836 	}
6837 
6838 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6839 	    (IFF_UP | IFF_RUNNING)) {
6840 		iwx_stop(ifp);
6841 		err = iwx_init(ifp);
6842 	}
6843 	return err;
6844 }
6845 
6846 void
6847 iwx_newstate_task(void *psc)
6848 {
6849 	struct iwx_softc *sc = (struct iwx_softc *)psc;
6850 	struct ieee80211com *ic = &sc->sc_ic;
6851 	enum ieee80211_state nstate = sc->ns_nstate;
6852 	enum ieee80211_state ostate = ic->ic_state;
6853 	int arg = sc->ns_arg;
6854 	int err = 0, s = splnet();
6855 
6856 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
6857 		/* iwx_stop() is waiting for us. */
6858 		refcnt_rele_wake(&sc->task_refs);
6859 		splx(s);
6860 		return;
6861 	}
6862 
6863 	if (ostate == IEEE80211_S_SCAN) {
6864 		if (nstate == ostate) {
6865 			if (sc->sc_flags & IWX_FLAG_SCANNING) {
6866 				refcnt_rele_wake(&sc->task_refs);
6867 				splx(s);
6868 				return;
6869 			}
6870 			/* Firmware is no longer scanning. Do another scan. */
6871 			goto next_scan;
6872 		}
6873 	}
6874 
6875 	if (nstate <= ostate) {
6876 		switch (ostate) {
6877 		case IEEE80211_S_RUN:
6878 			err = iwx_run_stop(sc);
6879 			if (err)
6880 				goto out;
6881 			/* FALLTHROUGH */
6882 		case IEEE80211_S_ASSOC:
6883 			if (nstate <= IEEE80211_S_ASSOC) {
6884 				err = iwx_disassoc(sc);
6885 				if (err)
6886 					goto out;
6887 			}
6888 			/* FALLTHROUGH */
6889 		case IEEE80211_S_AUTH:
6890 			if (nstate <= IEEE80211_S_AUTH) {
6891 				err = iwx_deauth(sc);
6892 				if (err)
6893 					goto out;
6894 			}
6895 			/* FALLTHROUGH */
6896 		case IEEE80211_S_SCAN:
6897 		case IEEE80211_S_INIT:
6898 			break;
6899 		}
6900 
6901 		/* Die now if iwx_stop() was called while we were sleeping. */
6902 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
6903 			refcnt_rele_wake(&sc->task_refs);
6904 			splx(s);
6905 			return;
6906 		}
6907 	}
6908 
6909 	switch (nstate) {
6910 	case IEEE80211_S_INIT:
6911 		break;
6912 
6913 	case IEEE80211_S_SCAN:
6914 next_scan:
6915 		err = iwx_scan(sc);
6916 		if (err)
6917 			break;
6918 		refcnt_rele_wake(&sc->task_refs);
6919 		splx(s);
6920 		return;
6921 
6922 	case IEEE80211_S_AUTH:
6923 		err = iwx_auth(sc);
6924 		break;
6925 
6926 	case IEEE80211_S_ASSOC:
6927 		err = iwx_assoc(sc);
6928 		break;
6929 
6930 	case IEEE80211_S_RUN:
6931 		err = iwx_run(sc);
6932 		break;
6933 	}
6934 
6935 out:
6936 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
6937 		if (err)
6938 			task_add(systq, &sc->init_task);
6939 		else
6940 			sc->sc_newstate(ic, nstate, arg);
6941 	}
6942 	refcnt_rele_wake(&sc->task_refs);
6943 	splx(s);
6944 }
6945 
6946 int
6947 iwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
6948 {
6949 	struct ifnet *ifp = IC2IFP(ic);
6950 	struct iwx_softc *sc = ifp->if_softc;
6951 	int i;
6952 
6953 	if (ic->ic_state == IEEE80211_S_RUN) {
6954 		iwx_del_task(sc, systq, &sc->ba_task);
6955 		iwx_del_task(sc, systq, &sc->setkey_task);
6956 		memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
6957 		sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
6958 		iwx_del_task(sc, systq, &sc->mac_ctxt_task);
6959 		for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
6960 			struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
6961 			iwx_clear_reorder_buffer(sc, rxba);
6962 		}
6963 	}
6964 
6965 	sc->ns_nstate = nstate;
6966 	sc->ns_arg = arg;
6967 
6968 	iwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);
6969 
6970 	return 0;
6971 }
6972 
6973 void
6974 iwx_endscan(struct iwx_softc *sc)
6975 {
6976 	struct ieee80211com *ic = &sc->sc_ic;
6977 
6978 	if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
6979 		return;
6980 
6981 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
6982 	ieee80211_end_scan(&ic->ic_if);
6983 }
6984 
6985 /*
6986  * Aging and idle timeouts for the different possible scenarios
6987  * in default configuration
6988  */
6989 static const uint32_t
6990 iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
6991 	{
6992 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
6993 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
6994 	},
6995 	{
6996 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
6997 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
6998 	},
6999 	{
7000 		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
7001 		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
7002 	},
7003 	{
7004 		htole32(IWX_SF_BA_AGING_TIMER_DEF),
7005 		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
7006 	},
7007 	{
7008 		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
7009 		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
7010 	},
7011 };
7012 
7013 /*
7014  * Aging and idle timeouts for the different possible scenarios
7015  * in single BSS MAC configuration.
7016  */
7017 static const uint32_t
7018 iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
7019 	{
7020 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
7021 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
7022 	},
7023 	{
7024 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
7025 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
7026 	},
7027 	{
7028 		htole32(IWX_SF_MCAST_AGING_TIMER),
7029 		htole32(IWX_SF_MCAST_IDLE_TIMER)
7030 	},
7031 	{
7032 		htole32(IWX_SF_BA_AGING_TIMER),
7033 		htole32(IWX_SF_BA_IDLE_TIMER)
7034 	},
7035 	{
7036 		htole32(IWX_SF_TX_RE_AGING_TIMER),
7037 		htole32(IWX_SF_TX_RE_IDLE_TIMER)
7038 	},
7039 };
7040 
7041 void
7042 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
7043     struct ieee80211_node *ni)
7044 {
7045 	int i, j, watermark;
7046 
7047 	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
7048 
7049 	/*
7050 	 * If we are in association flow - check antenna configuration
7051 	 * capabilities of the AP station, and choose the watermark accordingly.
7052 	 */
7053 	if (ni) {
7054 		if (ni->ni_flags & IEEE80211_NODE_HT) {
7055 			if (ni->ni_rxmcs[1] != 0)
7056 				watermark = IWX_SF_W_MARK_MIMO2;
7057 			else
7058 				watermark = IWX_SF_W_MARK_SISO;
7059 		} else {
7060 			watermark = IWX_SF_W_MARK_LEGACY;
7061 		}
7062 	/* default watermark value for unassociated mode. */
7063 	} else {
7064 		watermark = IWX_SF_W_MARK_MIMO2;
7065 	}
7066 	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
7067 
7068 	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
7069 		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
7070 			sf_cmd->long_delay_timeouts[i][j] =
7071 					htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
7072 		}
7073 	}
7074 
7075 	if (ni) {
7076 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
7077 		       sizeof(iwx_sf_full_timeout));
7078 	} else {
7079 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
7080 		       sizeof(iwx_sf_full_timeout_def));
7081 	}
7082 
7083 }
7084 
7085 int
7086 iwx_sf_config(struct iwx_softc *sc, int new_state)
7087 {
7088 	struct ieee80211com *ic = &sc->sc_ic;
7089 	struct iwx_sf_cfg_cmd sf_cmd = {
7090 		.state = htole32(new_state),
7091 	};
7092 	int err = 0;
7093 
7094 	switch (new_state) {
7095 	case IWX_SF_UNINIT:
7096 	case IWX_SF_INIT_OFF:
7097 		iwx_fill_sf_command(sc, &sf_cmd, NULL);
7098 		break;
7099 	case IWX_SF_FULL_ON:
7100 		iwx_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
7101 		break;
7102 	default:
7103 		return EINVAL;
7104 	}
7105 
7106 	err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
7107 				   sizeof(sf_cmd), &sf_cmd);
7108 	return err;
7109 }
7110 
7111 int
7112 iwx_send_bt_init_conf(struct iwx_softc *sc)
7113 {
7114 	struct iwx_bt_coex_cmd bt_cmd;
7115 
7116 	bt_cmd.mode = htole32(IWX_BT_COEX_WIFI);
7117 	bt_cmd.enabled_modules = 0;
7118 
7119 	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
7120 	    &bt_cmd);
7121 }
7122 
7123 int
7124 iwx_send_soc_conf(struct iwx_softc *sc)
7125 {
7126 	struct iwx_soc_configuration_cmd cmd;
7127 	int err;
7128 	uint32_t cmd_id, flags = 0;
7129 
7130 	memset(&cmd, 0, sizeof(cmd));
7131 
7132 	/*
7133 	 * In VER_1 of this command, the discrete value is considered
7134 	 * an integer; In VER_2, it's a bitmask.  Since we have only 2
7135 	 * values in VER_1, this is backwards-compatible with VER_2,
7136 	 * as long as we don't set any other flag bits.
7137 	 */
7138 	if (!sc->sc_integrated) { /* VER_1 */
7139 		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
7140 	} else { /* VER_2 */
7141 		uint8_t scan_cmd_ver;
7142 		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
7143 			flags |= (sc->sc_ltr_delay &
7144 			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
7145 		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
7146 		    IWX_SCAN_REQ_UMAC);
7147 		if (scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
7148 			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
7149 	}
7150 	cmd.flags = htole32(flags);
7151 
7152 	cmd.latency = htole32(sc->sc_xtal_latency);
7153 
7154 	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
7155 	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
7156 	if (err)
7157 		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
7158 	return err;
7159 }
7160 
7161 int
7162 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
7163 {
7164 	struct iwx_mcc_update_cmd mcc_cmd;
7165 	struct iwx_host_cmd hcmd = {
7166 		.id = IWX_MCC_UPDATE_CMD,
7167 		.flags = IWX_CMD_WANT_RESP,
7168 		.data = { &mcc_cmd },
7169 	};
7170 	struct iwx_rx_packet *pkt;
7171 	struct iwx_mcc_update_resp *resp;
7172 	size_t resp_len;
7173 	int err;
7174 
7175 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
7176 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
7177 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
7178 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
7179 		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
7180 	else
7181 		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
7182 
7183 	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
7184 	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
7185 
7186 	err = iwx_send_cmd(sc, &hcmd);
7187 	if (err)
7188 		return err;
7189 
7190 	pkt = hcmd.resp_pkt;
7191 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
7192 		err = EIO;
7193 		goto out;
7194 	}
7195 
7196 	resp_len = iwx_rx_packet_payload_len(pkt);
7197 	if (resp_len < sizeof(*resp)) {
7198 		err = EIO;
7199 		goto out;
7200 	}
7201 
7202 	resp = (void *)pkt->data;
7203 	if (resp_len != sizeof(*resp) +
7204 	    resp->n_channels * sizeof(resp->channels[0])) {
7205 		err = EIO;
7206 		goto out;
7207 	}
7208 
7209 	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%d n_channels=%u\n",
7210 	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
7211 
7212 	/* Update channel map for net80211 and our scan configuration. */
7213 	iwx_init_channel_map(sc, NULL, resp->channels, resp->n_channels);
7214 
7215 out:
7216 	iwx_free_resp(sc, &hcmd);
7217 
7218 	return err;
7219 }
7220 
7221 int
7222 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
7223 {
7224 	struct iwx_temp_report_ths_cmd cmd;
7225 	int err;
7226 
7227 	/*
7228 	 * In order to give responsibility for critical-temperature-kill
7229 	 * and TX backoff to FW we need to send an empty temperature
7230 	 * reporting command at init time.
7231 	 */
7232 	memset(&cmd, 0, sizeof(cmd));
7233 
7234 	err = iwx_send_cmd_pdu(sc,
7235 	    IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
7236 	    0, sizeof(cmd), &cmd);
7237 	if (err)
7238 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
7239 		    DEVNAME(sc), err);
7240 
7241 	return err;
7242 }
7243 
7244 int
7245 iwx_init_hw(struct iwx_softc *sc)
7246 {
7247 	struct ieee80211com *ic = &sc->sc_ic;
7248 	int err, i;
7249 
7250 	err = iwx_preinit(sc);
7251 	if (err)
7252 		return err;
7253 
7254 	err = iwx_start_hw(sc);
7255 	if (err) {
7256 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
7257 		return err;
7258 	}
7259 
7260 	err = iwx_run_init_mvm_ucode(sc, 0);
7261 	if (err)
7262 		return err;
7263 
7264 	if (!iwx_nic_lock(sc))
7265 		return EBUSY;
7266 
7267 	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
7268 	if (err) {
7269 		printf("%s: could not init tx ant config (error %d)\n",
7270 		    DEVNAME(sc), err);
7271 		goto err;
7272 	}
7273 
7274 	if (sc->sc_tx_with_siso_diversity) {
7275 		err = iwx_send_phy_cfg_cmd(sc);
7276 		if (err) {
7277 			printf("%s: could not send phy config (error %d)\n",
7278 			    DEVNAME(sc), err);
7279 			goto err;
7280 		}
7281 	}
7282 
7283 	err = iwx_send_bt_init_conf(sc);
7284 	if (err) {
7285 		printf("%s: could not init bt coex (error %d)\n",
7286 		    DEVNAME(sc), err);
7287 		return err;
7288 	}
7289 
7290 	err = iwx_send_soc_conf(sc);
7291 	if (err)
7292 		return err;
7293 
7294 	err = iwx_send_dqa_cmd(sc);
7295 	if (err)
7296 		return err;
7297 
7298 	/* Add auxiliary station for scanning */
7299 	err = iwx_add_aux_sta(sc);
7300 	if (err) {
7301 		printf("%s: could not add aux station (error %d)\n",
7302 		    DEVNAME(sc), err);
7303 		goto err;
7304 	}
7305 
7306 	for (i = 0; i < 1; i++) {
7307 		/*
7308 		 * The channel used here isn't relevant as it's
7309 		 * going to be overwritten in the other flows.
7310 		 * For now use the first channel we have.
7311 		 */
7312 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
7313 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
7314 		    IWX_FW_CTXT_ACTION_ADD, 0);
7315 		if (err) {
7316 			printf("%s: could not add phy context %d (error %d)\n",
7317 			    DEVNAME(sc), i, err);
7318 			goto err;
7319 		}
7320 	}
7321 
7322 	err = iwx_config_ltr(sc);
7323 	if (err) {
7324 		printf("%s: PCIe LTR configuration failed (error %d)\n",
7325 		    DEVNAME(sc), err);
7326 	}
7327 
7328 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
7329 		err = iwx_send_temp_report_ths_cmd(sc);
7330 		if (err)
7331 			goto err;
7332 	}
7333 
7334 	err = iwx_power_update_device(sc);
7335 	if (err) {
7336 		printf("%s: could not send power command (error %d)\n",
7337 		    DEVNAME(sc), err);
7338 		goto err;
7339 	}
7340 
7341 	if (sc->sc_nvm.lar_enabled) {
7342 		err = iwx_send_update_mcc_cmd(sc, "ZZ");
7343 		if (err) {
7344 			printf("%s: could not init LAR (error %d)\n",
7345 			    DEVNAME(sc), err);
7346 			goto err;
7347 		}
7348 	}
7349 
7350 	err = iwx_config_umac_scan(sc);
7351 	if (err) {
7352 		printf("%s: could not configure scan (error %d)\n",
7353 		    DEVNAME(sc), err);
7354 		goto err;
7355 	}
7356 
7357 	err = iwx_disable_beacon_filter(sc);
7358 	if (err) {
7359 		printf("%s: could not disable beacon filter (error %d)\n",
7360 		    DEVNAME(sc), err);
7361 		goto err;
7362 	}
7363 
7364 err:
7365 	iwx_nic_unlock(sc);
7366 	return err;
7367 }
7368 
7369 /* Allow multicast from our BSSID. */
7370 int
7371 iwx_allow_mcast(struct iwx_softc *sc)
7372 {
7373 	struct ieee80211com *ic = &sc->sc_ic;
7374 	struct ieee80211_node *ni = ic->ic_bss;
7375 	struct iwx_mcast_filter_cmd *cmd;
7376 	size_t size;
7377 	int err;
7378 
7379 	size = roundup(sizeof(*cmd), 4);
7380 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
7381 	if (cmd == NULL)
7382 		return ENOMEM;
7383 	cmd->filter_own = 1;
7384 	cmd->port_id = 0;
7385 	cmd->count = 0;
7386 	cmd->pass_all = 1;
7387 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
7388 
7389 	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
7390 	    0, size, cmd);
7391 	free(cmd, M_DEVBUF, size);
7392 	return err;
7393 }
7394 
7395 int
7396 iwx_init(struct ifnet *ifp)
7397 {
7398 	struct iwx_softc *sc = ifp->if_softc;
7399 	struct ieee80211com *ic = &sc->sc_ic;
7400 	int err, generation;
7401 
7402 	rw_assert_wrlock(&sc->ioctl_rwl);
7403 
7404 	generation = ++sc->sc_generation;
7405 
7406 	KASSERT(sc->task_refs.refs == 0);
7407 	refcnt_init(&sc->task_refs);
7408 
7409 	err = iwx_init_hw(sc);
7410 	if (err) {
7411 		if (generation == sc->sc_generation)
7412 			iwx_stop(ifp);
7413 		return err;
7414 	}
7415 
7416 	if (sc->sc_nvm.sku_cap_11n_enable)
7417 		iwx_setup_ht_rates(sc);
7418 
7419 	ifq_clr_oactive(&ifp->if_snd);
7420 	ifp->if_flags |= IFF_RUNNING;
7421 
7422 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7423 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
7424 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
7425 		return 0;
7426 	}
7427 
7428 	ieee80211_begin_scan(ifp);
7429 
7430 	/*
7431 	 * ieee80211_begin_scan() ends up scheduling iwx_newstate_task().
7432 	 * Wait until the transition to SCAN state has completed.
7433 	 */
7434 	do {
7435 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwxinit",
7436 		    SEC_TO_NSEC(1));
7437 		if (generation != sc->sc_generation)
7438 			return ENXIO;
7439 		if (err)
7440 			return err;
7441 	} while (ic->ic_state != IEEE80211_S_SCAN);
7442 
7443 	return 0;
7444 }
7445 
7446 void
7447 iwx_start(struct ifnet *ifp)
7448 {
7449 	struct iwx_softc *sc = ifp->if_softc;
7450 	struct ieee80211com *ic = &sc->sc_ic;
7451 	struct ieee80211_node *ni;
7452 	struct ether_header *eh;
7453 	struct mbuf *m;
7454 	int ac = EDCA_AC_BE; /* XXX */
7455 
7456 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
7457 		return;
7458 
7459 	for (;;) {
7460 		/* why isn't this done per-queue? */
7461 		if (sc->qfullmsk != 0) {
7462 			ifq_set_oactive(&ifp->if_snd);
7463 			break;
7464 		}
7465 
7466 		/* need to send management frames even if we're not RUNning */
7467 		m = mq_dequeue(&ic->ic_mgtq);
7468 		if (m) {
7469 			ni = m->m_pkthdr.ph_cookie;
7470 			goto sendit;
7471 		}
7472 
7473 		if (ic->ic_state != IEEE80211_S_RUN ||
7474 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
7475 			break;
7476 
7477 		m = ifq_dequeue(&ifp->if_snd);
7478 		if (!m)
7479 			break;
7480 		if (m->m_len < sizeof (*eh) &&
7481 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
7482 			ifp->if_oerrors++;
7483 			continue;
7484 		}
7485 #if NBPFILTER > 0
7486 		if (ifp->if_bpf != NULL)
7487 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
7488 #endif
7489 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
7490 			ifp->if_oerrors++;
7491 			continue;
7492 		}
7493 
7494  sendit:
7495 #if NBPFILTER > 0
7496 		if (ic->ic_rawbpf != NULL)
7497 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
7498 #endif
7499 		if (iwx_tx(sc, m, ni, ac) != 0) {
7500 			ieee80211_release_node(ic, ni);
7501 			ifp->if_oerrors++;
7502 			continue;
7503 		}
7504 
7505 		if (ifp->if_flags & IFF_UP) {
7506 			sc->sc_tx_timer = 15;
7507 			ifp->if_timer = 1;
7508 		}
7509 	}
7510 
7511 	return;
7512 }
7513 
7514 void
7515 iwx_stop(struct ifnet *ifp)
7516 {
7517 	struct iwx_softc *sc = ifp->if_softc;
7518 	struct ieee80211com *ic = &sc->sc_ic;
7519 	struct iwx_node *in = (void *)ic->ic_bss;
7520 	int i, s = splnet();
7521 
7522 	rw_assert_wrlock(&sc->ioctl_rwl);
7523 
7524 	sc->sc_flags |= IWX_FLAG_SHUTDOWN; /* Disallow new tasks. */
7525 
7526 	/* Cancel scheduled tasks and let any stale tasks finish up. */
7527 	task_del(systq, &sc->init_task);
7528 	iwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
7529 	iwx_del_task(sc, systq, &sc->ba_task);
7530 	iwx_del_task(sc, systq, &sc->setkey_task);
7531 	memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
7532 	sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
7533 	iwx_del_task(sc, systq, &sc->mac_ctxt_task);
7534 	KASSERT(sc->task_refs.refs >= 1);
7535 	refcnt_finalize(&sc->task_refs, "iwxstop");
7536 
7537 	iwx_stop_device(sc);
7538 
7539 	/* Reset soft state. */
7540 
7541 	sc->sc_generation++;
7542 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
7543 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
7544 		sc->sc_cmd_resp_pkt[i] = NULL;
7545 		sc->sc_cmd_resp_len[i] = 0;
7546 	}
7547 	ifp->if_flags &= ~IFF_RUNNING;
7548 	ifq_clr_oactive(&ifp->if_snd);
7549 
7550 	in->in_phyctxt = NULL;
7551 	in->in_flags = 0;
7552 
7553 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
7554 	sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
7555 	sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
7556 	sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
7557 	sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
7558 	sc->sc_flags &= ~IWX_FLAG_HW_ERR;
7559 	sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
7560 
7561 	sc->sc_rx_ba_sessions = 0;
7562 	sc->ba_start_tidmask = 0;
7563 	sc->ba_stop_tidmask = 0;
7564 	memset(sc->ba_ssn, 0, sizeof(sc->ba_ssn));
7565 	memset(sc->ba_winsize, 0, sizeof(sc->ba_winsize));
7566 	memset(sc->ba_timeout_val, 0, sizeof(sc->ba_timeout_val));
7567 
7568 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
7569 
7570 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
7571 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
7572 		iwx_clear_reorder_buffer(sc, rxba);
7573 	}
7574 	ifp->if_timer = sc->sc_tx_timer = 0;
7575 
7576 	splx(s);
7577 }
7578 
7579 void
7580 iwx_watchdog(struct ifnet *ifp)
7581 {
7582 	struct iwx_softc *sc = ifp->if_softc;
7583 
7584 	ifp->if_timer = 0;
7585 	if (sc->sc_tx_timer > 0) {
7586 		if (--sc->sc_tx_timer == 0) {
7587 			printf("%s: device timeout\n", DEVNAME(sc));
7588 #ifdef IWX_DEBUG
7589 			iwx_nic_error(sc);
7590 #endif
7591 			if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
7592 				task_add(systq, &sc->init_task);
7593 			ifp->if_oerrors++;
7594 			return;
7595 		}
7596 		ifp->if_timer = 1;
7597 	}
7598 
7599 	ieee80211_watchdog(ifp);
7600 }
7601 
7602 int
7603 iwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
7604 {
7605 	struct iwx_softc *sc = ifp->if_softc;
7606 	int s, err = 0, generation = sc->sc_generation;
7607 
7608 	/*
7609 	 * Prevent processes from entering this function while another
7610 	 * process is tsleep'ing in it.
7611 	 */
7612 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
7613 	if (err == 0 && generation != sc->sc_generation) {
7614 		rw_exit(&sc->ioctl_rwl);
7615 		return ENXIO;
7616 	}
7617 	if (err)
7618 		return err;
7619 	s = splnet();
7620 
7621 	switch (cmd) {
7622 	case SIOCSIFADDR:
7623 		ifp->if_flags |= IFF_UP;
7624 		/* FALLTHROUGH */
7625 	case SIOCSIFFLAGS:
7626 		if (ifp->if_flags & IFF_UP) {
7627 			if (!(ifp->if_flags & IFF_RUNNING)) {
7628 				err = iwx_init(ifp);
7629 			}
7630 		} else {
7631 			if (ifp->if_flags & IFF_RUNNING)
7632 				iwx_stop(ifp);
7633 		}
7634 		break;
7635 
7636 	default:
7637 		err = ieee80211_ioctl(ifp, cmd, data);
7638 	}
7639 
7640 	if (err == ENETRESET) {
7641 		err = 0;
7642 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
7643 		    (IFF_UP | IFF_RUNNING)) {
7644 			iwx_stop(ifp);
7645 			err = iwx_init(ifp);
7646 		}
7647 	}
7648 
7649 	splx(s);
7650 	rw_exit(&sc->ioctl_rwl);
7651 
7652 	return err;
7653 }
7654 
7655 #if 1 /* usually #ifdef IWX_DEBUG but always enabled for now */
7656 /*
7657  * Note: This structure is read from the device with IO accesses,
7658  * and the reading already does the endian conversion. As it is
7659  * read with uint32_t-sized accesses, any members with a different size
7660  * need to be ordered correctly though!
7661  */
7662 struct iwx_error_event_table {
7663 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
7664 	uint32_t error_id;		/* type of error */
7665 	uint32_t trm_hw_status0;	/* TRM HW status */
7666 	uint32_t trm_hw_status1;	/* TRM HW status */
7667 	uint32_t blink2;		/* branch link */
7668 	uint32_t ilink1;		/* interrupt link */
7669 	uint32_t ilink2;		/* interrupt link */
7670 	uint32_t data1;		/* error-specific data */
7671 	uint32_t data2;		/* error-specific data */
7672 	uint32_t data3;		/* error-specific data */
7673 	uint32_t bcon_time;		/* beacon timer */
7674 	uint32_t tsf_low;		/* network timestamp function timer */
7675 	uint32_t tsf_hi;		/* network timestamp function timer */
7676 	uint32_t gp1;		/* GP1 timer register */
7677 	uint32_t gp2;		/* GP2 timer register */
7678 	uint32_t fw_rev_type;	/* firmware revision type */
7679 	uint32_t major;		/* uCode version major */
7680 	uint32_t minor;		/* uCode version minor */
7681 	uint32_t hw_ver;		/* HW Silicon version */
7682 	uint32_t brd_ver;		/* HW board version */
7683 	uint32_t log_pc;		/* log program counter */
7684 	uint32_t frame_ptr;		/* frame pointer */
7685 	uint32_t stack_ptr;		/* stack pointer */
7686 	uint32_t hcmd;		/* last host command header */
7687 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
7688 				 * rxtx_flag */
7689 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
7690 				 * host_flag */
7691 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
7692 				 * enc_flag */
7693 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
7694 				 * time_flag */
7695 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
7696 				 * wico interrupt */
7697 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
7698 	uint32_t wait_event;		/* wait event() caller address */
7699 	uint32_t l2p_control;	/* L2pControlField */
7700 	uint32_t l2p_duration;	/* L2pDurationField */
7701 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
7702 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
7703 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
7704 				 * (LMPM_PMG_SEL) */
7705 	uint32_t u_timestamp;	/* indicate when the date and time of the
7706 				 * compilation */
7707 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
7708 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
7709 
7710 /*
7711  * UMAC error struct - relevant starting from family 8000 chip.
7712  * Note: This structure is read from the device with IO accesses,
7713  * and the reading already does the endian conversion. As it is
7714  * read with u32-sized accesses, any members with a different size
7715  * need to be ordered correctly though!
7716  */
7717 struct iwx_umac_error_event_table {
7718 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
7719 	uint32_t error_id;	/* type of error */
7720 	uint32_t blink1;	/* branch link */
7721 	uint32_t blink2;	/* branch link */
7722 	uint32_t ilink1;	/* interrupt link */
7723 	uint32_t ilink2;	/* interrupt link */
7724 	uint32_t data1;		/* error-specific data */
7725 	uint32_t data2;		/* error-specific data */
7726 	uint32_t data3;		/* error-specific data */
7727 	uint32_t umac_major;
7728 	uint32_t umac_minor;
7729 	uint32_t frame_pointer;	/* core register 27*/
7730 	uint32_t stack_pointer;	/* core register 28 */
7731 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
7732 	uint32_t nic_isr_pref;	/* ISR status register */
7733 } __packed;
7734 
7735 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
7736 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
7737 
7738 void
7739 iwx_nic_umac_error(struct iwx_softc *sc)
7740 {
7741 	struct iwx_umac_error_event_table table;
7742 	uint32_t base;
7743 
7744 	base = sc->sc_uc.uc_umac_error_event_table;
7745 
7746 	if (base < 0x800000) {
7747 		printf("%s: Invalid error log pointer 0x%08x\n",
7748 		    DEVNAME(sc), base);
7749 		return;
7750 	}
7751 
7752 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
7753 		printf("%s: reading errlog failed\n", DEVNAME(sc));
7754 		return;
7755 	}
7756 
7757 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
7758 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
7759 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
7760 			sc->sc_flags, table.valid);
7761 	}
7762 
7763 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
7764 		iwx_desc_lookup(table.error_id));
7765 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
7766 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
7767 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
7768 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
7769 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
7770 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
7771 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
7772 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
7773 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
7774 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
7775 	    table.frame_pointer);
7776 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
7777 	    table.stack_pointer);
7778 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
7779 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
7780 	    table.nic_isr_pref);
7781 }
7782 
7783 #define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
7784 static struct {
7785 	const char *name;
7786 	uint8_t num;
7787 } advanced_lookup[] = {
7788 	{ "NMI_INTERRUPT_WDG", 0x34 },
7789 	{ "SYSASSERT", 0x35 },
7790 	{ "UCODE_VERSION_MISMATCH", 0x37 },
7791 	{ "BAD_COMMAND", 0x38 },
7792 	{ "BAD_COMMAND", 0x39 },
7793 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
7794 	{ "FATAL_ERROR", 0x3D },
7795 	{ "NMI_TRM_HW_ERR", 0x46 },
7796 	{ "NMI_INTERRUPT_TRM", 0x4C },
7797 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
7798 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
7799 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
7800 	{ "NMI_INTERRUPT_HOST", 0x66 },
7801 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
7802 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
7803 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
7804 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
7805 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
7806 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
7807 	{ "ADVANCED_SYSASSERT", 0 },
7808 };
7809 
7810 const char *
7811 iwx_desc_lookup(uint32_t num)
7812 {
7813 	int i;
7814 
7815 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
7816 		if (advanced_lookup[i].num ==
7817 		    (num & ~IWX_FW_SYSASSERT_CPU_MASK))
7818 			return advanced_lookup[i].name;
7819 
7820 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
7821 	return advanced_lookup[i].name;
7822 }
7823 
7824 /*
7825  * Support for dumping the error log seemed like a good idea ...
7826  * but it's mostly hex junk and the only sensible thing is the
7827  * hw/ucode revision (which we know anyway).  Since it's here,
7828  * I'll just leave it in, just in case e.g. the Intel guys want to
7829  * help us decipher some "ADVANCED_SYSASSERT" later.
7830  */
7831 void
7832 iwx_nic_error(struct iwx_softc *sc)
7833 {
7834 	struct iwx_error_event_table table;
7835 	uint32_t base;
7836 
7837 	printf("%s: dumping device error log\n", DEVNAME(sc));
7838 	base = sc->sc_uc.uc_lmac_error_event_table[0];
7839 	if (base < 0x800000) {
7840 		printf("%s: Invalid error log pointer 0x%08x\n",
7841 		    DEVNAME(sc), base);
7842 		return;
7843 	}
7844 
7845 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
7846 		printf("%s: reading errlog failed\n", DEVNAME(sc));
7847 		return;
7848 	}
7849 
7850 	if (!table.valid) {
7851 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
7852 		return;
7853 	}
7854 
7855 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
7856 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
7857 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
7858 		    sc->sc_flags, table.valid);
7859 	}
7860 
7861 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
7862 	    iwx_desc_lookup(table.error_id));
7863 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
7864 	    table.trm_hw_status0);
7865 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
7866 	    table.trm_hw_status1);
7867 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
7868 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
7869 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
7870 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
7871 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
7872 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
7873 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
7874 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
7875 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
7876 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
7877 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
7878 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
7879 	    table.fw_rev_type);
7880 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
7881 	    table.major);
7882 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
7883 	    table.minor);
7884 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
7885 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
7886 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
7887 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
7888 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
7889 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
7890 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
7891 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
7892 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
7893 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
7894 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
7895 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
7896 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
7897 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
7898 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
7899 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
7900 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
7901 
7902 	if (sc->sc_uc.uc_umac_error_event_table)
7903 		iwx_nic_umac_error(sc);
7904 }
7905 #endif
7906 
7907 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
7908 do {									\
7909 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
7910 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
7911 	_var_ = (void *)((_pkt_)+1);					\
7912 } while (/*CONSTCOND*/0)
7913 
7914 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
7915 do {									\
7916 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
7917 	    sizeof(len), BUS_DMASYNC_POSTREAD);				\
7918 	_ptr_ = (void *)((_pkt_)+1);					\
7919 } while (/*CONSTCOND*/0)
7920 
7921 int
7922 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
7923 {
7924 	int qid, idx, code;
7925 
7926 	qid = pkt->hdr.qid & ~0x80;
7927 	idx = pkt->hdr.idx;
7928 	code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
7929 
7930 	return (!(qid == 0 && idx == 0 && code == 0) &&
7931 	    pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
7932 }
7933 
7934 void
7935 iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf_list *ml)
7936 {
7937 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
7938 	struct iwx_rx_packet *pkt, *nextpkt;
7939 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
7940 	struct mbuf *m0, *m;
7941 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
7942 	int qid, idx, code, handled = 1;
7943 
7944 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
7945 	    BUS_DMASYNC_POSTREAD);
7946 
7947 	m0 = data->m;
7948 	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
7949 		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
7950 		qid = pkt->hdr.qid;
7951 		idx = pkt->hdr.idx;
7952 
7953 		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
7954 
7955 		if (!iwx_rx_pkt_valid(pkt))
7956 			break;
7957 
7958 		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
7959 		if (len < sizeof(pkt->hdr) ||
7960 		    len > (IWX_RBUF_SIZE - offset - minsz))
7961 			break;
7962 
7963 		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
7964 			/* Take mbuf m0 off the RX ring. */
7965 			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
7966 				ifp->if_ierrors++;
7967 				break;
7968 			}
7969 			KASSERT(data->m != m0);
7970 		}
7971 
7972 		switch (code) {
7973 		case IWX_REPLY_RX_PHY_CMD:
7974 			iwx_rx_rx_phy_cmd(sc, pkt, data);
7975 			break;
7976 
7977 		case IWX_REPLY_RX_MPDU_CMD: {
7978 			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
7979 			nextoff = offset +
7980 			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
7981 			nextpkt = (struct iwx_rx_packet *)
7982 			    (m0->m_data + nextoff);
7983 			if (nextoff + minsz >= IWX_RBUF_SIZE ||
7984 			    !iwx_rx_pkt_valid(nextpkt)) {
7985 				/* No need to copy last frame in buffer. */
7986 				if (offset > 0)
7987 					m_adj(m0, offset);
7988 				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen, ml);
7989 				m0 = NULL; /* stack owns m0 now; abort loop */
7990 			} else {
7991 				/*
7992 				 * Create an mbuf which points to the current
7993 				 * packet. Always copy from offset zero to
7994 				 * preserve m_pkthdr.
7995 				 */
7996 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
7997 				if (m == NULL) {
7998 					ifp->if_ierrors++;
7999 					m_freem(m0);
8000 					m0 = NULL;
8001 					break;
8002 				}
8003 				m_adj(m, offset);
8004 				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen, ml);
8005 			}
8006  			break;
8007 		}
8008 
8009 		case IWX_TX_CMD:
8010 			iwx_rx_tx_cmd(sc, pkt, data);
8011 			break;
8012 
8013 		case IWX_MISSED_BEACONS_NOTIFICATION:
8014 			iwx_rx_bmiss(sc, pkt, data);
8015 			break;
8016 
8017 		case IWX_MFUART_LOAD_NOTIFICATION:
8018 			break;
8019 
8020 		case IWX_ALIVE: {
8021 			struct iwx_alive_resp_v4 *resp4;
8022 
8023 			DPRINTF(("%s: firmware alive\n", __func__));
8024 			if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
8025 				SYNC_RESP_STRUCT(resp4, pkt);
8026 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
8027 				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
8028 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
8029 				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
8030 				sc->sc_uc.uc_log_event_table = le32toh(
8031 				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
8032 				sc->sched_base = le32toh(
8033 				    resp4->lmac_data[0].dbg_ptrs.scd_base_ptr);
8034 				sc->sc_uc.uc_umac_error_event_table = le32toh(
8035 				    resp4->umac_data.dbg_ptrs.error_info_addr);
8036 				if (resp4->status == IWX_ALIVE_STATUS_OK)
8037 					sc->sc_uc.uc_ok = 1;
8038 				else
8039 					sc->sc_uc.uc_ok = 0;
8040 			}
8041 
8042 			sc->sc_uc.uc_intr = 1;
8043 			wakeup(&sc->sc_uc);
8044 			break;
8045 		}
8046 
8047 		case IWX_STATISTICS_NOTIFICATION: {
8048 			struct iwx_notif_statistics *stats;
8049 			SYNC_RESP_STRUCT(stats, pkt);
8050 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
8051 			sc->sc_noise = iwx_get_noise(&stats->rx.general);
8052 			break;
8053 		}
8054 
8055 		case IWX_DTS_MEASUREMENT_NOTIFICATION:
8056 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
8057 				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
8058 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
8059 				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
8060 			break;
8061 
8062 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
8063 		    IWX_CT_KILL_NOTIFICATION): {
8064 			struct iwx_ct_kill_notif *notif;
8065 			SYNC_RESP_STRUCT(notif, pkt);
8066 			printf("%s: device at critical temperature (%u degC), "
8067 			    "stopping device\n",
8068 			    DEVNAME(sc), le16toh(notif->temperature));
8069 			sc->sc_flags |= IWX_FLAG_HW_ERR;
8070 			task_add(systq, &sc->init_task);
8071 			break;
8072 		}
8073 
8074 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
8075 		    IWX_NVM_GET_INFO):
8076 		case IWX_ADD_STA_KEY:
8077 		case IWX_PHY_CONFIGURATION_CMD:
8078 		case IWX_TX_ANT_CONFIGURATION_CMD:
8079 		case IWX_ADD_STA:
8080 		case IWX_MAC_CONTEXT_CMD:
8081 		case IWX_REPLY_SF_CFG_CMD:
8082 		case IWX_POWER_TABLE_CMD:
8083 		case IWX_LTR_CONFIG:
8084 		case IWX_PHY_CONTEXT_CMD:
8085 		case IWX_BINDING_CONTEXT_CMD:
8086 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
8087 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
8088 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
8089 		case IWX_REPLY_BEACON_FILTERING_CMD:
8090 		case IWX_MAC_PM_POWER_TABLE:
8091 		case IWX_TIME_QUOTA_CMD:
8092 		case IWX_REMOVE_STA:
8093 		case IWX_TXPATH_FLUSH:
8094 		case IWX_BT_CONFIG:
8095 		case IWX_MCC_UPDATE_CMD:
8096 		case IWX_TIME_EVENT_CMD:
8097 		case IWX_STATISTICS_CMD:
8098 		case IWX_SCD_QUEUE_CFG: {
8099 			size_t pkt_len;
8100 
8101 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
8102 				break;
8103 
8104 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
8105 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
8106 
8107 			pkt_len = sizeof(pkt->len_n_flags) +
8108 			    iwx_rx_packet_len(pkt);
8109 
8110 			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
8111 			    pkt_len < sizeof(*pkt) ||
8112 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
8113 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
8114 				    sc->sc_cmd_resp_len[idx]);
8115 				sc->sc_cmd_resp_pkt[idx] = NULL;
8116 				break;
8117 			}
8118 
8119 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
8120 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
8121 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
8122 			break;
8123 		}
8124 
8125 		case IWX_INIT_COMPLETE_NOTIF:
8126 			sc->sc_init_complete |= IWX_INIT_COMPLETE;
8127 			wakeup(&sc->sc_init_complete);
8128 			break;
8129 
8130 		case IWX_SCAN_COMPLETE_UMAC: {
8131 			struct iwx_umac_scan_complete *notif;
8132 			SYNC_RESP_STRUCT(notif, pkt);
8133 			iwx_endscan(sc);
8134 			break;
8135 		}
8136 
8137 		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
8138 			struct iwx_umac_scan_iter_complete_notif *notif;
8139 			SYNC_RESP_STRUCT(notif, pkt);
8140 			iwx_endscan(sc);
8141 			break;
8142 		}
8143 
8144 		case IWX_MCC_CHUB_UPDATE_CMD: {
8145 			struct iwx_mcc_chub_notif *notif;
8146 			SYNC_RESP_STRUCT(notif, pkt);
8147 			iwx_mcc_update(sc, notif);
8148 			break;
8149 		}
8150 
8151 		case IWX_REPLY_ERROR: {
8152 			struct iwx_error_resp *resp;
8153 			SYNC_RESP_STRUCT(resp, pkt);
8154 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
8155 				DEVNAME(sc), le32toh(resp->error_type),
8156 				resp->cmd_id);
8157 			break;
8158 		}
8159 
8160 		case IWX_TIME_EVENT_NOTIFICATION: {
8161 			struct iwx_time_event_notif *notif;
8162 			uint32_t action;
8163 			SYNC_RESP_STRUCT(notif, pkt);
8164 
8165 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
8166 				break;
8167 			action = le32toh(notif->action);
8168 			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
8169 				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8170 			break;
8171 		}
8172 
8173 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
8174 		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
8175 		    break;
8176 
8177 		/*
8178 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
8179 		 * messages. Just ignore them for now.
8180 		 */
8181 		case IWX_DEBUG_LOG_MSG:
8182 			break;
8183 
8184 		case IWX_MCAST_FILTER_CMD:
8185 			break;
8186 
8187 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
8188 			break;
8189 
8190 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
8191 			break;
8192 
8193 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
8194 			break;
8195 
8196 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
8197 		    IWX_NVM_ACCESS_COMPLETE):
8198 			break;
8199 
8200 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
8201 			break; /* happens in monitor mode; ignore for now */
8202 
8203 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
8204 			break;
8205 
8206 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
8207 		    IWX_TLC_MNG_UPDATE_NOTIF): {
8208 			struct iwx_tlc_update_notif *notif;
8209 			SYNC_RESP_STRUCT(notif, pkt);
8210 			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
8211 				iwx_rs_update(sc, notif);
8212 			break;
8213 		}
8214 
8215 		default:
8216 			handled = 0;
8217 			printf("%s: unhandled firmware response 0x%x/0x%x "
8218 			    "rx ring %d[%d]\n",
8219 			    DEVNAME(sc), code, pkt->len_n_flags,
8220 			    (qid & ~0x80), idx);
8221 			break;
8222 		}
8223 
8224 		/*
8225 		 * uCode sets bit 0x80 when it originates the notification,
8226 		 * i.e. when the notification is not a direct response to a
8227 		 * command sent by the driver.
8228 		 * For example, uCode issues IWX_REPLY_RX when it sends a
8229 		 * received frame to the driver.
8230 		 */
8231 		if (handled && !(qid & (1 << 7))) {
8232 			iwx_cmd_done(sc, qid, idx, code);
8233 		}
8234 
8235 		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
8236 	}
8237 
8238 	if (m0 && m0 != data->m)
8239 		m_freem(m0);
8240 }
8241 
8242 void
8243 iwx_notif_intr(struct iwx_softc *sc)
8244 {
8245 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
8246 	uint16_t hw;
8247 
8248 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
8249 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
8250 
8251 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
8252 	hw &= (IWX_RX_MQ_RING_COUNT - 1);
8253 	while (sc->rxq.cur != hw) {
8254 		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
8255 		iwx_rx_pkt(sc, data, &ml);
8256 		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
8257 	}
8258 	if_input(&sc->sc_ic.ic_if, &ml);
8259 
8260 	/*
8261 	 * Tell the firmware what we have processed.
8262 	 * Seems like the hardware gets upset unless we align the write by 8??
8263 	 */
8264 	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
8265 	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
8266 }
8267 
8268 int
8269 iwx_intr(void *arg)
8270 {
8271 	struct iwx_softc *sc = arg;
8272 	int handled = 0;
8273 	int r1, r2, rv = 0;
8274 
8275 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
8276 
8277 	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
8278 		uint32_t *ict = sc->ict_dma.vaddr;
8279 		int tmp;
8280 
8281 		tmp = htole32(ict[sc->ict_cur]);
8282 		if (!tmp)
8283 			goto out_ena;
8284 
8285 		/*
8286 		 * ok, there was something.  keep plowing until we have all.
8287 		 */
8288 		r1 = r2 = 0;
8289 		while (tmp) {
8290 			r1 |= tmp;
8291 			ict[sc->ict_cur] = 0;
8292 			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
8293 			tmp = htole32(ict[sc->ict_cur]);
8294 		}
8295 
8296 		/* this is where the fun begins.  don't ask */
8297 		if (r1 == 0xffffffff)
8298 			r1 = 0;
8299 
8300 		/* i am not expected to understand this */
8301 		if (r1 & 0xc0000)
8302 			r1 |= 0x8000;
8303 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
8304 	} else {
8305 		r1 = IWX_READ(sc, IWX_CSR_INT);
8306 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
8307 			goto out;
8308 		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
8309 	}
8310 	if (r1 == 0 && r2 == 0) {
8311 		goto out_ena;
8312 	}
8313 
8314 	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);
8315 
8316 	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
8317 		int i;
8318 
8319 		/* Firmware has now configured the RFH. */
8320 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
8321 			iwx_update_rx_desc(sc, &sc->rxq, i);
8322 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
8323 	}
8324 
8325 	handled |= (r1 & (IWX_CSR_INT_BIT_ALIVE /*| IWX_CSR_INT_BIT_SCD*/));
8326 
8327 	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
8328 		handled |= IWX_CSR_INT_BIT_RF_KILL;
8329 		iwx_check_rfkill(sc);
8330 		task_add(systq, &sc->init_task);
8331 		rv = 1;
8332 		goto out_ena;
8333 	}
8334 
8335 	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
8336 #if 1 /* usually #ifdef IWX_DEBUG but always enabled for now */
8337 		int i;
8338 
8339 		iwx_nic_error(sc);
8340 
8341 		/* Dump driver status (TX and RX rings) while we're here. */
8342 		printf("driver status:\n");
8343 		for (i = 0; i < IWX_MAX_QUEUES; i++) {
8344 			struct iwx_tx_ring *ring = &sc->txq[i];
8345 			printf("  tx ring %2d: qid=%-2d cur=%-3d "
8346 			    "queued=%-3d\n",
8347 			    i, ring->qid, ring->cur, ring->queued);
8348 		}
8349 		printf("  rx ring: cur=%d\n", sc->rxq.cur);
8350 		printf("  802.11 state %s\n",
8351 		    ieee80211_state_name[sc->sc_ic.ic_state]);
8352 #endif
8353 
8354 		printf("%s: fatal firmware error\n", DEVNAME(sc));
8355 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
8356 			task_add(systq, &sc->init_task);
8357 		rv = 1;
8358 		goto out;
8359 
8360 	}
8361 
8362 	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
8363 		handled |= IWX_CSR_INT_BIT_HW_ERR;
8364 		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
8365 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
8366 			sc->sc_flags |= IWX_FLAG_HW_ERR;
8367 			task_add(systq, &sc->init_task);
8368 		}
8369 		rv = 1;
8370 		goto out;
8371 	}
8372 
8373 	/* firmware chunk loaded */
8374 	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
8375 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);
8376 		handled |= IWX_CSR_INT_BIT_FH_TX;
8377 
8378 		sc->sc_fw_chunk_done = 1;
8379 		wakeup(&sc->sc_fw);
8380 	}
8381 
8382 	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
8383 	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
8384 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
8385 			handled |= (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX);
8386 			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
8387 		}
8388 		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
8389 			handled |= IWX_CSR_INT_BIT_RX_PERIODIC;
8390 			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
8391 		}
8392 
8393 		/* Disable periodic interrupt; we use it as just a one-shot. */
8394 		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);
8395 
8396 		/*
8397 		 * Enable periodic interrupt in 8 msec only if we received
8398 		 * real RX interrupt (instead of just periodic int), to catch
8399 		 * any dangling Rx interrupt.  If it was just the periodic
8400 		 * interrupt, there was no dangling Rx activity, and no need
8401 		 * to extend the periodic interrupt; one-shot is enough.
8402 		 */
8403 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
8404 			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
8405 			    IWX_CSR_INT_PERIODIC_ENA);
8406 
8407 		iwx_notif_intr(sc);
8408 	}
8409 
8410 	rv = 1;
8411 
8412  out_ena:
8413 	iwx_restore_interrupts(sc);
8414  out:
8415 	return rv;
8416 }
8417 
8418 int
8419 iwx_intr_msix(void *arg)
8420 {
8421 	struct iwx_softc *sc = arg;
8422 	uint32_t inta_fh, inta_hw;
8423 	int vector = 0;
8424 
8425 	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
8426 	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
8427 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
8428 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
8429 	inta_fh &= sc->sc_fh_mask;
8430 	inta_hw &= sc->sc_hw_mask;
8431 
8432 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
8433 	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
8434 		iwx_notif_intr(sc);
8435 	}
8436 
8437 	/* firmware chunk loaded */
8438 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
8439 		sc->sc_fw_chunk_done = 1;
8440 		wakeup(&sc->sc_fw);
8441 	}
8442 
8443 	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
8444 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
8445 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
8446 #if 1 /* usually #ifdef IWX_DEBUG but always enabled for now */
8447 		int i;
8448 
8449 		iwx_nic_error(sc);
8450 
8451 		/* Dump driver status (TX and RX rings) while we're here. */
8452 		printf("driver status:\n");
8453 		for (i = 0; i < IWX_MAX_QUEUES; i++) {
8454 			struct iwx_tx_ring *ring = &sc->txq[i];
8455 			printf("  tx ring %2d: qid=%-2d cur=%-3d "
8456 			    "queued=%-3d\n",
8457 			    i, ring->qid, ring->cur, ring->queued);
8458 		}
8459 		printf("  rx ring: cur=%d\n", sc->rxq.cur);
8460 		printf("  802.11 state %s\n",
8461 		    ieee80211_state_name[sc->sc_ic.ic_state]);
8462 #endif
8463 
8464 		printf("%s: fatal firmware error\n", DEVNAME(sc));
8465 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
8466 			task_add(systq, &sc->init_task);
8467 		return 1;
8468 	}
8469 
8470 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
8471 		iwx_check_rfkill(sc);
8472 		task_add(systq, &sc->init_task);
8473 	}
8474 
8475 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
8476 		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
8477 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
8478 			sc->sc_flags |= IWX_FLAG_HW_ERR;
8479 			task_add(systq, &sc->init_task);
8480 		}
8481 		return 1;
8482 	}
8483 
8484 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
8485 		int i;
8486 
8487 		/* Firmware has now configured the RFH. */
8488 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
8489 			iwx_update_rx_desc(sc, &sc->rxq, i);
8490 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
8491 	}
8492 
8493 	/*
8494 	 * Before sending the interrupt the HW disables it to prevent
8495 	 * a nested interrupt. This is done by writing 1 to the corresponding
8496 	 * bit in the mask register. After handling the interrupt, it should be
8497 	 * re-enabled by clearing this bit. This register is defined as
8498 	 * write 1 clear (W1C) register, meaning that it's being clear
8499 	 * by writing 1 to the bit.
8500 	 */
8501 	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
8502 	return 1;
8503 }
8504 
8505 typedef void *iwx_match_t;
8506 
8507 static const struct pci_matchid iwx_devices[] = {
8508 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_1 },
8509 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_2 },
8510 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_3 },
8511 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_4,},
8512 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_5,},
8513 };
8514 
8515 static const struct pci_matchid iwx_subsystem_id_ax201[] = {
8516 	{ PCI_VENDOR_INTEL,	0x0070 },
8517 	{ PCI_VENDOR_INTEL,	0x0074 },
8518 	{ PCI_VENDOR_INTEL,	0x0078 },
8519 	{ PCI_VENDOR_INTEL,	0x007c },
8520 	{ PCI_VENDOR_INTEL,	0x0310 },
8521 	{ PCI_VENDOR_INTEL,	0x2074 },
8522 	{ PCI_VENDOR_INTEL,	0x4070 },
8523 	/* TODO: There are more ax201 devices with "main" product ID 0x06f0 */
8524 };
8525 
8526 int
8527 iwx_match(struct device *parent, iwx_match_t match __unused, void *aux)
8528 {
8529 	struct pci_attach_args *pa = aux;
8530 	pcireg_t subid;
8531 	pci_vendor_id_t svid;
8532 	pci_product_id_t spid;
8533 	int i;
8534 
8535 	if (!pci_matchbyid(pa, iwx_devices, nitems(iwx_devices)))
8536 		return 0;
8537 
8538 	/*
8539 	 * Some PCI product IDs are shared among devices which use distinct
8540 	 * chips or firmware. We need to match the subsystem ID as well to
8541 	 * ensure that we have in fact found a supported device.
8542 	 */
8543 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
8544 	svid = PCI_VENDOR(subid);
8545 	spid = PCI_PRODUCT(subid);
8546 
8547 	switch (PCI_PRODUCT(pa->pa_id)) {
8548 	case PCI_PRODUCT_INTEL_WL_22500_1: /* AX200 */
8549 		return 1; /* match any device */
8550 	case PCI_PRODUCT_INTEL_WL_22500_2: /* AX201 */
8551 	case PCI_PRODUCT_INTEL_WL_22500_3: /* AX201 */
8552 	case PCI_PRODUCT_INTEL_WL_22500_4: /* AX201 */
8553 	case PCI_PRODUCT_INTEL_WL_22500_5: /* AX201 */
8554 		for (i = 0; i < nitems(iwx_subsystem_id_ax201); i++) {
8555 			if (svid == iwx_subsystem_id_ax201[i].pm_vid &&
8556 			    spid == iwx_subsystem_id_ax201[i].pm_pid)
8557 				return 1;
8558 
8559 		}
8560 		break;
8561 	default:
8562 		break;
8563 	}
8564 
8565 	return 0;
8566 }
8567 
8568 int
8569 iwx_preinit(struct iwx_softc *sc)
8570 {
8571 	struct ieee80211com *ic = &sc->sc_ic;
8572 	struct ifnet *ifp = IC2IFP(ic);
8573 	int err;
8574 	static int attached;
8575 
8576 	err = iwx_prepare_card_hw(sc);
8577 	if (err) {
8578 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
8579 		return err;
8580 	}
8581 
8582 	if (attached) {
8583 		/* Update MAC in case the upper layers changed it. */
8584 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
8585 		    ((struct arpcom *)ifp)->ac_enaddr);
8586 		return 0;
8587 	}
8588 
8589 	err = iwx_start_hw(sc);
8590 	if (err) {
8591 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
8592 		return err;
8593 	}
8594 
8595 	err = iwx_run_init_mvm_ucode(sc, 1);
8596 	iwx_stop_device(sc);
8597 	if (err)
8598 		return err;
8599 
8600 	/* Print version info and MAC address on first successful fw load. */
8601 	attached = 1;
8602 	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
8603 	    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
8604 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
8605 
8606 	if (sc->sc_nvm.sku_cap_11n_enable)
8607 		iwx_setup_ht_rates(sc);
8608 
8609 	/* not all hardware can do 5GHz band */
8610 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
8611 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
8612 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
8613 
8614 	/* Configure channel information obtained from firmware. */
8615 	ieee80211_channel_init(ifp);
8616 
8617 	/* Configure MAC address. */
8618 	err = if_setlladdr(ifp, ic->ic_myaddr);
8619 	if (err)
8620 		printf("%s: could not set MAC address (error %d)\n",
8621 		    DEVNAME(sc), err);
8622 
8623 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
8624 
8625 	return 0;
8626 }
8627 
8628 void
8629 iwx_attach_hook(struct device *self)
8630 {
8631 	struct iwx_softc *sc = (void *)self;
8632 
8633 	KASSERT(!cold);
8634 
8635 	iwx_preinit(sc);
8636 }
8637 
8638 void
8639 iwx_attach(struct device *parent, struct device *self, void *aux)
8640 {
8641 	struct iwx_softc *sc = (void *)self;
8642 	struct pci_attach_args *pa = aux;
8643 	pci_intr_handle_t ih;
8644 	pcireg_t reg, memtype;
8645 	struct ieee80211com *ic = &sc->sc_ic;
8646 	struct ifnet *ifp = &ic->ic_if;
8647 	const char *intrstr;
8648 	int err;
8649 	int txq_i, i, j;
8650 
8651 	sc->sc_pct = pa->pa_pc;
8652 	sc->sc_pcitag = pa->pa_tag;
8653 	sc->sc_dmat = pa->pa_dmat;
8654 
8655 	rw_init(&sc->ioctl_rwl, "iwxioctl");
8656 
8657 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
8658 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
8659 	if (err == 0) {
8660 		printf("%s: PCIe capability structure not found!\n",
8661 		    DEVNAME(sc));
8662 		return;
8663 	}
8664 
8665 	/* Clear device-specific "PCI retry timeout" register (41h). */
8666 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
8667 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
8668 
8669 	/* Enable bus-mastering and hardware bug workaround. */
8670 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
8671 	reg |= PCI_COMMAND_MASTER_ENABLE;
8672 	/* if !MSI */
8673 	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
8674 		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
8675 	}
8676 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
8677 
8678 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
8679 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
8680 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
8681 	if (err) {
8682 		printf("%s: can't map mem space\n", DEVNAME(sc));
8683 		return;
8684 	}
8685 
8686 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
8687 		sc->sc_msix = 1;
8688 	} else if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
8689 		printf("%s: can't map interrupt\n", DEVNAME(sc));
8690 		return;
8691 	}
8692 
8693 	intrstr = pci_intr_string(sc->sc_pct, ih);
8694 	if (sc->sc_msix)
8695 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
8696 		    iwx_intr_msix, sc, DEVNAME(sc));
8697 	else
8698 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
8699 		    iwx_intr, sc, DEVNAME(sc));
8700 
8701 	if (sc->sc_ih == NULL) {
8702 		printf("\n");
8703 		printf("%s: can't establish interrupt", DEVNAME(sc));
8704 		if (intrstr != NULL)
8705 			printf(" at %s", intrstr);
8706 		printf("\n");
8707 		return;
8708 	}
8709 	printf(", %s\n", intrstr);
8710 
8711 	/* Clear pending interrupts. */
8712 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
8713 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
8714 	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
8715 
8716 	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
8717 
8718 	/*
8719 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
8720 	 * changed, and now the revision step also includes bit 0-1 (no more
8721 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
8722 	 * in the old format.
8723 	 */
8724 	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
8725 			(IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
8726 
8727 	switch (PCI_PRODUCT(pa->pa_id)) {
8728 	case PCI_PRODUCT_INTEL_WL_22500_1:
8729 		sc->sc_fwname = "iwx-cc-a0-48";
8730 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
8731 		sc->sc_fwdmasegsz = IWX_FWDMASEGSZ_8000;
8732 		sc->sc_integrated = 1;
8733 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
8734 		sc->sc_low_latency_xtal = 0;
8735 		sc->sc_xtal_latency = 0;
8736 		sc->sc_tx_with_siso_diversity = 0;
8737 		sc->sc_uhb_supported = 0;
8738 		break;
8739 	case PCI_PRODUCT_INTEL_WL_22500_2:
8740 	case PCI_PRODUCT_INTEL_WL_22500_3:
8741 	case PCI_PRODUCT_INTEL_WL_22500_5:
8742 		if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
8743 			printf("%s: unsupported AX201 adapter\n", DEVNAME(sc));
8744 			return;
8745 		}
8746 
8747 		sc->sc_fwname = "iwx-QuZ-a0-hr-b0-48";
8748 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
8749 		sc->sc_fwdmasegsz = IWX_FWDMASEGSZ_8000;
8750 		sc->sc_integrated = 1;
8751 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
8752 		sc->sc_low_latency_xtal = 0;
8753 		sc->sc_xtal_latency = 5000;
8754 		sc->sc_tx_with_siso_diversity = 0;
8755 		sc->sc_uhb_supported = 0;
8756 		break;
8757 	case PCI_PRODUCT_INTEL_WL_22500_4:
8758 	    sc->sc_fwname = "iwx-Qu-c0-hr-b0-48";
8759 	    sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
8760 	    sc->sc_fwdmasegsz = IWX_FWDMASEGSZ_8000;
8761 	    sc->sc_integrated = 1;
8762 	    sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
8763 	    sc->sc_low_latency_xtal = 0;
8764 	    sc->sc_xtal_latency = 5000;
8765 	    sc->sc_tx_with_siso_diversity = 0;
8766 	    sc->sc_uhb_supported = 0;
8767 	    break;
8768 	default:
8769 		printf("%s: unknown adapter type\n", DEVNAME(sc));
8770 		return;
8771 	}
8772 
8773 	if (iwx_prepare_card_hw(sc) != 0) {
8774 		printf("%s: could not initialize hardware\n",
8775 		    DEVNAME(sc));
8776 		return;
8777 	}
8778 
8779 	/*
8780 	 * In order to recognize C step the driver should read the
8781 	 * chip version id located at the AUX bus MISC address.
8782 	 */
8783 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
8784 		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
8785 	DELAY(2);
8786 
8787 	err = iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
8788 			   IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
8789 			   IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
8790 			   25000);
8791 	if (!err) {
8792 		printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
8793 		return;
8794 	}
8795 
8796 	if (iwx_nic_lock(sc)) {
8797 		uint32_t hw_step = iwx_read_prph(sc, IWX_WFPM_CTRL_REG);
8798 		hw_step |= IWX_ENABLE_WFPM;
8799 		iwx_write_prph(sc, IWX_WFPM_CTRL_REG, hw_step);
8800 		hw_step = iwx_read_prph(sc, IWX_AUX_MISC_REG);
8801 		hw_step = (hw_step >> IWX_HW_STEP_LOCATION_BITS) & 0xF;
8802 		if (hw_step == 0x3)
8803 			sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
8804 					(IWX_SILICON_C_STEP << 2);
8805 		iwx_nic_unlock(sc);
8806 	} else {
8807 		printf("%s: Failed to lock the nic\n", DEVNAME(sc));
8808 		return;
8809 	}
8810 
8811 	/* Allocate DMA memory for loading firmware. */
8812 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
8813 	    sizeof(struct iwx_context_info), 0);
8814 	if (err) {
8815 		printf("%s: could not allocate memory for loading firmware\n",
8816 		    DEVNAME(sc));
8817 		return;
8818 	}
8819 
8820 	/*
8821 	 * Allocate DMA memory for firmware transfers.
8822 	 * Must be aligned on a 16-byte boundary.
8823 	 */
8824 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
8825 	    sc->sc_fwdmasegsz, 16);
8826 	if (err) {
8827 		printf("%s: could not allocate memory for firmware transfers\n",
8828 		    DEVNAME(sc));
8829 		goto fail0;
8830 	}
8831 
8832 	/* Allocate interrupt cause table (ICT).*/
8833 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
8834 	    IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
8835 	if (err) {
8836 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
8837 		goto fail1;
8838 	}
8839 
8840 	/* TX scheduler rings must be aligned on a 1KB boundary. */
8841 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
8842 	    nitems(sc->txq) * sizeof(struct iwx_agn_scd_bc_tbl), 1024);
8843 	if (err) {
8844 		printf("%s: could not allocate TX scheduler rings\n",
8845 		    DEVNAME(sc));
8846 		goto fail3;
8847 	}
8848 
8849 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
8850 		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
8851 		if (err) {
8852 			printf("%s: could not allocate TX ring %d\n",
8853 			    DEVNAME(sc), txq_i);
8854 			goto fail4;
8855 		}
8856 	}
8857 
8858 	err = iwx_alloc_rx_ring(sc, &sc->rxq);
8859 	if (err) {
8860 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
8861 		goto fail4;
8862 	}
8863 
8864 	sc->sc_nswq = taskq_create("iwxns", 1, IPL_NET, 0);
8865 	if (sc->sc_nswq == NULL)
8866 		goto fail4;
8867 
8868 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
8869 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
8870 	ic->ic_state = IEEE80211_S_INIT;
8871 
8872 	/* Set device capabilities. */
8873 	ic->ic_caps =
8874 	    IEEE80211_C_WEP |		/* WEP */
8875 	    IEEE80211_C_RSN |		/* WPA/RSN */
8876 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
8877 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
8878 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
8879 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
8880 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
8881 
8882 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
8883 	ic->ic_htcaps |=
8884 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
8885 	ic->ic_htxcaps = 0;
8886 	ic->ic_txbfcaps = 0;
8887 	ic->ic_aselcaps = 0;
8888 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
8889 
8890 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
8891 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
8892 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
8893 
8894 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
8895 		sc->sc_phyctxt[i].id = i;
8896 	}
8897 
8898 	/* IBSS channel undefined for now. */
8899 	ic->ic_ibss_chan = &ic->ic_channels[1];
8900 
8901 	ic->ic_max_rssi = IWX_MAX_DBM - IWX_MIN_DBM;
8902 
8903 	ifp->if_softc = sc;
8904 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
8905 	ifp->if_ioctl = iwx_ioctl;
8906 	ifp->if_start = iwx_start;
8907 	ifp->if_watchdog = iwx_watchdog;
8908 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
8909 
8910 	if_attach(ifp);
8911 	ieee80211_ifattach(ifp);
8912 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
8913 
8914 #if NBPFILTER > 0
8915 	iwx_radiotap_attach(sc);
8916 #endif
8917 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
8918 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
8919 		rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
8920 		rxba->sc = sc;
8921 		timeout_set(&rxba->session_timer, iwx_rx_ba_session_expired,
8922 		    rxba);
8923 		timeout_set(&rxba->reorder_buf.reorder_timer,
8924 		    iwx_reorder_timer_expired, &rxba->reorder_buf);
8925 		for (j = 0; j < nitems(rxba->entries); j++)
8926 			ml_init(&rxba->entries[j].frames);
8927 	}
8928 	task_set(&sc->init_task, iwx_init_task, sc);
8929 	task_set(&sc->newstate_task, iwx_newstate_task, sc);
8930 	task_set(&sc->ba_task, iwx_ba_task, sc);
8931 	task_set(&sc->setkey_task, iwx_setkey_task, sc);
8932 	task_set(&sc->mac_ctxt_task, iwx_mac_ctxt_task, sc);
8933 
8934 	ic->ic_node_alloc = iwx_node_alloc;
8935 	ic->ic_bgscan_start = iwx_bgscan;
8936 	ic->ic_set_key = iwx_set_key;
8937 	ic->ic_delete_key = iwx_delete_key;
8938 
8939 	/* Override 802.11 state transition machine. */
8940 	sc->sc_newstate = ic->ic_newstate;
8941 	ic->ic_newstate = iwx_newstate;
8942 	ic->ic_updateprot = iwx_updateprot;
8943 	ic->ic_updateslot = iwx_updateslot;
8944 	ic->ic_updateedca = iwx_updateedca;
8945 	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
8946 	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
8947 #ifdef notyet
8948 	ic->ic_ampdu_tx_start = iwx_ampdu_tx_start;
8949 	ic->ic_ampdu_tx_stop = iwx_ampdu_tx_stop;
8950 #endif
8951 	/*
8952 	 * We cannot read the MAC address without loading the
8953 	 * firmware from disk. Postpone until mountroot is done.
8954 	 */
8955 	config_mountroot(self, iwx_attach_hook);
8956 
8957 	return;
8958 
8959 fail4:	while (--txq_i >= 0)
8960 		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
8961 	iwx_free_rx_ring(sc, &sc->rxq);
8962 	iwx_dma_contig_free(&sc->sched_dma);
8963 fail3:	if (sc->ict_dma.vaddr != NULL)
8964 		iwx_dma_contig_free(&sc->ict_dma);
8965 
8966 fail1:	iwx_dma_contig_free(&sc->fw_dma);
8967 fail0:	iwx_dma_contig_free(&sc->ctxt_info_dma);
8968 	return;
8969 }
8970 
8971 #if NBPFILTER > 0
8972 void
8973 iwx_radiotap_attach(struct iwx_softc *sc)
8974 {
8975 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
8976 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
8977 
8978 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
8979 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
8980 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);
8981 
8982 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
8983 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
8984 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
8985 }
8986 #endif
8987 
8988 void
8989 iwx_init_task(void *arg1)
8990 {
8991 	struct iwx_softc *sc = arg1;
8992 	struct ifnet *ifp = &sc->sc_ic.ic_if;
8993 	int s = splnet();
8994 	int generation = sc->sc_generation;
8995 	int fatal = (sc->sc_flags & (IWX_FLAG_HW_ERR | IWX_FLAG_RFKILL));
8996 
8997 	rw_enter_write(&sc->ioctl_rwl);
8998 	if (generation != sc->sc_generation) {
8999 		rw_exit(&sc->ioctl_rwl);
9000 		splx(s);
9001 		return;
9002 	}
9003 
9004 	if (ifp->if_flags & IFF_RUNNING)
9005 		iwx_stop(ifp);
9006 	else
9007 		sc->sc_flags &= ~IWX_FLAG_HW_ERR;
9008 
9009 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
9010 		iwx_init(ifp);
9011 
9012 	rw_exit(&sc->ioctl_rwl);
9013 	splx(s);
9014 }
9015 
9016 int
9017 iwx_resume(struct iwx_softc *sc)
9018 {
9019 	pcireg_t reg;
9020 
9021 	/* Clear device-specific "PCI retry timeout" register (41h). */
9022 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
9023 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
9024 
9025 	/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
9026 	iwx_conf_msix_hw(sc, 0);
9027 
9028 	iwx_enable_rfkill_int(sc);
9029 	iwx_check_rfkill(sc);
9030 
9031 	return iwx_prepare_card_hw(sc);
9032 }
9033 
9034 int
9035 iwx_activate(struct device *self, int act)
9036 {
9037 	struct iwx_softc *sc = (struct iwx_softc *)self;
9038 	struct ifnet *ifp = &sc->sc_ic.ic_if;
9039 	int err = 0;
9040 
9041 	switch (act) {
9042 	case DVACT_QUIESCE:
9043 		if (ifp->if_flags & IFF_RUNNING) {
9044 			rw_enter_write(&sc->ioctl_rwl);
9045 			iwx_stop(ifp);
9046 			rw_exit(&sc->ioctl_rwl);
9047 		}
9048 		break;
9049 	case DVACT_RESUME:
9050 		err = iwx_resume(sc);
9051 		if (err)
9052 			printf("%s: could not initialize hardware\n",
9053 			    DEVNAME(sc));
9054 		break;
9055 	case DVACT_WAKEUP:
9056 		/* Hardware should be up at this point. */
9057 		if (iwx_set_hw_ready(sc))
9058 			task_add(systq, &sc->init_task);
9059 		break;
9060 	}
9061 
9062 	return 0;
9063 }
9064 
9065 struct cfdriver iwx_cd = {
9066 	NULL, "iwx", DV_IFNET
9067 };
9068 
9069 struct cfattach iwx_ca = {
9070 	sizeof(struct iwx_softc), iwx_match, iwx_attach,
9071 	NULL, iwx_activate
9072 };
9073